Merge pull request #20152 from karalabe/snapshot-5

Dynamic state snapshots

commit 613af7ceea
accounts/abi/bind/backends/simulated.go

@@ -124,7 +124,7 @@ func (b *SimulatedBackend) rollback() {
 	statedb, _ := b.blockchain.State()
 
 	b.pendingBlock = blocks[0]
-	b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
+	b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database(), nil)
 }
 
 // stateByBlockNumber retrieves a state by a given blocknumber.
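The recurring change throughout this PR is mechanical: state.New gains a third argument, an optional snapshot tree, and passing nil preserves the old trie-only behaviour. A minimal sketch of the new call shape (the helper and its setup are illustrative, not part of the diff):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
)

// openState is a hypothetical helper showing the post-PR signature:
// the snapshot tree is optional and nil disables snapshot-accelerated reads.
func openState(root common.Hash, snaps *snapshot.Tree) (*state.StateDB, error) {
	db := state.NewDatabase(rawdb.NewMemoryDatabase())
	return state.New(root, db, snaps) // snaps == nil -> plain trie access
}
```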
@@ -480,7 +480,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
 	statedb, _ := b.blockchain.State()
 
 	b.pendingBlock = blocks[0]
-	b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
+	b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database(), nil)
 	return nil
 }
 
@@ -593,7 +593,7 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
 	statedb, _ := b.blockchain.State()
 
 	b.pendingBlock = blocks[0]
-	b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
+	b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database(), nil)
 
 	return nil
 }
cmd/evm/runner.go

@@ -129,10 +129,10 @@ func runCmd(ctx *cli.Context) error {
 		genesisConfig = gen
 		db := rawdb.NewMemoryDatabase()
 		genesis := gen.ToBlock(db)
-		statedb, _ = state.New(genesis.Root(), state.NewDatabase(db))
+		statedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil)
 		chainConfig = gen.Config
 	} else {
-		statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
+		statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
 		genesisConfig = new(core.Genesis)
 	}
 	if ctx.GlobalString(SenderFlag.Name) != "" {
cmd/evm/staterunner.go

@@ -96,7 +96,7 @@ func stateTestCmd(ctx *cli.Context) error {
 		for _, st := range test.Subtests() {
 			// Run the test and aggregate the result
 			result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
-			state, err := test.Run(st, cfg)
+			state, err := test.Run(st, cfg, false)
 			// print state root for evmlab tracing
 			if ctx.GlobalBool(MachineFlag.Name) && state != nil {
 				fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
cmd/geth/chaincmd.go

@@ -79,6 +79,7 @@ The dumpgenesis command dumps the genesis block configuration in JSON format to
 			utils.CacheFlag,
 			utils.SyncModeFlag,
 			utils.GCModeFlag,
+			utils.SnapshotFlag,
 			utils.CacheDatabaseFlag,
 			utils.CacheGCFlag,
 		},
@@ -544,7 +545,7 @@ func dump(ctx *cli.Context) error {
 			fmt.Println("{}")
 			utils.Fatalf("block not found")
 		} else {
-			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
+			state, err := state.New(block.Root(), state.NewDatabase(chainDb), nil)
 			if err != nil {
 				utils.Fatalf("could not create new state: %v", err)
 			}
cmd/geth/main.go

@@ -91,6 +91,7 @@ var (
 		utils.SyncModeFlag,
 		utils.ExitWhenSyncedFlag,
 		utils.GCModeFlag,
+		utils.SnapshotFlag,
 		utils.LightServeFlag,
 		utils.LightLegacyServFlag,
 		utils.LightIngressFlag,
@@ -106,6 +107,7 @@ var (
 		utils.CacheDatabaseFlag,
 		utils.CacheTrieFlag,
 		utils.CacheGCFlag,
+		utils.CacheSnapshotFlag,
 		utils.CacheNoPrefetchFlag,
 		utils.ListenPortFlag,
 		utils.MaxPeersFlag,
cmd/geth/usage.go

@@ -137,6 +137,7 @@ var AppHelpFlagGroups = []flagGroup{
 			utils.CacheDatabaseFlag,
 			utils.CacheTrieFlag,
 			utils.CacheGCFlag,
+			utils.CacheSnapshotFlag,
 			utils.CacheNoPrefetchFlag,
 		},
 	},
cmd/utils/flags.go

@@ -225,6 +225,10 @@ var (
 		Usage: `Blockchain garbage collection mode ("full", "archive")`,
 		Value: "full",
 	}
+	SnapshotFlag = cli.BoolFlag{
+		Name:  "snapshot",
+		Usage: `Enables snapshot-database mode -- experimental work in progress feature`,
+	}
 	LightKDFFlag = cli.BoolFlag{
 		Name:  "lightkdf",
 		Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
@@ -383,14 +387,19 @@ var (
 	}
 	CacheTrieFlag = cli.IntFlag{
 		Name:  "cache.trie",
-		Usage: "Percentage of cache memory allowance to use for trie caching (default = 25% full mode, 50% archive mode)",
-		Value: 25,
+		Usage: "Percentage of cache memory allowance to use for trie caching (default = 15% full mode, 30% archive mode)",
+		Value: 15,
 	}
 	CacheGCFlag = cli.IntFlag{
 		Name:  "cache.gc",
 		Usage: "Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)",
 		Value: 25,
 	}
+	CacheSnapshotFlag = cli.IntFlag{
+		Name:  "cache.snapshot",
+		Usage: "Percentage of cache memory allowance to use for snapshot caching (default = 10% full mode, 20% archive mode)",
+		Value: 10,
+	}
 	CacheNoPrefetchFlag = cli.BoolFlag{
 		Name:  "cache.noprefetch",
 		Usage: "Disable heuristic state prefetch during block import (less CPU and disk IO, more time waiting for data)",
@@ -1463,6 +1472,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
 	if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
 		cfg.TrieDirtyCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
 	}
+	if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheSnapshotFlag.Name) {
+		cfg.SnapshotCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheSnapshotFlag.Name) / 100
+	}
+	if !ctx.GlobalIsSet(SnapshotFlag.Name) {
+		cfg.SnapshotCache = 0 // Disabled
+	}
 	if ctx.GlobalIsSet(DocRootFlag.Name) {
 		cfg.DocRoot = ctx.GlobalString(DocRootFlag.Name)
 	}
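As with the trie caches, the snapshot cache is carved out of the global --cache budget as an integer percentage, and --snapshot acts as a master switch that zeroes the budget when absent. A worked example of the arithmetic above, with hypothetical flag values:

```go
package example

// Hypothetical invocation: geth --snapshot --cache 4096 --cache.snapshot 10
func snapshotBudget() int {
	cache := 4096 // --cache: total memory allowance in MB
	pct := 10     // --cache.snapshot: slice reserved for snapshot caching
	return cache * pct / 100 // 409 MB; forced to 0 when --snapshot is missing
}
```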
@@ -1724,6 +1739,10 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
 		TrieDirtyLimit:      eth.DefaultConfig.TrieDirtyCache,
 		TrieDirtyDisabled:   ctx.GlobalString(GCModeFlag.Name) == "archive",
 		TrieTimeLimit:       eth.DefaultConfig.TrieTimeout,
+		SnapshotLimit:       eth.DefaultConfig.SnapshotCache,
+	}
+	if !ctx.GlobalIsSet(SnapshotFlag.Name) {
+		cache.SnapshotLimit = 0 // Disabled
 	}
 	if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
 		cache.TrieCleanLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
core/blockchain.go

@@ -34,6 +34,7 @@ import (
 	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/state/snapshot"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/ethdb"
@@ -61,6 +62,10 @@ var (
 	storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
 	storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)
 
+	snapshotAccountReadTimer = metrics.NewRegisteredTimer("chain/snapshot/account/reads", nil)
+	snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil)
+	snapshotCommitTimer      = metrics.NewRegisteredTimer("chain/snapshot/commits", nil)
+
 	blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
 	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
 	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
@@ -115,6 +120,9 @@ type CacheConfig struct {
 	TrieDirtyLimit      int           // Memory limit (MB) at which to start flushing dirty trie nodes to disk
 	TrieDirtyDisabled   bool          // Whether to disable trie write caching and GC altogether (archive node)
 	TrieTimeLimit       time.Duration // Time limit after which to flush the current in-memory trie to disk
+	SnapshotLimit       int           // Memory allowance (MB) to use for caching snapshot entries in memory
+
+	SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
 }
 
 // BlockChain represents the canonical chain given a database with a genesis
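A CacheConfig enabling snapshots might look as follows. The values mirror the defaults that NewBlockChain installs when no config is passed (see the @@ -188,6 hunk below), so treat this as an illustration rather than a recommendation:

```go
package example

import (
	"time"

	"github.com/ethereum/go-ethereum/core"
)

// Illustrative only: mirrors the defaults NewBlockChain falls back to.
var cacheConfig = &core.CacheConfig{
	TrieCleanLimit: 256,
	TrieDirtyLimit: 256,
	TrieTimeLimit:  5 * time.Minute,
	SnapshotLimit:  256,  // MB for snapshot entries; 0 disables snapshots
	SnapshotWait:   true, // block startup until the snapshot is built
}
```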
@@ -136,6 +144,7 @@ type BlockChain struct {
 	cacheConfig *CacheConfig        // Cache configuration for pruning
 
 	db     ethdb.Database // Low level persistent database to store final content in
+	snaps  *snapshot.Tree // Snapshot tree for fast trie leaf access
 	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
 	gcproc time.Duration  // Accumulates canonical block processing for trie dumping
 
@@ -188,6 +197,8 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 			TrieCleanLimit: 256,
 			TrieDirtyLimit: 256,
 			TrieTimeLimit:  5 * time.Minute,
+			SnapshotLimit:  256,
+			SnapshotWait:   true,
 		}
 	}
 	bodyCache, _ := lru.New(bodyCacheLimit)
@@ -293,6 +304,10 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 			}
 		}
 	}
+	// Load any existing snapshot, regenerating it if loading failed
+	if bc.cacheConfig.SnapshotLimit > 0 {
+		bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, bc.CurrentBlock().Root(), !bc.cacheConfig.SnapshotWait)
+	}
 	// Take ownership of this particular state
 	go bc.update()
 	return bc, nil
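Reading the snapshot.New call above, the arguments line up as follows; the meanings are inferred from how each value is produced in this hunk, so treat the annotations as a reading aid rather than API documentation:

```go
// Annotated restatement of the constructor call in the hunk above.
bc.snaps = snapshot.New(
	bc.db,                        // disk database holding any journalled snapshot
	bc.stateCache.TrieDB(),       // trie database, for regenerating the snapshot
	bc.cacheConfig.SnapshotLimit, // in-memory allowance for snapshot data, in MB
	bc.CurrentBlock().Root(),     // state root the snapshot must correspond to
	!bc.cacheConfig.SnapshotWait, // async generation unless tests ask to wait
)
```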
@@ -339,7 +354,7 @@ func (bc *BlockChain) loadLastState() error {
 		return bc.Reset()
 	}
 	// Make sure the state associated with the block is available
-	if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
+	if _, err := state.New(currentBlock.Root(), bc.stateCache, bc.snaps); err != nil {
 		// Dangling block without a state associated, init from scratch
 		log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
 		if err := bc.repair(&currentBlock); err != nil {
@@ -401,7 +416,7 @@ func (bc *BlockChain) SetHead(head uint64) error {
 			if newHeadBlock == nil {
 				newHeadBlock = bc.genesisBlock
 			} else {
-				if _, err := state.New(newHeadBlock.Root(), bc.stateCache); err != nil {
+				if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
 					// Rewound state missing, rolled back to before pivot, reset to genesis
 					newHeadBlock = bc.genesisBlock
 				}
@@ -486,6 +501,10 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
 	headBlockGauge.Update(int64(block.NumberU64()))
 	bc.chainmu.Unlock()
 
+	// Destroy any existing state snapshot and regenerate it in the background
+	if bc.snaps != nil {
+		bc.snaps.Rebuild(block.Root())
+	}
 	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
 	return nil
 }
@@ -524,7 +543,7 @@ func (bc *BlockChain) State() (*state.StateDB, error) {
 
 // StateAt returns a new mutable state based on a particular point in time.
 func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
-	return state.New(root, bc.stateCache)
+	return state.New(root, bc.stateCache, bc.snaps)
 }
 
 // StateCache returns the caching database underpinning the blockchain instance.
@@ -576,7 +595,7 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
 func (bc *BlockChain) repair(head **types.Block) error {
 	for {
 		// Abort if we've rewound to a head block that does have associated state
-		if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
+		if _, err := state.New((*head).Root(), bc.stateCache, bc.snaps); err == nil {
 			log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
 			return nil
 		}
@@ -839,6 +858,14 @@ func (bc *BlockChain) Stop() {
 
 	bc.wg.Wait()
 
+	// Ensure that the entirety of the state snapshot is journalled to disk.
+	var snapBase common.Hash
+	if bc.snaps != nil {
+		var err error
+		if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
+			log.Error("Failed to journal state snapshot", "err", err)
+		}
+	}
 	// Ensure the state of a recent block is also stored to disk before exiting.
 	// We're writing three different states to catch different restart scenarios:
 	//  - HEAD:     So we don't need to reprocess any blocks in the general case
@@ -857,6 +884,12 @@ func (bc *BlockChain) Stop() {
 				}
 			}
 		}
+		if snapBase != (common.Hash{}) {
+			log.Info("Writing snapshot state to disk", "root", snapBase)
+			if err := triedb.Commit(snapBase, true); err != nil {
+				log.Error("Failed to commit recent state trie", "err", err)
+			}
+		}
 		for !bc.triegc.Empty() {
 			triedb.Dereference(bc.triegc.PopItem().(common.Hash))
 		}
@@ -1647,7 +1680,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
 		if parent == nil {
 			parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
 		}
-		statedb, err := state.New(parent.Root, bc.stateCache)
+		statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
 		if err != nil {
 			return it.index, err
 		}
@@ -1656,9 +1689,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
 		var followupInterrupt uint32
 		if !bc.cacheConfig.TrieCleanNoPrefetch {
 			if followup, err := it.peek(); followup != nil && err == nil {
-				throwaway, _ := state.New(parent.Root, bc.stateCache)
+				throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps)
 				go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) {
-					bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, interrupt)
+					bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt)
 
 					blockPrefetchExecuteTimer.Update(time.Since(start))
 					if atomic.LoadUint32(interrupt) == 1 {
@@ -1676,14 +1709,16 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
 			return it.index, err
 		}
 		// Update the metrics touched during block processing
-		accountReadTimer.Update(statedb.AccountReads)     // Account reads are complete, we can mark them
-		storageReadTimer.Update(statedb.StorageReads)     // Storage reads are complete, we can mark them
-		accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete, we can mark them
-		storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them
+		accountReadTimer.Update(statedb.AccountReads)                 // Account reads are complete, we can mark them
+		storageReadTimer.Update(statedb.StorageReads)                 // Storage reads are complete, we can mark them
+		accountUpdateTimer.Update(statedb.AccountUpdates)             // Account updates are complete, we can mark them
+		storageUpdateTimer.Update(statedb.StorageUpdates)             // Storage updates are complete, we can mark them
+		snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them
+		snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them
 
 		triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation
-		trieproc := statedb.AccountReads + statedb.AccountUpdates
-		trieproc += statedb.StorageReads + statedb.StorageUpdates
+		trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates
+		trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates
 
 		blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash)
 
@@ -1712,10 +1747,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
 		atomic.StoreUint32(&followupInterrupt, 1)
 
 		// Update the metrics touched during block commit
-		accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
-		storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them
+		accountCommitTimer.Update(statedb.AccountCommits)   // Account commits are complete, we can mark them
+		storageCommitTimer.Update(statedb.StorageCommits)   // Storage commits are complete, we can mark them
+		snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
 
-		blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits)
+		blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits)
 		blockInsertTimer.UpdateSince(start)
 
 		switch status {
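The timer bookkeeping above encodes a simple identity: the wall-clock time around Process() includes state reads (now partly served by snapshots) and hashing, so pure EVM execution time is what remains after subtraction. A toy illustration with made-up durations:

```go
package example

import (
	"fmt"
	"time"
)

// Toy numbers only; in the real code these come from the durations
// accumulated on the StateDB (AccountReads, SnapshotAccountReads, ...).
func illustrate() {
	wall := 120 * time.Millisecond    // time.Since(substart) around Process()
	trieproc := 30 * time.Millisecond // snapshot + trie reads and updates
	triehash := 20 * time.Millisecond // account + storage hashing
	fmt.Println(wall - trieproc - triehash) // 70ms -> blockExecutionTimer
}
```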
core/blockchain_test.go

@@ -144,7 +144,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
 			}
 			return err
 		}
-		statedb, err := state.New(blockchain.GetBlockByHash(block.ParentHash()).Root(), blockchain.stateCache)
+		statedb, err := state.New(blockchain.GetBlockByHash(block.ParentHash()).Root(), blockchain.stateCache, nil)
 		if err != nil {
 			return err
 		}
@@ -2315,7 +2315,7 @@ func TestDeleteCreateRevert(t *testing.T) {
 				// The address 0xAAAAA selfdestructs if called
 				aa: {
 					// Code needs to just selfdestruct
-					Code:    []byte{byte(vm.PC), 0xFF},
+					Code:    []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)},
 					Nonce:   1,
 					Balance: big.NewInt(0),
 				},
@@ -2362,3 +2362,522 @@ func TestDeleteCreateRevert(t *testing.T) {
 		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 	}
 }
+
+// TestDeleteRecreateSlots tests a state-transition that contains both deletion
+// and recreation of contract state.
+// Contract A exists, has slots 1 and 2 set
+// Tx 1: Selfdestruct A
+// Tx 2: Re-create A, set slots 3 and 4
+// Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct,
+// and then the new slots exist
+func TestDeleteRecreateSlots(t *testing.T) {
+	var (
+		// Generate a canonical chain to act as the main dataset
+		engine = ethash.NewFaker()
+		db     = rawdb.NewMemoryDatabase()
+		// A sender who makes transactions, has some funds
+		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		address   = crypto.PubkeyToAddress(key.PublicKey)
+		funds     = big.NewInt(1000000000)
+		bb        = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
+		aaStorage = make(map[common.Hash]common.Hash)          // Initial storage in AA
+		aaCode    = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
+	)
+	// Populate two slots
+	aaStorage[common.HexToHash("01")] = common.HexToHash("01")
+	aaStorage[common.HexToHash("02")] = common.HexToHash("02")
+
+	// The bb-code needs to CREATE2 the aa contract. It consists of
+	// both initcode and deployment code
+	// initcode:
+	// 1. Set slots 3=3, 4=4,
+	// 2. Return aaCode
+
+	initCode := []byte{
+		byte(vm.PUSH1), 0x3, // value
+		byte(vm.PUSH1), 0x3, // location
+		byte(vm.SSTORE),     // Set slot[3] = 3
+		byte(vm.PUSH1), 0x4, // value
+		byte(vm.PUSH1), 0x4, // location
+		byte(vm.SSTORE), // Set slot[4] = 4
+		// Slots are set, now return the code
+		byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
+		byte(vm.PUSH1), 0x0, // memory start on stack
+		byte(vm.MSTORE),
+		// Code is now in memory.
+		byte(vm.PUSH1), 0x2, // size
+		byte(vm.PUSH1), byte(32 - 2), // offset
+		byte(vm.RETURN),
+	}
+	if l := len(initCode); l > 32 {
+		t.Fatalf("init code is too long for a pushx, need a more elaborate deployer")
+	}
+	bbCode := []byte{
+		// Push initcode onto stack
+		byte(vm.PUSH1) + byte(len(initCode)-1)}
+	bbCode = append(bbCode, initCode...)
+	bbCode = append(bbCode, []byte{
+		byte(vm.PUSH1), 0x0, // memory start on stack
+		byte(vm.MSTORE),
+		byte(vm.PUSH1), 0x00, // salt
+		byte(vm.PUSH1), byte(len(initCode)), // size
+		byte(vm.PUSH1), byte(32 - len(initCode)), // offset
+		byte(vm.PUSH1), 0x00, // endowment
+		byte(vm.CREATE2),
+	}...)
+
+	initHash := crypto.Keccak256Hash(initCode)
+	aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:])
+	t.Logf("Destination address: %x\n", aa)
+
+	gspec := &Genesis{
+		Config: params.TestChainConfig,
+		Alloc: GenesisAlloc{
+			address: {Balance: funds},
+			// The address 0xAAAAA selfdestructs if called
+			aa: {
+				// Code needs to just selfdestruct
+				Code:    aaCode,
+				Nonce:   1,
+				Balance: big.NewInt(0),
+				Storage: aaStorage,
+			},
+			// The contract BB recreates AA
+			bb: {
+				Code:    bbCode,
+				Balance: big.NewInt(1),
+			},
+		},
+	}
+	genesis := gspec.MustCommit(db)
+
+	blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 1, func(i int, b *BlockGen) {
+		b.SetCoinbase(common.Address{1})
+		// One transaction to AA, to kill it
+		tx, _ := types.SignTx(types.NewTransaction(0, aa,
+			big.NewInt(0), 50000, big.NewInt(1), nil), types.HomesteadSigner{}, key)
+		b.AddTx(tx)
+		// One transaction to BB, to recreate AA
+		tx, _ = types.SignTx(types.NewTransaction(1, bb,
+			big.NewInt(0), 100000, big.NewInt(1), nil), types.HomesteadSigner{}, key)
+		b.AddTx(tx)
+	})
+	// Import the canonical chain
+	diskdb := rawdb.NewMemoryDatabase()
+	gspec.MustCommit(diskdb)
+	chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{
+		Debug:  true,
+		Tracer: vm.NewJSONLogger(nil, os.Stdout),
+	}, nil)
+	if err != nil {
+		t.Fatalf("failed to create tester chain: %v", err)
+	}
+	if n, err := chain.InsertChain(blocks); err != nil {
+		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
+	}
+	statedb, _ := chain.State()
+
+	// If all is correct, then slot 1 and 2 are zero
+	if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp {
+		t.Errorf("got %x exp %x", got, exp)
+	}
+	if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp {
+		t.Errorf("got %x exp %x", got, exp)
+	}
+	// Also, 3 and 4 should be set
+	if got, exp := statedb.GetState(aa, common.HexToHash("03")), common.HexToHash("03"); got != exp {
+		t.Fatalf("got %x exp %x", got, exp)
+	}
+	if got, exp := statedb.GetState(aa, common.HexToHash("04")), common.HexToHash("04"); got != exp {
+		t.Fatalf("got %x exp %x", got, exp)
+	}
+}
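The deployer in this test relies on a small memory-layout trick worth spelling out: the entire initcode (21 bytes here, by a straight count of the opcodes above) is pushed as a single stack word, and MSTORE writes that word right-aligned into memory[0:32]; hence the CREATE2 offset of 32-len(initCode). The inner RETURN uses the same trick for the 2-byte runtime code. A sketch of the arithmetic (helper name invented for illustration):

```go
package example

// create2Args shows why the deployer uses offset 32-len(initCode): MSTORE
// writes the pushed initcode word right-aligned into memory[0:32], i.e.
//
//	memory[0 : 32-n]  = zero padding
//	memory[32-n : 32] = initcode (n bytes, n <= 32)
//
// so CREATE2 (and the inner RETURN) read n bytes starting at 32-n.
// For the initcode above, n = 21 gives offset 11.
func create2Args(initCodeLen int) (offset, size int) {
	return 32 - initCodeLen, initCodeLen
}
```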
+
+// TestDeleteRecreateAccount tests a state-transition that contains deletion of a
+// contract with storage, and a recreate of the same contract via a
+// regular value-transfer
+// Expected outcome is that _all_ slots are cleared from A
+func TestDeleteRecreateAccount(t *testing.T) {
+	var (
+		// Generate a canonical chain to act as the main dataset
+		engine = ethash.NewFaker()
+		db     = rawdb.NewMemoryDatabase()
+		// A sender who makes transactions, has some funds
+		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		address = crypto.PubkeyToAddress(key.PublicKey)
+		funds   = big.NewInt(1000000000)
+
+		aa        = common.HexToAddress("0x7217d81b76bdd8707601e959454e3d776aee5f43")
+		aaStorage = make(map[common.Hash]common.Hash)          // Initial storage in AA
+		aaCode    = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
+	)
+	// Populate two slots
+	aaStorage[common.HexToHash("01")] = common.HexToHash("01")
+	aaStorage[common.HexToHash("02")] = common.HexToHash("02")
+
+	gspec := &Genesis{
+		Config: params.TestChainConfig,
+		Alloc: GenesisAlloc{
+			address: {Balance: funds},
+			// The address 0xAAAAA selfdestructs if called
+			aa: {
+				// Code needs to just selfdestruct
+				Code:    aaCode,
+				Nonce:   1,
+				Balance: big.NewInt(0),
+				Storage: aaStorage,
+			},
+		},
+	}
+	genesis := gspec.MustCommit(db)
+
+	blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 1, func(i int, b *BlockGen) {
+		b.SetCoinbase(common.Address{1})
+		// One transaction to AA, to kill it
+		tx, _ := types.SignTx(types.NewTransaction(0, aa,
+			big.NewInt(0), 50000, big.NewInt(1), nil), types.HomesteadSigner{}, key)
+		b.AddTx(tx)
+		// One transaction to AA, to recreate it (but without storage)
+		tx, _ = types.SignTx(types.NewTransaction(1, aa,
+			big.NewInt(1), 100000, big.NewInt(1), nil), types.HomesteadSigner{}, key)
+		b.AddTx(tx)
+	})
+	// Import the canonical chain
+	diskdb := rawdb.NewMemoryDatabase()
+	gspec.MustCommit(diskdb)
+	chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{
+		Debug:  true,
+		Tracer: vm.NewJSONLogger(nil, os.Stdout),
+	}, nil)
+	if err != nil {
+		t.Fatalf("failed to create tester chain: %v", err)
+	}
+	if n, err := chain.InsertChain(blocks); err != nil {
+		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
+	}
+	statedb, _ := chain.State()
+
+	// If all is correct, then both slots are zero
+	if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp {
+		t.Errorf("got %x exp %x", got, exp)
+	}
+	if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp {
+		t.Errorf("got %x exp %x", got, exp)
+	}
+}
+
+// TestDeleteRecreateSlotsAcrossManyBlocks tests multiple state-transitions that
+// contain both deletion and recreation of contract state.
+// Contract A exists, has slots 1 and 2 set
+// Tx 1: Selfdestruct A
+// Tx 2: Re-create A, set slots 3 and 4
+// Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct,
+// and then the new slots exist
+func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) {
+	var (
+		// Generate a canonical chain to act as the main dataset
+		engine = ethash.NewFaker()
+		db     = rawdb.NewMemoryDatabase()
+		// A sender who makes transactions, has some funds
+		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		address   = crypto.PubkeyToAddress(key.PublicKey)
+		funds     = big.NewInt(1000000000)
+		bb        = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
+		aaStorage = make(map[common.Hash]common.Hash)          // Initial storage in AA
+		aaCode    = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
+	)
+	// Populate two slots
+	aaStorage[common.HexToHash("01")] = common.HexToHash("01")
+	aaStorage[common.HexToHash("02")] = common.HexToHash("02")
+
+	// The bb-code needs to CREATE2 the aa contract. It consists of
+	// both initcode and deployment code
+	// initcode:
+	// 1. Set slots 3=blocknum+1, 4=4,
+	// 2. Return aaCode
+
+	initCode := []byte{
+		byte(vm.PUSH1), 0x1, //
+		byte(vm.NUMBER),     // value = number + 1
+		byte(vm.ADD),        //
+		byte(vm.PUSH1), 0x3, // location
+		byte(vm.SSTORE),     // Set slot[3] = number + 1
+		byte(vm.PUSH1), 0x4, // value
+		byte(vm.PUSH1), 0x4, // location
+		byte(vm.SSTORE), // Set slot[4] = 4
+		// Slots are set, now return the code
+		byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
+		byte(vm.PUSH1), 0x0, // memory start on stack
+		byte(vm.MSTORE),
+		// Code is now in memory.
+		byte(vm.PUSH1), 0x2, // size
+		byte(vm.PUSH1), byte(32 - 2), // offset
+		byte(vm.RETURN),
+	}
+	if l := len(initCode); l > 32 {
+		t.Fatalf("init code is too long for a pushx, need a more elaborate deployer")
+	}
+	bbCode := []byte{
+		// Push initcode onto stack
+		byte(vm.PUSH1) + byte(len(initCode)-1)}
+	bbCode = append(bbCode, initCode...)
+	bbCode = append(bbCode, []byte{
+		byte(vm.PUSH1), 0x0, // memory start on stack
+		byte(vm.MSTORE),
+		byte(vm.PUSH1), 0x00, // salt
+		byte(vm.PUSH1), byte(len(initCode)), // size
+		byte(vm.PUSH1), byte(32 - len(initCode)), // offset
+		byte(vm.PUSH1), 0x00, // endowment
+		byte(vm.CREATE2),
+	}...)
+
+	initHash := crypto.Keccak256Hash(initCode)
+	aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:])
+	t.Logf("Destination address: %x\n", aa)
+	gspec := &Genesis{
+		Config: params.TestChainConfig,
+		Alloc: GenesisAlloc{
+			address: {Balance: funds},
+			// The address 0xAAAAA selfdestructs if called
+			aa: {
+				// Code needs to just selfdestruct
+				Code:    aaCode,
+				Nonce:   1,
+				Balance: big.NewInt(0),
+				Storage: aaStorage,
+			},
+			// The contract BB recreates AA
+			bb: {
+				Code:    bbCode,
+				Balance: big.NewInt(1),
+			},
+		},
+	}
+	genesis := gspec.MustCommit(db)
+	var nonce uint64
+
+	type expectation struct {
+		exist    bool
+		blocknum int
+		values   map[int]int
+	}
+	var current = &expectation{
+		exist:    true, // exists in genesis
+		blocknum: 0,
+		values:   map[int]int{1: 1, 2: 2},
+	}
+	var expectations []*expectation
+	var newDestruct = func(e *expectation) *types.Transaction {
+		tx, _ := types.SignTx(types.NewTransaction(nonce, aa,
+			big.NewInt(0), 50000, big.NewInt(1), nil), types.HomesteadSigner{}, key)
+		nonce++
+		if e.exist {
+			e.exist = false
+			e.values = nil
+		}
+		t.Logf("block %d; adding destruct\n", e.blocknum)
+		return tx
+	}
+	var newResurrect = func(e *expectation) *types.Transaction {
+		tx, _ := types.SignTx(types.NewTransaction(nonce, bb,
+			big.NewInt(0), 100000, big.NewInt(1), nil), types.HomesteadSigner{}, key)
+		nonce++
+		if !e.exist {
+			e.exist = true
+			e.values = map[int]int{3: e.blocknum + 1, 4: 4}
+		}
+		t.Logf("block %d; adding resurrect\n", e.blocknum)
+		return tx
+	}
+
+	blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 150, func(i int, b *BlockGen) {
+		var exp = new(expectation)
+		exp.blocknum = i + 1
+		exp.values = make(map[int]int)
+		for k, v := range current.values {
+			exp.values[k] = v
+		}
+		exp.exist = current.exist
+
+		b.SetCoinbase(common.Address{1})
+		if i%2 == 0 {
+			b.AddTx(newDestruct(exp))
+		}
+		if i%3 == 0 {
+			b.AddTx(newResurrect(exp))
+		}
+		if i%5 == 0 {
+			b.AddTx(newDestruct(exp))
+		}
+		if i%7 == 0 {
+			b.AddTx(newResurrect(exp))
+		}
+		expectations = append(expectations, exp)
+		current = exp
+	})
+	// Import the canonical chain
+	diskdb := rawdb.NewMemoryDatabase()
+	gspec.MustCommit(diskdb)
+	chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{
+		//Debug:  true,
+		//Tracer: vm.NewJSONLogger(nil, os.Stdout),
+	}, nil)
+	if err != nil {
+		t.Fatalf("failed to create tester chain: %v", err)
+	}
+	var asHash = func(num int) common.Hash {
+		return common.BytesToHash([]byte{byte(num)})
+	}
+	for i, block := range blocks {
+		blockNum := i + 1
+		if n, err := chain.InsertChain([]*types.Block{block}); err != nil {
+			t.Fatalf("block %d: failed to insert into chain: %v", n, err)
+		}
+		statedb, _ := chain.State()
+		// If all is correct, then slot 1 and 2 are zero
+		if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp {
+			t.Errorf("block %d, got %x exp %x", blockNum, got, exp)
+		}
+		if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp {
+			t.Errorf("block %d, got %x exp %x", blockNum, got, exp)
+		}
+		exp := expectations[i]
+		if exp.exist {
+			if !statedb.Exist(aa) {
+				t.Fatalf("block %d, expected %v to exist, it did not", blockNum, aa)
+			}
+			for slot, val := range exp.values {
+				if gotValue, expValue := statedb.GetState(aa, asHash(slot)), asHash(val); gotValue != expValue {
+					t.Fatalf("block %d, slot %d, got %x exp %x", blockNum, slot, gotValue, expValue)
+				}
+			}
+		} else {
+			if statedb.Exist(aa) {
+				t.Fatalf("block %d, expected %v to not exist, it did", blockNum, aa)
+			}
+		}
+	}
+}
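The modulo schedule deserves one worked case, since several conditions can fire in the same block and the last transaction decides survival. A tiny helper, invented here, that replays the schedule:

```go
package example

// txsFor mirrors the generator above: conditions can overlap, and the txs
// are appended in this fixed order, so the last one wins.
// E.g. i=2: destruct only -> AA is gone after block 3; i=6: destruct then
// resurrect -> AA survives block 7 with slot3 = 8 (NUMBER+1) and slot4 = 4.
func txsFor(i int) (sequence []string) {
	if i%2 == 0 {
		sequence = append(sequence, "destruct")
	}
	if i%3 == 0 {
		sequence = append(sequence, "resurrect")
	}
	if i%5 == 0 {
		sequence = append(sequence, "destruct")
	}
	if i%7 == 0 {
		sequence = append(sequence, "resurrect")
	}
	return sequence
}
```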
+
+// TestInitThenFailCreateContract tests a pretty notorious case that happened
+// on mainnet over blocks 7338108, 7338110 and 7338115.
+// - Block 7338108: address e771789f5cccac282f23bb7add5690e1f6ca467c is initiated
+//   with 0.001 ether (thus created but no code)
+// - Block 7338110: a CREATE2 is attempted. The CREATE2 would deploy code on
+//   the same address e771789f5cccac282f23bb7add5690e1f6ca467c. However, the
+//   deployment fails due to OOG during initcode execution
+// - Block 7338115: another tx checks the balance of
+//   e771789f5cccac282f23bb7add5690e1f6ca467c, and the snapshotter returned it as
+//   zero.
+//
+// The problem being that the snapshotter maintains a destructset, and adds items
+// to the destructset in case something is created "onto" an existing item.
+// We need to either roll back the snapDestructs, or not place it into snapDestructs
+// in the first place.
+//
+func TestInitThenFailCreateContract(t *testing.T) {
+	var (
+		// Generate a canonical chain to act as the main dataset
+		engine = ethash.NewFaker()
+		db     = rawdb.NewMemoryDatabase()
+		// A sender who makes transactions, has some funds
+		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		address = crypto.PubkeyToAddress(key.PublicKey)
+		funds   = big.NewInt(1000000000)
+		bb      = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
+	)
+
+	// The bb-code needs to CREATE2 the aa contract. It consists of
+	// both initcode and deployment code
+	// initcode:
+	// 1. If blocknum < 2, error out (e.g. invalid opcode)
+	// 2. else, return a snippet of code
+	initCode := []byte{
+		byte(vm.PUSH1), 0x1, // y (1)
+		byte(vm.NUMBER), // x (number)
+		byte(vm.GT),     // x > y?
+		byte(vm.PUSH1), byte(0x8),
+		byte(vm.JUMPI), // jump to label if number > 1
+		byte(0xFE),     // illegal opcode
+		byte(vm.JUMPDEST),
+		byte(vm.PUSH1), 0x2, // size
+		byte(vm.PUSH1), 0x0, // offset
+		byte(vm.RETURN), // return 2 bytes of zero-code
+	}
+	if l := len(initCode); l > 32 {
+		t.Fatalf("init code is too long for a pushx, need a more elaborate deployer")
+	}
+	bbCode := []byte{
+		// Push initcode onto stack
+		byte(vm.PUSH1) + byte(len(initCode)-1)}
+	bbCode = append(bbCode, initCode...)
+	bbCode = append(bbCode, []byte{
+		byte(vm.PUSH1), 0x0, // memory start on stack
+		byte(vm.MSTORE),
+		byte(vm.PUSH1), 0x00, // salt
+		byte(vm.PUSH1), byte(len(initCode)), // size
+		byte(vm.PUSH1), byte(32 - len(initCode)), // offset
+		byte(vm.PUSH1), 0x00, // endowment
+		byte(vm.CREATE2),
+	}...)
+
+	initHash := crypto.Keccak256Hash(initCode)
+	aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:])
+	t.Logf("Destination address: %x\n", aa)
+
+	gspec := &Genesis{
+		Config: params.TestChainConfig,
+		Alloc: GenesisAlloc{
+			address: {Balance: funds},
+			// The address aa has some funds
+			aa: {Balance: big.NewInt(100000)},
+			// The contract BB tries to create code onto AA
+			bb: {
+				Code:    bbCode,
+				Balance: big.NewInt(1),
+			},
+		},
+	}
+	genesis := gspec.MustCommit(db)
+	nonce := uint64(0)
+	blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 4, func(i int, b *BlockGen) {
+		b.SetCoinbase(common.Address{1})
+		// One transaction to BB
+		tx, _ := types.SignTx(types.NewTransaction(nonce, bb,
+			big.NewInt(0), 100000, big.NewInt(1), nil), types.HomesteadSigner{}, key)
+		b.AddTx(tx)
+		nonce++
+	})
+
+	// Import the canonical chain
+	diskdb := rawdb.NewMemoryDatabase()
+	gspec.MustCommit(diskdb)
+	chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{
+		//Debug:  true,
+		//Tracer: vm.NewJSONLogger(nil, os.Stdout),
+	}, nil)
+	if err != nil {
+		t.Fatalf("failed to create tester chain: %v", err)
+	}
+	statedb, _ := chain.State()
+	if got, exp := statedb.GetBalance(aa), big.NewInt(100000); got.Cmp(exp) != 0 {
+		t.Fatalf("Genesis err, got %v exp %v", got, exp)
+	}
+	// First block tries to create, but fails
+	{
+		block := blocks[0]
+		if _, err := chain.InsertChain([]*types.Block{blocks[0]}); err != nil {
+			t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
+		}
+		statedb, _ = chain.State()
+		if got, exp := statedb.GetBalance(aa), big.NewInt(100000); got.Cmp(exp) != 0 {
+			t.Fatalf("block %d: got %v exp %v", block.NumberU64(), got, exp)
+		}
+	}
+	// Import the rest of the blocks
+	for _, block := range blocks[1:] {
+		if _, err := chain.InsertChain([]*types.Block{block}); err != nil {
+			t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
+		}
+	}
+}
core/chain_makers.go

@@ -228,7 +228,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
 		return nil, nil
 	}
 	for i := 0; i < n; i++ {
-		statedb, err := state.New(parent.Root(), state.NewDatabase(db))
+		statedb, err := state.New(parent.Root(), state.NewDatabase(db), nil)
 		if err != nil {
 			panic(err)
 		}
core/genesis.go

@@ -178,7 +178,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
 	// We have the genesis block in database(perhaps in ancient database)
 	// but the corresponding state is missing.
 	header := rawdb.ReadHeader(db, stored, 0)
-	if _, err := state.New(header.Root, state.NewDatabaseWithCache(db, 0)); err != nil {
+	if _, err := state.New(header.Root, state.NewDatabaseWithCache(db, 0), nil); err != nil {
 		if genesis == nil {
 			genesis = DefaultGenesisBlock()
 		}
@@ -259,7 +259,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
 	if db == nil {
 		db = rawdb.NewMemoryDatabase()
 	}
-	statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
+	statedb, _ := state.New(common.Hash{}, state.NewDatabase(db), nil)
 	for addr, account := range g.Alloc {
 		statedb.AddBalance(addr, account.Balance)
 		statedb.SetCode(addr, account.Code)
core/rawdb/accessors_snapshot.go (new file, 120 lines)

@@ -0,0 +1,120 @@
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | package rawdb | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | 	"github.com/ethereum/go-ethereum/ethdb" | ||||||
|  | 	"github.com/ethereum/go-ethereum/log" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // ReadSnapshotRoot retrieves the root of the block whose state is contained in
 | ||||||
|  | // the persisted snapshot.
 | ||||||
|  | func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash { | ||||||
|  | 	data, _ := db.Get(snapshotRootKey) | ||||||
|  | 	if len(data) != common.HashLength { | ||||||
|  | 		return common.Hash{} | ||||||
|  | 	} | ||||||
|  | 	return common.BytesToHash(data) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WriteSnapshotRoot stores the root of the block whose state is contained in
 | ||||||
|  | // the persisted snapshot.
 | ||||||
|  | func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) { | ||||||
|  | 	if err := db.Put(snapshotRootKey, root[:]); err != nil { | ||||||
|  | 		log.Crit("Failed to store snapshot root", "err", err) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // DeleteSnapshotRoot deletes the hash of the block whose state is contained in
 | ||||||
|  | // the persisted snapshot. Since snapshots are not immutable, this method can
 | ||||||
|  | // be used during updates, so a crash or failure will mark the entire snapshot
 | ||||||
|  | // invalid.
 | ||||||
|  | func DeleteSnapshotRoot(db ethdb.KeyValueWriter) { | ||||||
|  | 	if err := db.Delete(snapshotRootKey); err != nil { | ||||||
|  | 		log.Crit("Failed to remove snapshot root", "err", err) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReadAccountSnapshot retrieves the snapshot entry of an account trie leaf.
 | ||||||
|  | func ReadAccountSnapshot(db ethdb.KeyValueReader, hash common.Hash) []byte { | ||||||
|  | 	data, _ := db.Get(accountSnapshotKey(hash)) | ||||||
|  | 	return data | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WriteAccountSnapshot stores the snapshot entry of an account trie leaf.
 | ||||||
|  | func WriteAccountSnapshot(db ethdb.KeyValueWriter, hash common.Hash, entry []byte) { | ||||||
|  | 	if err := db.Put(accountSnapshotKey(hash), entry); err != nil { | ||||||
|  | 		log.Crit("Failed to store account snapshot", "err", err) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // DeleteAccountSnapshot removes the snapshot entry of an account trie leaf.
 | ||||||
|  | func DeleteAccountSnapshot(db ethdb.KeyValueWriter, hash common.Hash) { | ||||||
|  | 	if err := db.Delete(accountSnapshotKey(hash)); err != nil { | ||||||
|  | 		log.Crit("Failed to delete account snapshot", "err", err) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReadStorageSnapshot retrieves the snapshot entry of a storage trie leaf.
 | ||||||
|  | func ReadStorageSnapshot(db ethdb.KeyValueReader, accountHash, storageHash common.Hash) []byte { | ||||||
|  | 	data, _ := db.Get(storageSnapshotKey(accountHash, storageHash)) | ||||||
|  | 	return data | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WriteStorageSnapshot stores the snapshot entry of a storage trie leaf.
 | ||||||
|  | func WriteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash common.Hash, entry []byte) { | ||||||
|  | 	if err := db.Put(storageSnapshotKey(accountHash, storageHash), entry); err != nil { | ||||||
|  | 		log.Crit("Failed to store storage snapshot", "err", err) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // DeleteStorageSnapshot removes the snapshot entry of a storage trie leaf.
 | ||||||
|  | func DeleteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash common.Hash) { | ||||||
|  | 	if err := db.Delete(storageSnapshotKey(accountHash, storageHash)); err != nil { | ||||||
|  | 		log.Crit("Failed to delete storage snapshot", "err", err) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // IterateStorageSnapshots returns an iterator for walking the entire storage
 | ||||||
|  | // space of a specific account.
 | ||||||
|  | func IterateStorageSnapshots(db ethdb.Iteratee, accountHash common.Hash) ethdb.Iterator { | ||||||
|  | 	return db.NewIteratorWithPrefix(storageSnapshotsKey(accountHash)) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReadSnapshotJournal retrieves the serialized in-memory diff layers saved at
 | ||||||
|  | // the last shutdown. The blob is expected to be at most a few tens of megabytes.
 | ||||||
|  | func ReadSnapshotJournal(db ethdb.KeyValueReader) []byte { | ||||||
|  | 	data, _ := db.Get(snapshotJournalKey) | ||||||
|  | 	return data | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WriteSnapshotJournal stores the serialized in-memory diff layers to save at
 | ||||||
|  | // shutdown. The blob is expected to be at most a few tens of megabytes.
 | ||||||
|  | func WriteSnapshotJournal(db ethdb.KeyValueWriter, journal []byte) { | ||||||
|  | 	if err := db.Put(snapshotJournalKey, journal); err != nil { | ||||||
|  | 		log.Crit("Failed to store snapshot journal", "err", err) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // DeleteSnapshotJournal deletes the serialized in-memory diff layers saved at
 | ||||||
|  | // the last shutdown.
 | ||||||
|  | func DeleteSnapshotJournal(db ethdb.KeyValueWriter) { | ||||||
|  | 	if err := db.Delete(snapshotJournalKey); err != nil { | ||||||
|  | 		log.Crit("Failed to remove snapshot journal", "err", err) | ||||||
|  | 	} | ||||||
|  | } | ||||||
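A minimal usage sketch of the accessors above (hypothetical standalone program; only the APIs introduced in this file plus rawdb.NewMemoryDatabase are used):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// Persist a snapshot root and read it back.
	root := common.HexToHash("0x01")
	rawdb.WriteSnapshotRoot(db, root)
	fmt.Println(rawdb.ReadSnapshotRoot(db) == root) // true

	// Deleting the root is how an in-progress update invalidates the
	// whole snapshot if a crash interrupts it.
	rawdb.DeleteSnapshotRoot(db)
	fmt.Println(rawdb.ReadSnapshotRoot(db) == common.Hash{}) // true
}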
| @ -239,6 +239,8 @@ func InspectDatabase(db ethdb.Database) error { | |||||||
| 		hashNumPairing  common.StorageSize | 		hashNumPairing  common.StorageSize | ||||||
| 		trieSize        common.StorageSize | 		trieSize        common.StorageSize | ||||||
| 		txlookupSize    common.StorageSize | 		txlookupSize    common.StorageSize | ||||||
|  | 		accountSnapSize common.StorageSize | ||||||
|  | 		storageSnapSize common.StorageSize | ||||||
| 		preimageSize    common.StorageSize | 		preimageSize    common.StorageSize | ||||||
| 		bloomBitsSize   common.StorageSize | 		bloomBitsSize   common.StorageSize | ||||||
| 		cliqueSnapsSize common.StorageSize | 		cliqueSnapsSize common.StorageSize | ||||||
| @ -280,6 +282,10 @@ func InspectDatabase(db ethdb.Database) error { | |||||||
| 			receiptSize += size | 			receiptSize += size | ||||||
| 		case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength): | 		case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength): | ||||||
| 			txlookupSize += size | 			txlookupSize += size | ||||||
|  | 		case bytes.HasPrefix(key, SnapshotAccountPrefix) && len(key) == (len(SnapshotAccountPrefix)+common.HashLength): | ||||||
|  | 			accountSnapSize += size | ||||||
|  | 		case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength): | ||||||
|  | 			storageSnapSize += size | ||||||
| 		case bytes.HasPrefix(key, preimagePrefix) && len(key) == (len(preimagePrefix)+common.HashLength): | 		case bytes.HasPrefix(key, preimagePrefix) && len(key) == (len(preimagePrefix)+common.HashLength): | ||||||
| 			preimageSize += size | 			preimageSize += size | ||||||
| 		case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength): | 		case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength): | ||||||
| @ -331,6 +337,8 @@ func InspectDatabase(db ethdb.Database) error { | |||||||
| 		{"Key-Value store", "Bloombit index", bloomBitsSize.String()}, | 		{"Key-Value store", "Bloombit index", bloomBitsSize.String()}, | ||||||
| 		{"Key-Value store", "Trie nodes", trieSize.String()}, | 		{"Key-Value store", "Trie nodes", trieSize.String()}, | ||||||
| 		{"Key-Value store", "Trie preimages", preimageSize.String()}, | 		{"Key-Value store", "Trie preimages", preimageSize.String()}, | ||||||
|  | 		{"Key-Value store", "Account snapshot", accountSnapSize.String()}, | ||||||
|  | 		{"Key-Value store", "Storage snapshot", storageSnapSize.String()}, | ||||||
| 		{"Key-Value store", "Clique snapshots", cliqueSnapsSize.String()}, | 		{"Key-Value store", "Clique snapshots", cliqueSnapsSize.String()}, | ||||||
| 		{"Key-Value store", "Singleton metadata", metadata.String()}, | 		{"Key-Value store", "Singleton metadata", metadata.String()}, | ||||||
| 		{"Ancient store", "Headers", ancientHeaders.String()}, | 		{"Ancient store", "Headers", ancientHeaders.String()}, | ||||||
|  | |||||||
| @ -41,6 +41,12 @@ var ( | |||||||
| 	// fastTrieProgressKey tracks the number of trie entries imported during fast sync.
 | 	// fastTrieProgressKey tracks the number of trie entries imported during fast sync.
 | ||||||
| 	fastTrieProgressKey = []byte("TrieSync") | 	fastTrieProgressKey = []byte("TrieSync") | ||||||
| 
 | 
 | ||||||
|  | 	// snapshotRootKey tracks the hash of the last snapshot.
 | ||||||
|  | 	snapshotRootKey = []byte("SnapshotRoot") | ||||||
|  | 
 | ||||||
|  | 	// snapshotJournalKey tracks the in-memory diff layers across restarts.
 | ||||||
|  | 	snapshotJournalKey = []byte("SnapshotJournal") | ||||||
|  | 
 | ||||||
| 	// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
 | 	// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
 | ||||||
| 	headerPrefix       = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
 | 	headerPrefix       = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
 | ||||||
| 	headerTDSuffix     = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
 | 	headerTDSuffix     = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
 | ||||||
| @ -50,8 +56,10 @@ var ( | |||||||
| 	blockBodyPrefix     = []byte("b") // blockBodyPrefix + num (uint64 big endian) + hash -> block body
 | 	blockBodyPrefix     = []byte("b") // blockBodyPrefix + num (uint64 big endian) + hash -> block body
 | ||||||
| 	blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
 | 	blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
 | ||||||
| 
 | 
 | ||||||
| 	txLookupPrefix  = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata
 | 	txLookupPrefix        = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata
 | ||||||
| 	bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
 | 	bloomBitsPrefix       = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
 | ||||||
|  | 	SnapshotAccountPrefix = []byte("a") // SnapshotAccountPrefix + account hash -> account trie value
 | ||||||
|  | 	SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value
 | ||||||
| 
 | 
 | ||||||
| 	preimagePrefix = []byte("secure-key-")      // preimagePrefix + hash -> preimage
 | 	preimagePrefix = []byte("secure-key-")      // preimagePrefix + hash -> preimage
 | ||||||
| 	configPrefix   = []byte("ethereum-config-") // config prefix for the db
 | 	configPrefix   = []byte("ethereum-config-") // config prefix for the db
 | ||||||
| @ -145,6 +153,21 @@ func txLookupKey(hash common.Hash) []byte { | |||||||
| 	return append(txLookupPrefix, hash.Bytes()...) | 	return append(txLookupPrefix, hash.Bytes()...) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | // accountSnapshotKey = SnapshotAccountPrefix + hash
 | ||||||
|  | func accountSnapshotKey(hash common.Hash) []byte { | ||||||
|  | 	return append(SnapshotAccountPrefix, hash.Bytes()...) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // storageSnapshotKey = SnapshotStoragePrefix + account hash + storage hash
 | ||||||
|  | func storageSnapshotKey(accountHash, storageHash common.Hash) []byte { | ||||||
|  | 	return append(append(SnapshotStoragePrefix, accountHash.Bytes()...), storageHash.Bytes()...) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // storageSnapshotsKey = SnapshotStoragePrefix + account hash
 | ||||||
|  | func storageSnapshotsKey(accountHash common.Hash) []byte { | ||||||
|  | 	return append(SnapshotStoragePrefix, accountHash.Bytes()...) | ||||||
|  | } | ||||||
|  | 
 | ||||||
| // bloomBitsKey = bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash
 | // bloomBitsKey = bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash
 | ||||||
| func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte { | func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte { | ||||||
| 	key := append(append(bloomBitsPrefix, make([]byte, 10)...), hash.Bytes()...) | 	key := append(append(bloomBitsPrefix, make([]byte, 10)...), hash.Bytes()...) | ||||||
|  | |||||||
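The key layout of the two new column families is a single-byte prefix followed by the raw hashes, so every slot of one account shares the "o" + account-hash prefix that IterateStorageSnapshots walks. A byte-layout sketch (local copies of the unexported helpers above, for illustration only):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

var (
	snapshotAccountPrefix = []byte("a") // mirrors SnapshotAccountPrefix
	snapshotStoragePrefix = []byte("o") // mirrors SnapshotStoragePrefix
)

// accountKey = "a" + account hash (33 bytes)
func accountKey(hash common.Hash) []byte {
	return append(snapshotAccountPrefix, hash.Bytes()...)
}

// storageKey = "o" + account hash + storage hash (65 bytes)
func storageKey(accountHash, storageHash common.Hash) []byte {
	return append(append(snapshotStoragePrefix, accountHash.Bytes()...), storageHash.Bytes()...)
}

func main() {
	acc, slot := common.HexToHash("0x01"), common.HexToHash("0x02")
	fmt.Println(len(accountKey(acc)), len(storageKey(acc, slot))) // 33 65
}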
| @ -29,7 +29,7 @@ func TestNodeIteratorCoverage(t *testing.T) { | |||||||
| 	// Create some arbitrary test state to iterate
 | 	// Create some arbitrary test state to iterate
 | ||||||
| 	db, root, _ := makeTestState() | 	db, root, _ := makeTestState() | ||||||
| 
 | 
 | ||||||
| 	state, err := New(root, db) | 	state, err := New(root, db, nil) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		t.Fatalf("failed to create state trie at %x: %v", root, err) | 		t.Fatalf("failed to create state trie at %x: %v", root, err) | ||||||
| 	} | 	} | ||||||
|  | |||||||
| @ -90,7 +90,8 @@ type ( | |||||||
| 		account *common.Address | 		account *common.Address | ||||||
| 	} | 	} | ||||||
| 	resetObjectChange struct { | 	resetObjectChange struct { | ||||||
| 		prev *stateObject | 		prev         *stateObject | ||||||
|  | 		prevdestruct bool | ||||||
| 	} | 	} | ||||||
| 	suicideChange struct { | 	suicideChange struct { | ||||||
| 		account     *common.Address | 		account     *common.Address | ||||||
| @ -142,6 +143,9 @@ func (ch createObjectChange) dirtied() *common.Address { | |||||||
| 
 | 
 | ||||||
| func (ch resetObjectChange) revert(s *StateDB) { | func (ch resetObjectChange) revert(s *StateDB) { | ||||||
| 	s.setStateObject(ch.prev) | 	s.setStateObject(ch.prev) | ||||||
|  | 	if !ch.prevdestruct && s.snap != nil { | ||||||
|  | 		delete(s.snapDestructs, ch.prev.addrHash) | ||||||
|  | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (ch resetObjectChange) dirtied() *common.Address { | func (ch resetObjectChange) dirtied() *common.Address { | ||||||
|  | |||||||
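The hunk above extends go-ethereum's journal pattern: every state mutation appends an entry that knows how to undo itself, and reverting replays the entries in reverse. A self-contained sketch of the pattern (illustrative toy types, not the actual geth journal):

package main

import "fmt"

// change records the inverse of one state mutation.
type change interface {
	revert(s *toyState)
}

type balanceChange struct {
	addr string
	prev int
}

func (ch balanceChange) revert(s *toyState) { s.balances[ch.addr] = ch.prev }

type toyState struct {
	balances map[string]int
	journal  []change
}

func (s *toyState) setBalance(addr string, v int) {
	// Journal the previous value before mutating, just like the
	// prevdestruct bookkeeping added above.
	s.journal = append(s.journal, balanceChange{addr, s.balances[addr]})
	s.balances[addr] = v
}

func (s *toyState) revertAll() {
	for i := len(s.journal) - 1; i >= 0; i-- { // undo in reverse order
		s.journal[i].revert(s)
	}
	s.journal = s.journal[:0]
}

func main() {
	s := &toyState{balances: map[string]int{"alice": 10}}
	s.setBalance("alice", 42)
	s.revertAll()
	fmt.Println(s.balances["alice"]) // 10
}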
							
								
								
									
core/state/snapshot/account.go (new file, 54 lines)
							| @ -0,0 +1,54 @@ | |||||||
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | package snapshot | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"math/big" | ||||||
|  | 
 | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | 	"github.com/ethereum/go-ethereum/rlp" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // Account is a slim version of a state.Account, where the root and code hash
 | ||||||
|  | // are replaced with a nil byte slice for empty accounts.
 | ||||||
|  | type Account struct { | ||||||
|  | 	Nonce    uint64 | ||||||
|  | 	Balance  *big.Int | ||||||
|  | 	Root     []byte | ||||||
|  | 	CodeHash []byte | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AccountRLP converts a state.Account content into a slim snapshot version RLP
 | ||||||
|  | // encoded.
 | ||||||
|  | func AccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) []byte { | ||||||
|  | 	slim := Account{ | ||||||
|  | 		Nonce:   nonce, | ||||||
|  | 		Balance: balance, | ||||||
|  | 	} | ||||||
|  | 	if root != emptyRoot { | ||||||
|  | 		slim.Root = root[:] | ||||||
|  | 	} | ||||||
|  | 	if !bytes.Equal(codehash, emptyCode[:]) { | ||||||
|  | 		slim.CodeHash = codehash | ||||||
|  | 	} | ||||||
|  | 	data, err := rlp.EncodeToBytes(slim) | ||||||
|  | 	if err != nil { | ||||||
|  | 		panic(err) | ||||||
|  | 	} | ||||||
|  | 	return data | ||||||
|  | } | ||||||
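Usage sketch: for a plain externally owned account both the storage root and the code hash match the empty-trie/empty-code constants, so AccountRLP elides them and the encoding shrinks to little more than nonce and balance (the hex constant below is the well-known empty-trie root; Keccak-256 of nil is the empty code hash):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	emptyRoot := common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
	emptyCode := crypto.Keccak256(nil)

	blob := snapshot.AccountRLP(1, big.NewInt(1000), emptyRoot, emptyCode)
	fmt.Printf("%d bytes: %x\n", len(blob), blob) // a handful of bytes, not 70+
}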
							
								
								
									
core/state/snapshot/difflayer.go (new file, 533 lines)
							| @ -0,0 +1,533 @@ | |||||||
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | package snapshot | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"encoding/binary" | ||||||
|  | 	"fmt" | ||||||
|  | 	"math" | ||||||
|  | 	"math/rand" | ||||||
|  | 	"sort" | ||||||
|  | 	"sync" | ||||||
|  | 	"sync/atomic" | ||||||
|  | 	"time" | ||||||
|  | 
 | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | 	"github.com/ethereum/go-ethereum/rlp" | ||||||
|  | 	"github.com/steakknife/bloomfilter" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | var ( | ||||||
|  | 	// aggregatorMemoryLimit is the maximum size of the bottom-most diff layer
 | ||||||
|  | 	// that aggregates the writes from above until it's flushed into the disk
 | ||||||
|  | 	// layer.
 | ||||||
|  | 	//
 | ||||||
|  | 	// Note, bumping this up might drastically increase the size of the bloom
 | ||||||
|  | 	// filters that's stored in every diff layer. Don't do that without fully
 | ||||||
|  | 	// understanding all the implications.
 | ||||||
|  | 	aggregatorMemoryLimit = uint64(4 * 1024 * 1024) | ||||||
|  | 
 | ||||||
|  | 	// aggregatorItemLimit is an approximate number of items that will end up
 | ||||||
|  | 	// in the aggregator layer before it's flushed out to disk. A plain account
 | ||||||
|  | 	// weighs around 14B (+hash), a storage slot 32B (+hash), a deleted slot
 | ||||||
|  | 	// 0B (+hash). Slots are mostly set/unset in lockstep, so they average at
 | ||||||
|  | 	// 16B (+hash). All in all, the average entry seems to be 15+32=47B. Use a
 | ||||||
|  | 	// smaller number to be on the safe side.
 | ||||||
|  | 	aggregatorItemLimit = aggregatorMemoryLimit / 42 | ||||||
|  | 
 | ||||||
|  | 	// bloomTargetError is the target false positive rate when the aggregator
 | ||||||
|  | 	// layer is at its fullest. The actual value will probably move around up
 | ||||||
|  | 	// and down from this number; it's mostly a ballpark figure.
 | ||||||
|  | 	//
 | ||||||
|  | 	// Note, dropping this down might drastically increase the size of the bloom
 | ||||||
|  | 	// filters that's stored in every diff layer. Don't do that without fully
 | ||||||
|  | 	// understanding all the implications.
 | ||||||
|  | 	bloomTargetError = 0.02 | ||||||
|  | 
 | ||||||
|  | 	// bloomSize is the ideal bloom filter size given the maximum number of items
 | ||||||
|  | 	// it's expected to hold and the target false positive error rate.
 | ||||||
|  | 	bloomSize = math.Ceil(float64(aggregatorItemLimit) * math.Log(bloomTargetError) / math.Log(1/math.Pow(2, math.Log(2)))) | ||||||
|  | 
 | ||||||
|  | 	// bloomFuncs is the ideal number of bits a single entry should set in the
 | ||||||
|  | 	// bloom filter to keep its size to a minimum (given its size and maximum
 | ||||||
|  | 	// entry count).
 | ||||||
|  | 	bloomFuncs = math.Round((bloomSize / float64(aggregatorItemLimit)) * math.Log(2)) | ||||||
|  | 
 | ||||||
|  | 	// the bloom offsets are runtime constants which determine which part of the
 | ||||||
|  | 	// account/storage hash the hasher functions look at, to determine the
 | ||||||
|  | 	// bloom key for an account/slot. This is randomized at init(), so that the
 | ||||||
|  | 	// global population of nodes does not all display the exact same behaviour
 | ||||||
|  | 	// with regard to bloom content.
 | ||||||
|  | 	bloomDestructHasherOffset = 0 | ||||||
|  | 	bloomAccountHasherOffset  = 0 | ||||||
|  | 	bloomStorageHasherOffset  = 0 | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | func init() { | ||||||
|  | 	// Init the bloom offsets in the range [0:24] (requires 8 bytes)
 | ||||||
|  | 	bloomDestructHasherOffset = rand.Intn(25) | ||||||
|  | 	bloomAccountHasherOffset = rand.Intn(25) | ||||||
|  | 	bloomStorageHasherOffset = rand.Intn(25) | ||||||
|  | 
 | ||||||
|  | 	// The destruct and account blooms must be different, as the storage slots
 | ||||||
|  | 	// will check for destruction too for every bloom miss. It should not collide
 | ||||||
|  | 	// with modified accounts.
 | ||||||
|  | 	for bloomAccountHasherOffset == bloomDestructHasherOffset { | ||||||
|  | 		bloomAccountHasherOffset = rand.Intn(25) | ||||||
|  | 	} | ||||||
|  | } | ||||||
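bloomSize and bloomFuncs above are the textbook optimal Bloom filter parameters: since \ln(1/2^{\ln 2}) = -(\ln 2)^2, the two expressions reduce to

$$m = \left\lceil \frac{-n \ln p}{(\ln 2)^2} \right\rceil, \qquad k = \operatorname{round}\!\left(\frac{m}{n}\,\ln 2\right)$$

With n = aggregatorItemLimit ≈ 4 MiB / 42 ≈ 99,864 items and p = bloomTargetError = 0.02, this works out to m ≈ 813,000 bits (roughly 100 KB per filter) and k = 6 hash functions.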
|  | 
 | ||||||
|  | // diffLayer represents a collection of modifications made to a state snapshot
 | ||||||
|  | // after running a block on top. It contains one sorted list for the account trie
 | ||||||
|  | // and one sorted list for each storage trie.
 | ||||||
|  | //
 | ||||||
|  | // The goal of a diff layer is to act as a journal, tracking recent modifications
 | ||||||
|  | // made to the state that have not yet graduated into a semi-immutable state.
 | ||||||
|  | type diffLayer struct { | ||||||
|  | 	origin *diskLayer // Base disk layer to directly use on bloom misses
 | ||||||
|  | 	parent snapshot   // Parent snapshot modified by this one, never nil
 | ||||||
|  | 	memory uint64     // Approximate guess as to how much memory we use
 | ||||||
|  | 
 | ||||||
|  | 	root  common.Hash // Root hash to which this snapshot diff belongs
 | ||||||
|  | 	stale uint32      // Signals that the layer became stale (state progressed)
 | ||||||
|  | 
 | ||||||
|  | 	destructSet map[common.Hash]struct{}               // Keyed markers for deleted (and potentially recreated) accounts
 | ||||||
|  | 	accountList []common.Hash                          // List of accounts for iteration. If it exists, it's sorted, otherwise it's nil
 | ||||||
|  | 	accountData map[common.Hash][]byte                 // Keyed accounts for direct retrieval (nil means deleted)
 | ||||||
|  | 	storageList map[common.Hash][]common.Hash          // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
 | ||||||
|  | 	storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval, one per account (nil means deleted)
 | ||||||
|  | 
 | ||||||
|  | 	diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer
 | ||||||
|  | 
 | ||||||
|  | 	lock sync.RWMutex | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // destructBloomHasher is a wrapper around a common.Hash to satisfy the interface
 | ||||||
|  | // API requirements of the bloom library used. It's used to convert a destruct
 | ||||||
|  | // event into a 64 bit mini hash.
 | ||||||
|  | type destructBloomHasher common.Hash | ||||||
|  | 
 | ||||||
|  | func (h destructBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") } | ||||||
|  | func (h destructBloomHasher) Sum(b []byte) []byte               { panic("not implemented") } | ||||||
|  | func (h destructBloomHasher) Reset()                            { panic("not implemented") } | ||||||
|  | func (h destructBloomHasher) BlockSize() int                    { panic("not implemented") } | ||||||
|  | func (h destructBloomHasher) Size() int                         { return 8 } | ||||||
|  | func (h destructBloomHasher) Sum64() uint64 { | ||||||
|  | 	return binary.BigEndian.Uint64(h[bloomDestructHasherOffset : bloomDestructHasherOffset+8]) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // accountBloomHasher is a wrapper around a common.Hash to satisfy the interface
 | ||||||
|  | // API requirements of the bloom library used. It's used to convert an account
 | ||||||
|  | // hash into a 64 bit mini hash.
 | ||||||
|  | type accountBloomHasher common.Hash | ||||||
|  | 
 | ||||||
|  | func (h accountBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") } | ||||||
|  | func (h accountBloomHasher) Sum(b []byte) []byte               { panic("not implemented") } | ||||||
|  | func (h accountBloomHasher) Reset()                            { panic("not implemented") } | ||||||
|  | func (h accountBloomHasher) BlockSize() int                    { panic("not implemented") } | ||||||
|  | func (h accountBloomHasher) Size() int                         { return 8 } | ||||||
|  | func (h accountBloomHasher) Sum64() uint64 { | ||||||
|  | 	return binary.BigEndian.Uint64(h[bloomAccountHasherOffset : bloomAccountHasherOffset+8]) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // storageBloomHasher is a wrapper around a [2]common.Hash to satisfy the interface
 | ||||||
|  | // API requirements of the bloom library used. It's used to convert an account
 | ||||||
|  | // and storage hash tuple into a 64 bit mini hash.
 | ||||||
|  | type storageBloomHasher [2]common.Hash | ||||||
|  | 
 | ||||||
|  | func (h storageBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") } | ||||||
|  | func (h storageBloomHasher) Sum(b []byte) []byte               { panic("not implemented") } | ||||||
|  | func (h storageBloomHasher) Reset()                            { panic("not implemented") } | ||||||
|  | func (h storageBloomHasher) BlockSize() int                    { panic("not implemented") } | ||||||
|  | func (h storageBloomHasher) Size() int                         { return 8 } | ||||||
|  | func (h storageBloomHasher) Sum64() uint64 { | ||||||
|  | 	return binary.BigEndian.Uint64(h[0][bloomStorageHasherOffset:bloomStorageHasherOffset+8]) ^ | ||||||
|  | 		binary.BigEndian.Uint64(h[1][bloomStorageHasherOffset:bloomStorageHasherOffset+8]) | ||||||
|  | } | ||||||
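None of these wrappers ever hash anything: the account/storage keys are already Keccak-256 outputs, so Sum64 simply slices 8 already-random bytes out of them at the randomized offset, while the remaining methods exist only to satisfy the hash.Hash64-shaped interface the bloom library consumes (an assumption here, inferred from the method set implemented above). A compile-time sketch of the same trick:

package main

import (
	"encoding/binary"
	"fmt"
	"hash"
)

// miniHasher mimics the wrappers above: a fixed 32-byte hash reinterpreted
// as a 64-bit bloom key, with all other methods left unimplemented.
type miniHasher [32]byte

func (h miniHasher) Write(p []byte) (int, error) { panic("not implemented") }
func (h miniHasher) Sum(b []byte) []byte         { panic("not implemented") }
func (h miniHasher) Reset()                      { panic("not implemented") }
func (h miniHasher) BlockSize() int              { panic("not implemented") }
func (h miniHasher) Size() int                   { return 8 }
func (h miniHasher) Sum64() uint64               { return binary.BigEndian.Uint64(h[:8]) }

var _ hash.Hash64 = miniHasher{} // compile-time interface check

func main() {
	var h miniHasher
	h[7] = 1
	fmt.Println(h.Sum64()) // 1
}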
|  | 
 | ||||||
|  | // newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low
 | ||||||
|  | // level persistent database or a hierarchical diff already.
 | ||||||
|  | func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer { | ||||||
|  | 	// Create the new layer with some pre-allocated data segments
 | ||||||
|  | 	dl := &diffLayer{ | ||||||
|  | 		parent:      parent, | ||||||
|  | 		root:        root, | ||||||
|  | 		destructSet: destructs, | ||||||
|  | 		accountData: accounts, | ||||||
|  | 		storageData: storage, | ||||||
|  | 	} | ||||||
|  | 	switch parent := parent.(type) { | ||||||
|  | 	case *diskLayer: | ||||||
|  | 		dl.rebloom(parent) | ||||||
|  | 	case *diffLayer: | ||||||
|  | 		dl.rebloom(parent.origin) | ||||||
|  | 	default: | ||||||
|  | 		panic("unknown parent type") | ||||||
|  | 	} | ||||||
|  | 	// Sanity check that accounts or storage slots are never nil
 | ||||||
|  | 	for accountHash, blob := range accounts { | ||||||
|  | 		if blob == nil { | ||||||
|  | 			panic(fmt.Sprintf("account %#x nil", accountHash)) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	for accountHash, slots := range storage { | ||||||
|  | 		if slots == nil { | ||||||
|  | 			panic(fmt.Sprintf("storage %#x nil", accountHash)) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// Determine memory size and track the dirty writes
 | ||||||
|  | 	for _, data := range accounts { | ||||||
|  | 		dl.memory += uint64(common.HashLength + len(data)) | ||||||
|  | 		snapshotDirtyAccountWriteMeter.Mark(int64(len(data))) | ||||||
|  | 	} | ||||||
|  | 	// Fill the storage hashes and sort them for the iterator
 | ||||||
|  | 	dl.storageList = make(map[common.Hash][]common.Hash) | ||||||
|  | 	for accountHash := range destructs { | ||||||
|  | 		dl.storageList[accountHash] = nil | ||||||
|  | 	} | ||||||
|  | 	// Determine memory size and track the dirty writes
 | ||||||
|  | 	for _, slots := range storage { | ||||||
|  | 		for _, data := range slots { | ||||||
|  | 			dl.memory += uint64(common.HashLength + len(data)) | ||||||
|  | 			snapshotDirtyStorageWriteMeter.Mark(int64(len(data))) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	dl.memory += uint64(len(dl.storageList) * common.HashLength) | ||||||
|  | 	return dl | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // rebloom discards the layer's current bloom and rebuilds it from scratch based
 | ||||||
|  | // on the parent's and the local diffs.
 | ||||||
|  | func (dl *diffLayer) rebloom(origin *diskLayer) { | ||||||
|  | 	dl.lock.Lock() | ||||||
|  | 	defer dl.lock.Unlock() | ||||||
|  | 
 | ||||||
|  | 	defer func(start time.Time) { | ||||||
|  | 		snapshotBloomIndexTimer.Update(time.Since(start)) | ||||||
|  | 	}(time.Now()) | ||||||
|  | 
 | ||||||
|  | 	// Inject the new origin that triggered the rebloom
 | ||||||
|  | 	dl.origin = origin | ||||||
|  | 
 | ||||||
|  | 	// Retrieve the parent bloom or create a fresh empty one
 | ||||||
|  | 	if parent, ok := dl.parent.(*diffLayer); ok { | ||||||
|  | 		parent.lock.RLock() | ||||||
|  | 		dl.diffed, _ = parent.diffed.Copy() | ||||||
|  | 		parent.lock.RUnlock() | ||||||
|  | 	} else { | ||||||
|  | 		dl.diffed, _ = bloomfilter.New(uint64(bloomSize), uint64(bloomFuncs)) | ||||||
|  | 	} | ||||||
|  | 	// Iterate over all the accounts and storage slots and index them
 | ||||||
|  | 	for hash := range dl.destructSet { | ||||||
|  | 		dl.diffed.Add(destructBloomHasher(hash)) | ||||||
|  | 	} | ||||||
|  | 	for hash := range dl.accountData { | ||||||
|  | 		dl.diffed.Add(accountBloomHasher(hash)) | ||||||
|  | 	} | ||||||
|  | 	for accountHash, slots := range dl.storageData { | ||||||
|  | 		for storageHash := range slots { | ||||||
|  | 			dl.diffed.Add(storageBloomHasher{accountHash, storageHash}) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// Calculate the current false positive rate and update the error rate meter.
 | ||||||
|  | 	// This is a bit of a cheat because subsequent layers will overwrite it, but it
 | ||||||
|  | 	// should be fine, we're only interested in ballpark figures.
 | ||||||
|  | 	k := float64(dl.diffed.K()) | ||||||
|  | 	n := float64(dl.diffed.N()) | ||||||
|  | 	m := float64(dl.diffed.M()) | ||||||
|  | 	snapshotBloomErrorGauge.Update(math.Pow(1.0-math.Exp((-k)*(n+0.5)/(m-1)), k)) | ||||||
|  | } | ||||||
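The gauge update above evaluates the standard false positive estimate for a Bloom filter with k hash functions, n inserted items and m bits,

$$p \approx \left(1 - e^{-k(n + 0.5)/(m - 1)}\right)^{k}$$

which is exactly the expression in the code; as the comment notes, it is only a ballpark indicator, since each newer layer recomputes and overwrites it.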
|  | 
 | ||||||
|  | // Root returns the root hash for which this snapshot was made.
 | ||||||
|  | func (dl *diffLayer) Root() common.Hash { | ||||||
|  | 	return dl.root | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Parent returns the parent layer of the diff layer.
 | ||||||
|  | func (dl *diffLayer) Parent() snapshot { | ||||||
|  | 	return dl.parent | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Stale returns whether this layer has become stale (was flattened across) or if
 | ||||||
|  | // it's still live.
 | ||||||
|  | func (dl *diffLayer) Stale() bool { | ||||||
|  | 	return atomic.LoadUint32(&dl.stale) != 0 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Account directly retrieves the account associated with a particular hash in
 | ||||||
|  | // the snapshot slim data format.
 | ||||||
|  | func (dl *diffLayer) Account(hash common.Hash) (*Account, error) { | ||||||
|  | 	data, err := dl.AccountRLP(hash) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	if len(data) == 0 { // can be both nil and []byte{}
 | ||||||
|  | 		return nil, nil | ||||||
|  | 	} | ||||||
|  | 	account := new(Account) | ||||||
|  | 	if err := rlp.DecodeBytes(data, account); err != nil { | ||||||
|  | 		panic(err) | ||||||
|  | 	} | ||||||
|  | 	return account, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AccountRLP directly retrieves the account RLP associated with a particular
 | ||||||
|  | // hash in the snapshot slim data format.
 | ||||||
|  | func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) { | ||||||
|  | 	// Check the bloom filter first whether there's even a point in reaching into
 | ||||||
|  | 	// all the maps in all the layers below
 | ||||||
|  | 	dl.lock.RLock() | ||||||
|  | 	hit := dl.diffed.Contains(accountBloomHasher(hash)) | ||||||
|  | 	if !hit { | ||||||
|  | 		hit = dl.diffed.Contains(destructBloomHasher(hash)) | ||||||
|  | 	} | ||||||
|  | 	dl.lock.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	// If the bloom filter misses, don't even bother with traversing the memory
 | ||||||
|  | 	// diff layers, reach straight into the bottom persistent disk layer
 | ||||||
|  | 	if !hit { | ||||||
|  | 		snapshotBloomAccountMissMeter.Mark(1) | ||||||
|  | 		return dl.origin.AccountRLP(hash) | ||||||
|  | 	} | ||||||
|  | 	// The bloom filter hit, start poking in the internal maps
 | ||||||
|  | 	return dl.accountRLP(hash, 0) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // accountRLP is an internal version of AccountRLP that skips the bloom filter
 | ||||||
|  | // checks and uses the internal maps to try and retrieve the data. It's meant
 | ||||||
|  | // to be used if a higher layer's bloom filter hit already.
 | ||||||
|  | func (dl *diffLayer) accountRLP(hash common.Hash, depth int) ([]byte, error) { | ||||||
|  | 	dl.lock.RLock() | ||||||
|  | 	defer dl.lock.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	// If the layer was flattened into, consider it invalid (any live reference to
 | ||||||
|  | 	// the original should be marked as unusable).
 | ||||||
|  | 	if dl.Stale() { | ||||||
|  | 		return nil, ErrSnapshotStale | ||||||
|  | 	} | ||||||
|  | 	// If the account is known locally, return it
 | ||||||
|  | 	if data, ok := dl.accountData[hash]; ok { | ||||||
|  | 		snapshotDirtyAccountHitMeter.Mark(1) | ||||||
|  | 		snapshotDirtyAccountHitDepthHist.Update(int64(depth)) | ||||||
|  | 		snapshotDirtyAccountReadMeter.Mark(int64(len(data))) | ||||||
|  | 		snapshotBloomAccountTrueHitMeter.Mark(1) | ||||||
|  | 		return data, nil | ||||||
|  | 	} | ||||||
|  | 	// If the account is known locally, but deleted, return nil
 | ||||||
|  | 	if _, ok := dl.destructSet[hash]; ok { | ||||||
|  | 		snapshotDirtyAccountHitMeter.Mark(1) | ||||||
|  | 		snapshotDirtyAccountHitDepthHist.Update(int64(depth)) | ||||||
|  | 		snapshotDirtyAccountInexMeter.Mark(1) | ||||||
|  | 		snapshotBloomAccountTrueHitMeter.Mark(1) | ||||||
|  | 		return nil, nil | ||||||
|  | 	} | ||||||
|  | 	// Account unknown to this diff, resolve from parent
 | ||||||
|  | 	if diff, ok := dl.parent.(*diffLayer); ok { | ||||||
|  | 		return diff.accountRLP(hash, depth+1) | ||||||
|  | 	} | ||||||
|  | 	// Failed to resolve through diff layers, mark a bloom error and use the disk
 | ||||||
|  | 	snapshotBloomAccountFalseHitMeter.Mark(1) | ||||||
|  | 	return dl.parent.AccountRLP(hash) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Storage directly retrieves the storage data associated with a particular hash,
 | ||||||
|  | // within a particular account. If the slot is unknown to this diff, its parent
 | ||||||
|  | // is consulted.
 | ||||||
|  | func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) { | ||||||
|  | 	// Check the bloom filter first whether there's even a point in reaching into
 | ||||||
|  | 	// all the maps in all the layers below
 | ||||||
|  | 	dl.lock.RLock() | ||||||
|  | 	hit := dl.diffed.Contains(storageBloomHasher{accountHash, storageHash}) | ||||||
|  | 	if !hit { | ||||||
|  | 		hit = dl.diffed.Contains(destructBloomHasher(accountHash)) | ||||||
|  | 	} | ||||||
|  | 	dl.lock.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	// If the bloom filter misses, don't even bother with traversing the memory
 | ||||||
|  | 	// diff layers, reach straight into the bottom persistent disk layer
 | ||||||
|  | 	if !hit { | ||||||
|  | 		snapshotBloomStorageMissMeter.Mark(1) | ||||||
|  | 		return dl.origin.Storage(accountHash, storageHash) | ||||||
|  | 	} | ||||||
|  | 	// The bloom filter hit, start poking in the internal maps
 | ||||||
|  | 	return dl.storage(accountHash, storageHash, 0) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // storage is an internal version of Storage that skips the bloom filter checks
 | ||||||
|  | // and uses the internal maps to try and retrieve the data. It's meant to be
 | ||||||
|  | // used if a higher layer's bloom filter hit already.
 | ||||||
|  | func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([]byte, error) { | ||||||
|  | 	dl.lock.RLock() | ||||||
|  | 	defer dl.lock.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	// If the layer was flattened into, consider it invalid (any live reference to
 | ||||||
|  | 	// the original should be marked as unusable).
 | ||||||
|  | 	if dl.Stale() { | ||||||
|  | 		return nil, ErrSnapshotStale | ||||||
|  | 	} | ||||||
|  | 	// If the account is known locally, try to resolve the slot locally
 | ||||||
|  | 	if storage, ok := dl.storageData[accountHash]; ok { | ||||||
|  | 		if data, ok := storage[storageHash]; ok { | ||||||
|  | 			snapshotDirtyStorageHitMeter.Mark(1) | ||||||
|  | 			snapshotDirtyStorageHitDepthHist.Update(int64(depth)) | ||||||
|  | 			if n := len(data); n > 0 { | ||||||
|  | 				snapshotDirtyStorageReadMeter.Mark(int64(n)) | ||||||
|  | 			} else { | ||||||
|  | 				snapshotDirtyStorageInexMeter.Mark(1) | ||||||
|  | 			} | ||||||
|  | 			snapshotBloomStorageTrueHitMeter.Mark(1) | ||||||
|  | 			return data, nil | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// If the account is known locally, but deleted, return an empty slot
 | ||||||
|  | 	if _, ok := dl.destructSet[accountHash]; ok { | ||||||
|  | 		snapshotDirtyStorageHitMeter.Mark(1) | ||||||
|  | 		snapshotDirtyStorageHitDepthHist.Update(int64(depth)) | ||||||
|  | 		snapshotDirtyStorageInexMeter.Mark(1) | ||||||
|  | 		snapshotBloomStorageTrueHitMeter.Mark(1) | ||||||
|  | 		return nil, nil | ||||||
|  | 	} | ||||||
|  | 	// Storage slot unknown to this diff, resolve from parent
 | ||||||
|  | 	if diff, ok := dl.parent.(*diffLayer); ok { | ||||||
|  | 		return diff.storage(accountHash, storageHash, depth+1) | ||||||
|  | 	} | ||||||
|  | 	// Failed to resolve through diff layers, mark a bloom error and use the disk
 | ||||||
|  | 	snapshotBloomStorageFalseHitMeter.Mark(1) | ||||||
|  | 	return dl.parent.Storage(accountHash, storageHash) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Update creates a new layer on top of the existing snapshot diff tree with
 | ||||||
|  | // the specified data items.
 | ||||||
|  | func (dl *diffLayer) Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer { | ||||||
|  | 	return newDiffLayer(dl, blockRoot, destructs, accounts, storage) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // flatten pushes all data from this point downwards, flattening everything into
 | ||||||
|  | // a single diff at the bottom. Since usually the lowermost diff is the largest,
 | ||||||
|  | // the flattening builds up from there in reverse.
 | ||||||
|  | func (dl *diffLayer) flatten() snapshot { | ||||||
|  | 	// If the parent is not a diff, we're the first in line; return unmodified
 | ||||||
|  | 	parent, ok := dl.parent.(*diffLayer) | ||||||
|  | 	if !ok { | ||||||
|  | 		return dl | ||||||
|  | 	} | ||||||
|  | 	// Parent is a diff, flatten it first (note, apart from weird corner cases,
 | ||||||
|  | 	// flatten will realistically only ever merge 1 layer, so there's no need to
 | ||||||
|  | 	// be smarter about grouping flattens together).
 | ||||||
|  | 	parent = parent.flatten().(*diffLayer) | ||||||
|  | 
 | ||||||
|  | 	parent.lock.Lock() | ||||||
|  | 	defer parent.lock.Unlock() | ||||||
|  | 
 | ||||||
|  | 	// Before actually writing all our data to the parent, first ensure that the
 | ||||||
|  | 	// parent hasn't been 'corrupted' by someone else already flattening into it
 | ||||||
|  | 	if atomic.SwapUint32(&parent.stale, 1) != 0 { | ||||||
|  | 		panic("parent diff layer is stale") // we've flattened into the same parent from two children, boo
 | ||||||
|  | 	} | ||||||
|  | 	// Overwrite all the updated accounts blindly, merge the sorted list
 | ||||||
|  | 	for hash := range dl.destructSet { | ||||||
|  | 		parent.destructSet[hash] = struct{}{} | ||||||
|  | 		delete(parent.accountData, hash) | ||||||
|  | 		delete(parent.storageData, hash) | ||||||
|  | 	} | ||||||
|  | 	for hash, data := range dl.accountData { | ||||||
|  | 		parent.accountData[hash] = data | ||||||
|  | 	} | ||||||
|  | 	// Overwrite all the updated storage slots (individually)
 | ||||||
|  | 	for accountHash, storage := range dl.storageData { | ||||||
|  | 		// If storage didn't exist (or was deleted) in the parent, overwrite blindly
 | ||||||
|  | 		if _, ok := parent.storageData[accountHash]; !ok { | ||||||
|  | 			parent.storageData[accountHash] = storage | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 		// Storage exists in both parent and child, merge the slots
 | ||||||
|  | 		comboData := parent.storageData[accountHash] | ||||||
|  | 		for storageHash, data := range storage { | ||||||
|  | 			comboData[storageHash] = data | ||||||
|  | 		} | ||||||
|  | 		parent.storageData[accountHash] = comboData | ||||||
|  | 	} | ||||||
|  | 	// Return the combo parent
 | ||||||
|  | 	return &diffLayer{ | ||||||
|  | 		parent:      parent.parent, | ||||||
|  | 		origin:      parent.origin, | ||||||
|  | 		root:        dl.root, | ||||||
|  | 		destructSet: parent.destructSet, | ||||||
|  | 		accountData: parent.accountData, | ||||||
|  | 		storageData: parent.storageData, | ||||||
|  | 		storageList: make(map[common.Hash][]common.Hash), | ||||||
|  | 		diffed:      dl.diffed, | ||||||
|  | 		memory:      parent.memory + dl.memory, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AccountList returns a sorted list of all accounts in this difflayer, including
 | ||||||
|  | // the deleted ones.
 | ||||||
|  | //
 | ||||||
|  | // Note, the returned slice is not a copy, so do not modify it.
 | ||||||
|  | func (dl *diffLayer) AccountList() []common.Hash { | ||||||
|  | 	// If an old list already exists, return it
 | ||||||
|  | 	dl.lock.RLock() | ||||||
|  | 	list := dl.accountList | ||||||
|  | 	dl.lock.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	if list != nil { | ||||||
|  | 		return list | ||||||
|  | 	} | ||||||
|  | 	// No old sorted account list exists, generate a new one
 | ||||||
|  | 	dl.lock.Lock() | ||||||
|  | 	defer dl.lock.Unlock() | ||||||
|  | 
 | ||||||
|  | 	dl.accountList = make([]common.Hash, 0, len(dl.destructSet)+len(dl.accountData)) | ||||||
|  | 	for hash := range dl.accountData { | ||||||
|  | 		dl.accountList = append(dl.accountList, hash) | ||||||
|  | 	} | ||||||
|  | 	for hash := range dl.destructSet { | ||||||
|  | 		if _, ok := dl.accountData[hash]; !ok { | ||||||
|  | 			dl.accountList = append(dl.accountList, hash) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	sort.Sort(hashes(dl.accountList)) | ||||||
|  | 	return dl.accountList | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // StorageList returns a sorted list of all storage slot hashes in this difflayer
 | ||||||
|  | // for the given account.
 | ||||||
|  | //
 | ||||||
|  | // Note, the returned slice is not a copy, so do not modify it.
 | ||||||
|  | func (dl *diffLayer) StorageList(accountHash common.Hash) []common.Hash { | ||||||
|  | 	// If an old list already exists, return it
 | ||||||
|  | 	dl.lock.RLock() | ||||||
|  | 	list := dl.storageList[accountHash] | ||||||
|  | 	dl.lock.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	if list != nil { | ||||||
|  | 		return list | ||||||
|  | 	} | ||||||
|  | 	// No old sorted storage list exists, generate a new one
 | ||||||
|  | 	dl.lock.Lock() | ||||||
|  | 	defer dl.lock.Unlock() | ||||||
|  | 
 | ||||||
|  | 	storageMap := dl.storageData[accountHash] | ||||||
|  | 	storageList := make([]common.Hash, 0, len(storageMap)) | ||||||
|  | 	for k := range storageMap { | ||||||
|  | 		storageList = append(storageList, k) | ||||||
|  | 	} | ||||||
|  | 	sort.Sort(hashes(storageList)) | ||||||
|  | 	dl.storageList[accountHash] = storageList | ||||||
|  | 	return storageList | ||||||
|  | } | ||||||
							
								
								
									
core/state/snapshot/difflayer_test.go (new file, 399 lines)
							| @ -0,0 +1,399 @@ | |||||||
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | package snapshot | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"math/rand" | ||||||
|  | 	"testing" | ||||||
|  | 
 | ||||||
|  | 	"github.com/VictoriaMetrics/fastcache" | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | 	"github.com/ethereum/go-ethereum/crypto" | ||||||
|  | 	"github.com/ethereum/go-ethereum/ethdb/memorydb" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | func copyDestructs(destructs map[common.Hash]struct{}) map[common.Hash]struct{} { | ||||||
|  | 	copy := make(map[common.Hash]struct{}) | ||||||
|  | 	for hash := range destructs { | ||||||
|  | 		copy[hash] = struct{}{} | ||||||
|  | 	} | ||||||
|  | 	return copy | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func copyAccounts(accounts map[common.Hash][]byte) map[common.Hash][]byte { | ||||||
|  | 	copy := make(map[common.Hash][]byte) | ||||||
|  | 	for hash, blob := range accounts { | ||||||
|  | 		copy[hash] = blob | ||||||
|  | 	} | ||||||
|  | 	return copy | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func copyStorage(storage map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte { | ||||||
|  | 	copy := make(map[common.Hash]map[common.Hash][]byte) | ||||||
|  | 	for accHash, slots := range storage { | ||||||
|  | 		copy[accHash] = make(map[common.Hash][]byte) | ||||||
|  | 		for slotHash, blob := range slots { | ||||||
|  | 			copy[accHash][slotHash] = blob | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return copy | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // TestMergeBasics tests some simple merges
 | ||||||
|  | func TestMergeBasics(t *testing.T) { | ||||||
|  | 	var ( | ||||||
|  | 		destructs = make(map[common.Hash]struct{}) | ||||||
|  | 		accounts  = make(map[common.Hash][]byte) | ||||||
|  | 		storage   = make(map[common.Hash]map[common.Hash][]byte) | ||||||
|  | 	) | ||||||
|  | 	// Fill up a parent
 | ||||||
|  | 	for i := 0; i < 100; i++ { | ||||||
|  | 		h := randomHash() | ||||||
|  | 		data := randomAccount() | ||||||
|  | 
 | ||||||
|  | 		accounts[h] = data | ||||||
|  | 		if rand.Intn(4) == 0 { | ||||||
|  | 			destructs[h] = struct{}{} | ||||||
|  | 		} | ||||||
|  | 		if rand.Intn(2) == 0 { | ||||||
|  | 			accStorage := make(map[common.Hash][]byte) | ||||||
|  | 			value := make([]byte, 32) | ||||||
|  | 			rand.Read(value) | ||||||
|  | 			accStorage[randomHash()] = value | ||||||
|  | 			storage[h] = accStorage | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// Add some (identical) layers on top
 | ||||||
|  | 	parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) | ||||||
|  | 	child := newDiffLayer(parent, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) | ||||||
|  | 	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) | ||||||
|  | 	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) | ||||||
|  | 	child = newDiffLayer(child, common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) | ||||||
|  | 	// And flatten
 | ||||||
|  | 	merged := (child.flatten()).(*diffLayer) | ||||||
|  | 
 | ||||||
|  | 	{ // Check account lists
 | ||||||
|  | 		if have, want := len(merged.accountList), 0; have != want { | ||||||
|  | 			t.Errorf("accountList wrong: have %v, want %v", have, want) | ||||||
|  | 		} | ||||||
|  | 		if have, want := len(merged.AccountList()), len(accounts); have != want { | ||||||
|  | 			t.Errorf("AccountList() wrong: have %v, want %v", have, want) | ||||||
|  | 		} | ||||||
|  | 		if have, want := len(merged.accountList), len(accounts); have != want { | ||||||
|  | 			t.Errorf("accountList [2] wrong: have %v, want %v", have, want) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	{ // Check account drops
 | ||||||
|  | 		if have, want := len(merged.destructSet), len(destructs); have != want { | ||||||
|  | 			t.Errorf("accountDrop wrong: have %v, want %v", have, want) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	{ // Check storage lists
 | ||||||
|  | 		i := 0 | ||||||
|  | 		for aHash, sMap := range storage { | ||||||
|  | 			if have, want := len(merged.storageList), i; have != want { | ||||||
|  | 				t.Errorf("[1] storageList wrong: have %v, want %v", have, want) | ||||||
|  | 			} | ||||||
|  | 			if have, want := len(merged.StorageList(aHash)), len(sMap); have != want { | ||||||
|  | 				t.Errorf("[2] StorageList() wrong: have %v, want %v", have, want) | ||||||
|  | 			} | ||||||
|  | 			if have, want := len(merged.storageList[aHash]), len(sMap); have != want { | ||||||
|  | 				t.Errorf("storageList wrong: have %v, want %v", have, want) | ||||||
|  | 			} | ||||||
|  | 			i++ | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // TestMergeDelete tests some deletion
 | ||||||
|  | func TestMergeDelete(t *testing.T) { | ||||||
|  | 	var ( | ||||||
|  | 		storage = make(map[common.Hash]map[common.Hash][]byte) | ||||||
|  | 	) | ||||||
|  | 	// Fill up a parent
 | ||||||
|  | 	h1 := common.HexToHash("0x01") | ||||||
|  | 	h2 := common.HexToHash("0x02") | ||||||
|  | 
 | ||||||
|  | 	flipDrops := func() map[common.Hash]struct{} { | ||||||
|  | 		return map[common.Hash]struct{}{ | ||||||
|  | 			h2: struct{}{}, | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	flipAccs := func() map[common.Hash][]byte { | ||||||
|  | 		return map[common.Hash][]byte{ | ||||||
|  | 			h1: randomAccount(), | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	flopDrops := func() map[common.Hash]struct{} { | ||||||
|  | 		return map[common.Hash]struct{}{ | ||||||
|  | 			h1: struct{}{}, | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	flopAccs := func() map[common.Hash][]byte { | ||||||
|  | 		return map[common.Hash][]byte{ | ||||||
|  | 			h2: randomAccount(), | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// Add some flip-flopping layers on top
 | ||||||
|  | 	parent := newDiffLayer(emptyLayer(), common.Hash{}, flipDrops(), flipAccs(), storage) | ||||||
|  | 	child := parent.Update(common.Hash{}, flopDrops(), flopAccs(), storage) | ||||||
|  | 	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage) | ||||||
|  | 	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage) | ||||||
|  | 	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage) | ||||||
|  | 	child = child.Update(common.Hash{}, flopDrops(), flopAccs(), storage) | ||||||
|  | 	child = child.Update(common.Hash{}, flipDrops(), flipAccs(), storage) | ||||||
|  | 
 | ||||||
|  | 	if data, _ := child.Account(h1); data == nil { | ||||||
|  | 		t.Errorf("last diff layer: expected %x account to be non-nil", h1) | ||||||
|  | 	} | ||||||
|  | 	if data, _ := child.Account(h2); data != nil { | ||||||
|  | 		t.Errorf("last diff layer: expected %x account to be nil", h2) | ||||||
|  | 	} | ||||||
|  | 	if _, ok := child.destructSet[h1]; ok { | ||||||
|  | 		t.Errorf("last diff layer: expected %x drop to be missing", h1) | ||||||
|  | 	} | ||||||
|  | 	if _, ok := child.destructSet[h2]; !ok { | ||||||
|  | 		t.Errorf("last diff layer: expected %x drop to be present", h1) | ||||||
|  | 	} | ||||||
|  | 	// And flatten
 | ||||||
|  | 	merged := (child.flatten()).(*diffLayer) | ||||||
|  | 
 | ||||||
|  | 	if data, _ := merged.Account(h1); data == nil { | ||||||
|  | 		t.Errorf("merged layer: expected %x account to be non-nil", h1) | ||||||
|  | 	} | ||||||
|  | 	if data, _ := merged.Account(h2); data != nil { | ||||||
|  | 		t.Errorf("merged layer: expected %x account to be nil", h2) | ||||||
|  | 	} | ||||||
|  | 	if _, ok := merged.destructSet[h1]; !ok { // Note, drops stay alive until persisted to disk!
 | ||||||
|  | 		t.Errorf("merged diff layer: expected %x drop to be present", h1) | ||||||
|  | 	} | ||||||
|  | 	if _, ok := merged.destructSet[h2]; !ok { // Note, drops stay alive until persisted to disk!
 | ||||||
|  | 		t.Errorf("merged diff layer: expected %x drop to be present", h1) | ||||||
|  | 	} | ||||||
|  | 	// If we add more granular metering of memory, we can enable this again,
 | ||||||
|  | 	// but it's not implemented for now
 | ||||||
|  | 	//if have, want := merged.memory, child.memory; have != want {
 | ||||||
|  | 	//	t.Errorf("mem wrong: have %d, want %d", have, want)
 | ||||||
|  | 	//}
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // This tests that if we create a new account, and set a slot, and then merge
 | ||||||
|  | // it, the lists will be correct.
 | ||||||
|  | func TestInsertAndMerge(t *testing.T) { | ||||||
|  | 	// Fill up a parent
 | ||||||
|  | 	var ( | ||||||
|  | 		acc    = common.HexToHash("0x01") | ||||||
|  | 		slot   = common.HexToHash("0x02") | ||||||
|  | 		parent *diffLayer | ||||||
|  | 		child  *diffLayer | ||||||
|  | 	) | ||||||
|  | 	{ | ||||||
|  | 		var ( | ||||||
|  | 			destructs = make(map[common.Hash]struct{}) | ||||||
|  | 			accounts  = make(map[common.Hash][]byte) | ||||||
|  | 			storage   = make(map[common.Hash]map[common.Hash][]byte) | ||||||
|  | 		) | ||||||
|  | 		parent = newDiffLayer(emptyLayer(), common.Hash{}, destructs, accounts, storage) | ||||||
|  | 	} | ||||||
|  | 	{ | ||||||
|  | 		var ( | ||||||
|  | 			destructs = make(map[common.Hash]struct{}) | ||||||
|  | 			accounts  = make(map[common.Hash][]byte) | ||||||
|  | 			storage   = make(map[common.Hash]map[common.Hash][]byte) | ||||||
|  | 		) | ||||||
|  | 		accounts[acc] = randomAccount() | ||||||
|  | 		storage[acc] = make(map[common.Hash][]byte) | ||||||
|  | 		storage[acc][slot] = []byte{0x01} | ||||||
|  | 		child = newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) | ||||||
|  | 	} | ||||||
|  | 	// And flatten
 | ||||||
|  | 	merged := (child.flatten()).(*diffLayer) | ||||||
|  | 	{ // Check that slot value is present
 | ||||||
|  | 		have, _ := merged.Storage(acc, slot) | ||||||
|  | 		if want := []byte{0x01}; !bytes.Equal(have, want) { | ||||||
|  | 			t.Errorf("merged slot value wrong: have %x, want %x", have, want) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func emptyLayer() *diskLayer { | ||||||
|  | 	return &diskLayer{ | ||||||
|  | 		diskdb: memorydb.New(), | ||||||
|  | 		cache:  fastcache.New(500 * 1024), | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // BenchmarkSearch checks how long it takes to find a non-existing key
 | ||||||
|  | // BenchmarkSearch-6   	  200000	     10481 ns/op (1K per layer)
 | ||||||
|  | // BenchmarkSearch-6   	  200000	     10760 ns/op (10K per layer)
 | ||||||
|  | // BenchmarkSearch-6   	  100000	     17866 ns/op
 | ||||||
|  | //
 | ||||||
|  | // BenchmarkSearch-6   	  500000	      3723 ns/op (10k per layer, only top-level RLock())
 | ||||||
|  | func BenchmarkSearch(b *testing.B) { | ||||||
|  | 	// First, we set up 128 diff layers, with 10K items each
 | ||||||
|  | 	fill := func(parent snapshot) *diffLayer { | ||||||
|  | 		var ( | ||||||
|  | 			destructs = make(map[common.Hash]struct{}) | ||||||
|  | 			accounts  = make(map[common.Hash][]byte) | ||||||
|  | 			storage   = make(map[common.Hash]map[common.Hash][]byte) | ||||||
|  | 		) | ||||||
|  | 		for i := 0; i < 10000; i++ { | ||||||
|  | 			accounts[randomHash()] = randomAccount() | ||||||
|  | 		} | ||||||
|  | 		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) | ||||||
|  | 	} | ||||||
|  | 	var layer snapshot | ||||||
|  | 	layer = emptyLayer() | ||||||
|  | 	for i := 0; i < 128; i++ { | ||||||
|  | 		layer = fill(layer) | ||||||
|  | 	} | ||||||
|  | 	key := crypto.Keccak256Hash([]byte{0x13, 0x38}) | ||||||
|  | 	b.ResetTimer() | ||||||
|  | 	for i := 0; i < b.N; i++ { | ||||||
|  | 		layer.AccountRLP(key) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // BenchmarkSearchSlot checks how long it takes to find a non-existing storage slot
 | ||||||
|  | // - Number of layers: 128
 | ||||||
|  | // - Each layer contains the account, with a couple of storage slots
 | ||||||
|  | // BenchmarkSearchSlot-6   	  100000	     14554 ns/op
 | ||||||
|  | // BenchmarkSearchSlot-6   	  100000	     22254 ns/op (when checking parent root using mutex)
 | ||||||
|  | // BenchmarkSearchSlot-6   	  100000	     14551 ns/op (when checking parent number using atomic)
 | ||||||
|  | // With bloom filter:
 | ||||||
|  | // BenchmarkSearchSlot-6   	 3467835	       351 ns/op
 | ||||||
|  | func BenchmarkSearchSlot(b *testing.B) { | ||||||
|  | 	// First, we set up 128 diff layers, each containing the account and a few storage slots
 | ||||||
|  | 	accountKey := crypto.Keccak256Hash([]byte{0x13, 0x37}) | ||||||
|  | 	storageKey := crypto.Keccak256Hash([]byte{0x13, 0x37}) | ||||||
|  | 	accountRLP := randomAccount() | ||||||
|  | 	fill := func(parent snapshot) *diffLayer { | ||||||
|  | 		var ( | ||||||
|  | 			destructs = make(map[common.Hash]struct{}) | ||||||
|  | 			accounts  = make(map[common.Hash][]byte) | ||||||
|  | 			storage   = make(map[common.Hash]map[common.Hash][]byte) | ||||||
|  | 		) | ||||||
|  | 		accounts[accountKey] = accountRLP | ||||||
|  | 
 | ||||||
|  | 		accStorage := make(map[common.Hash][]byte) | ||||||
|  | 		for i := 0; i < 5; i++ { | ||||||
|  | 			value := make([]byte, 32) | ||||||
|  | 			rand.Read(value) | ||||||
|  | 			accStorage[randomHash()] = value | ||||||
|  | 			storage[accountKey] = accStorage | ||||||
|  | 		} | ||||||
|  | 		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) | ||||||
|  | 	} | ||||||
|  | 	var layer snapshot | ||||||
|  | 	layer = emptyLayer() | ||||||
|  | 	for i := 0; i < 128; i++ { | ||||||
|  | 		layer = fill(layer) | ||||||
|  | 	} | ||||||
|  | 	b.ResetTimer() | ||||||
|  | 	for i := 0; i < b.N; i++ { | ||||||
|  | 		layer.Storage(accountKey, storageKey) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // With accountList and sorting
 | ||||||
|  | // BenchmarkFlatten-6   	      50	  29890856 ns/op
 | ||||||
|  | //
 | ||||||
|  | // Without sorting and tracking accountlist
 | ||||||
|  | // BenchmarkFlatten-6   	     300	   5511511 ns/op
 | ||||||
|  | func BenchmarkFlatten(b *testing.B) { | ||||||
|  | 	fill := func(parent snapshot) *diffLayer { | ||||||
|  | 		var ( | ||||||
|  | 			destructs = make(map[common.Hash]struct{}) | ||||||
|  | 			accounts  = make(map[common.Hash][]byte) | ||||||
|  | 			storage   = make(map[common.Hash]map[common.Hash][]byte) | ||||||
|  | 		) | ||||||
|  | 		for i := 0; i < 100; i++ { | ||||||
|  | 			accountKey := randomHash() | ||||||
|  | 			accounts[accountKey] = randomAccount() | ||||||
|  | 
 | ||||||
|  | 			accStorage := make(map[common.Hash][]byte) | ||||||
|  | 			for i := 0; i < 20; i++ { | ||||||
|  | 				value := make([]byte, 32) | ||||||
|  | 				rand.Read(value) | ||||||
|  | 				accStorage[randomHash()] = value | ||||||
|  | 
 | ||||||
|  | 			} | ||||||
|  | 			storage[accountKey] = accStorage | ||||||
|  | 		} | ||||||
|  | 		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) | ||||||
|  | 	} | ||||||
|  | 	b.ResetTimer() | ||||||
|  | 	for i := 0; i < b.N; i++ { | ||||||
|  | 		b.StopTimer() | ||||||
|  | 		var layer snapshot | ||||||
|  | 		layer = emptyLayer() | ||||||
|  | 		for i := 1; i < 128; i++ { | ||||||
|  | 			layer = fill(layer) | ||||||
|  | 		} | ||||||
|  | 		b.StartTimer() | ||||||
|  | 
 | ||||||
|  | 		for i := 1; i < 128; i++ { | ||||||
|  | 			dl, ok := layer.(*diffLayer) | ||||||
|  | 			if !ok { | ||||||
|  | 				break | ||||||
|  | 			} | ||||||
|  | 			layer = dl.flatten() | ||||||
|  | 		} | ||||||
|  | 		b.StopTimer() | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // This benchmark writes ~324M of diff layers to disk, spread over
 | ||||||
|  | // - 128 individual layers,
 | ||||||
|  | // - each with 200 accounts
 | ||||||
|  | // - containing 200 slots
 | ||||||
|  | //
 | ||||||
|  | // BenchmarkJournal-6   	       1	1471373923 ns/op
 | ||||||
|  | // BenchmarkJournal-6   	       1	1208083335 ns/op // bufio writer
 | ||||||
|  | func BenchmarkJournal(b *testing.B) { | ||||||
|  | 	fill := func(parent snapshot) *diffLayer { | ||||||
|  | 		var ( | ||||||
|  | 			destructs = make(map[common.Hash]struct{}) | ||||||
|  | 			accounts  = make(map[common.Hash][]byte) | ||||||
|  | 			storage   = make(map[common.Hash]map[common.Hash][]byte) | ||||||
|  | 		) | ||||||
|  | 		for i := 0; i < 200; i++ { | ||||||
|  | 			accountKey := randomHash() | ||||||
|  | 			accounts[accountKey] = randomAccount() | ||||||
|  | 
 | ||||||
|  | 			accStorage := make(map[common.Hash][]byte) | ||||||
|  | 			for i := 0; i < 200; i++ { | ||||||
|  | 				value := make([]byte, 32) | ||||||
|  | 				rand.Read(value) | ||||||
|  | 				accStorage[randomHash()] = value | ||||||
|  | 
 | ||||||
|  | 			} | ||||||
|  | 			storage[accountKey] = accStorage | ||||||
|  | 		} | ||||||
|  | 		return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) | ||||||
|  | 	} | ||||||
|  | 	layer := snapshot(new(diskLayer)) | ||||||
|  | 	for i := 1; i < 128; i++ { | ||||||
|  | 		layer = fill(layer) | ||||||
|  | 	} | ||||||
|  | 	b.ResetTimer() | ||||||
|  | 
 | ||||||
|  | 	for i := 0; i < b.N; i++ { | ||||||
|  | 		layer.Journal(new(bytes.Buffer)) | ||||||
|  | 	} | ||||||
|  | } | ||||||
							
								
								
									
core/state/snapshot/disklayer.go (new file, 166 lines)
							| @ -0,0 +1,166 @@ | |||||||
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | package snapshot | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"sync" | ||||||
|  | 
 | ||||||
|  | 	"github.com/VictoriaMetrics/fastcache" | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | 	"github.com/ethereum/go-ethereum/core/rawdb" | ||||||
|  | 	"github.com/ethereum/go-ethereum/ethdb" | ||||||
|  | 	"github.com/ethereum/go-ethereum/rlp" | ||||||
|  | 	"github.com/ethereum/go-ethereum/trie" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // diskLayer is a low level persistent snapshot built on top of a key-value store.
 | ||||||
|  | type diskLayer struct { | ||||||
|  | 	diskdb ethdb.KeyValueStore // Key-value store containing the base snapshot
 | ||||||
|  | 	triedb *trie.Database      // Trie node cache for reconstruction purposes
 | ||||||
|  | 	cache  *fastcache.Cache    // Cache to avoid hitting the disk for direct access
 | ||||||
|  | 
 | ||||||
|  | 	root  common.Hash // Root hash of the base snapshot
 | ||||||
|  | 	stale bool        // Signals that the layer became stale (state progressed)
 | ||||||
|  | 
 | ||||||
|  | 	genMarker  []byte                    // Marker for the state that's indexed during initial layer generation
 | ||||||
|  | 	genPending chan struct{}             // Notification channel when generation is done (test synchronicity)
 | ||||||
|  | 	genAbort   chan chan *generatorStats // Notification channel to abort generating the snapshot in this layer
 | ||||||
|  | 
 | ||||||
|  | 	lock sync.RWMutex | ||||||
|  | } | ||||||
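The genMarker field uses the database iteration order as its notion of progress: a bare account hash, or an account hash concatenated with a storage slot hash. A minimal sketch of the coverage rule applied by the accessors below (hypothetical helper, assuming the standard bytes package):

func covered(genMarker, key []byte) bool {
	// nil means generation already finished; an empty (non-nil) marker means
	// generation just started, so no lookup key compares at-or-before it.
	return genMarker == nil || bytes.Compare(key, genMarker) <= 0
}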
|  | 
 | ||||||
|  | // Root returns the root hash for which this snapshot was made.
 | ||||||
|  | func (dl *diskLayer) Root() common.Hash { | ||||||
|  | 	return dl.root | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Parent always returns nil as there's no layer below the disk.
 | ||||||
|  | func (dl *diskLayer) Parent() snapshot { | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Stale returns whether this layer has become stale (was flattened across) or if
 | ||||||
|  | // it's still live.
 | ||||||
|  | func (dl *diskLayer) Stale() bool { | ||||||
|  | 	dl.lock.RLock() | ||||||
|  | 	defer dl.lock.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	return dl.stale | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Account directly retrieves the account associated with a particular hash in
 | ||||||
|  | // the snapshot slim data format.
 | ||||||
|  | func (dl *diskLayer) Account(hash common.Hash) (*Account, error) { | ||||||
|  | 	data, err := dl.AccountRLP(hash) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	if len(data) == 0 { // can be both nil and []byte{}
 | ||||||
|  | 		return nil, nil | ||||||
|  | 	} | ||||||
|  | 	account := new(Account) | ||||||
|  | 	if err := rlp.DecodeBytes(data, account); err != nil { | ||||||
|  | 		panic(err) | ||||||
|  | 	} | ||||||
|  | 	return account, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AccountRLP directly retrieves the account RLP associated with a particular
 | ||||||
|  | // hash in the snapshot slim data format.
 | ||||||
|  | func (dl *diskLayer) AccountRLP(hash common.Hash) ([]byte, error) { | ||||||
|  | 	dl.lock.RLock() | ||||||
|  | 	defer dl.lock.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	// If the layer was flattened into, consider it invalid (any live reference to
 | ||||||
|  | 	// the original should be marked as unusable).
 | ||||||
|  | 	if dl.stale { | ||||||
|  | 		return nil, ErrSnapshotStale | ||||||
|  | 	} | ||||||
|  | 	// If the layer is being generated, ensure the requested hash has already been
 | ||||||
|  | 	// covered by the generator.
 | ||||||
|  | 	if dl.genMarker != nil && bytes.Compare(hash[:], dl.genMarker) > 0 { | ||||||
|  | 		return nil, ErrNotCoveredYet | ||||||
|  | 	} | ||||||
|  | 	// If we're in the disk layer, all diff layers missed
 | ||||||
|  | 	snapshotDirtyAccountMissMeter.Mark(1) | ||||||
|  | 
 | ||||||
|  | 	// Try to retrieve the account from the memory cache
 | ||||||
|  | 	if blob, found := dl.cache.HasGet(nil, hash[:]); found { | ||||||
|  | 		snapshotCleanAccountHitMeter.Mark(1) | ||||||
|  | 		snapshotCleanAccountReadMeter.Mark(int64(len(blob))) | ||||||
|  | 		return blob, nil | ||||||
|  | 	} | ||||||
|  | 	// Cache doesn't contain account, pull from disk and cache for later
 | ||||||
|  | 	blob := rawdb.ReadAccountSnapshot(dl.diskdb, hash) | ||||||
|  | 	dl.cache.Set(hash[:], blob) | ||||||
|  | 
 | ||||||
|  | 	snapshotCleanAccountMissMeter.Mark(1) | ||||||
|  | 	if n := len(blob); n > 0 { | ||||||
|  | 		snapshotCleanAccountWriteMeter.Mark(int64(n)) | ||||||
|  | 	} else { | ||||||
|  | 		snapshotCleanAccountInexMeter.Mark(1) | ||||||
|  | 	} | ||||||
|  | 	return blob, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Storage directly retrieves the storage data associated with a particular hash,
 | ||||||
|  | // within a particular account.
 | ||||||
|  | func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) { | ||||||
|  | 	dl.lock.RLock() | ||||||
|  | 	defer dl.lock.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	// If the layer was flattened into, consider it invalid (any live reference to
 | ||||||
|  | 	// the original should be marked as unusable).
 | ||||||
|  | 	if dl.stale { | ||||||
|  | 		return nil, ErrSnapshotStale | ||||||
|  | 	} | ||||||
|  | 	key := append(accountHash[:], storageHash[:]...) | ||||||
|  | 
 | ||||||
|  | 	// If the layer is being generated, ensure the requested hash has already been
 | ||||||
|  | 	// covered by the generator.
 | ||||||
|  | 	if dl.genMarker != nil && bytes.Compare(key, dl.genMarker) > 0 { | ||||||
|  | 		return nil, ErrNotCoveredYet | ||||||
|  | 	} | ||||||
|  | 	// If we're in the disk layer, all diff layers missed
 | ||||||
|  | 	snapshotDirtyStorageMissMeter.Mark(1) | ||||||
|  | 
 | ||||||
|  | 	// Try to retrieve the storage slot from the memory cache
 | ||||||
|  | 	if blob, found := dl.cache.HasGet(nil, key); found { | ||||||
|  | 		snapshotCleanStorageHitMeter.Mark(1) | ||||||
|  | 		snapshotCleanStorageReadMeter.Mark(int64(len(blob))) | ||||||
|  | 		return blob, nil | ||||||
|  | 	} | ||||||
|  | 	// Cache doesn't contain storage slot, pull from disk and cache for later
 | ||||||
|  | 	blob := rawdb.ReadStorageSnapshot(dl.diskdb, accountHash, storageHash) | ||||||
|  | 	dl.cache.Set(key, blob) | ||||||
|  | 
 | ||||||
|  | 	snapshotCleanStorageMissMeter.Mark(1) | ||||||
|  | 	if n := len(blob); n > 0 { | ||||||
|  | 		snapshotCleanStorageWriteMeter.Mark(int64(n)) | ||||||
|  | 	} else { | ||||||
|  | 		snapshotCleanStorageInexMeter.Mark(1) | ||||||
|  | 	} | ||||||
|  | 	return blob, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Update creates a new layer on top of the existing snapshot diff tree with
 | ||||||
|  | // the specified data items. Note, the maps are retained by the method to avoid
 | ||||||
|  | // copying everything.
 | ||||||
|  | func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer { | ||||||
|  | 	return newDiffLayer(dl, blockHash, destructs, accounts, storage) | ||||||
|  | } | ||||||
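A usage sketch of stacking a diff on top of the disk layer, reusing the map shapes from the tests earlier in this change (the block hash, account hash and value are made up for illustration):

base := &diskLayer{diskdb: memorydb.New(), cache: fastcache.New(500 * 1024)}
diff := base.Update(common.HexToHash("0xff"),
	map[common.Hash]struct{}{},                              // no destructed accounts
	map[common.Hash][]byte{common.HexToHash("0x01"): {0x1}}, // one modified account
	map[common.Hash]map[common.Hash][]byte{})                // no storage changes
// Further Update calls on diff would keep stacking diff layers on top.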
							
								
								
									
core/state/snapshot/disklayer_test.go (new file, 435 lines)
							| @ -0,0 +1,435 @@ | |||||||
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | package snapshot | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"testing" | ||||||
|  | 
 | ||||||
|  | 	"github.com/VictoriaMetrics/fastcache" | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | 	"github.com/ethereum/go-ethereum/core/rawdb" | ||||||
|  | 	"github.com/ethereum/go-ethereum/ethdb/memorydb" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // reverse reverses the contents of a byte slice. It's used to update random accs
 | ||||||
|  | // with deterministic changes.
 | ||||||
|  | func reverse(blob []byte) []byte { | ||||||
|  | 	res := make([]byte, len(blob)) | ||||||
|  | 	for i, b := range blob { | ||||||
|  | 		res[len(blob)-1-i] = b | ||||||
|  | 	} | ||||||
|  | 	return res | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Tests that merging something into a disk layer persists it into the database
 | ||||||
|  | // and invalidates any previously written and cached values.
 | ||||||
|  | func TestDiskMerge(t *testing.T) { | ||||||
|  | 	// Create some accounts in the disk layer
 | ||||||
|  | 	db := memorydb.New() | ||||||
|  | 
 | ||||||
|  | 	var ( | ||||||
|  | 		accNoModNoCache     = common.Hash{0x1} | ||||||
|  | 		accNoModCache       = common.Hash{0x2} | ||||||
|  | 		accModNoCache       = common.Hash{0x3} | ||||||
|  | 		accModCache         = common.Hash{0x4} | ||||||
|  | 		accDelNoCache       = common.Hash{0x5} | ||||||
|  | 		accDelCache         = common.Hash{0x6} | ||||||
|  | 		conNoModNoCache     = common.Hash{0x7} | ||||||
|  | 		conNoModNoCacheSlot = common.Hash{0x70} | ||||||
|  | 		conNoModCache       = common.Hash{0x8} | ||||||
|  | 		conNoModCacheSlot   = common.Hash{0x80} | ||||||
|  | 		conModNoCache       = common.Hash{0x9} | ||||||
|  | 		conModNoCacheSlot   = common.Hash{0x90} | ||||||
|  | 		conModCache         = common.Hash{0xa} | ||||||
|  | 		conModCacheSlot     = common.Hash{0xa0} | ||||||
|  | 		conDelNoCache       = common.Hash{0xb} | ||||||
|  | 		conDelNoCacheSlot   = common.Hash{0xb0} | ||||||
|  | 		conDelCache         = common.Hash{0xc} | ||||||
|  | 		conDelCacheSlot     = common.Hash{0xc0} | ||||||
|  | 		conNukeNoCache      = common.Hash{0xd} | ||||||
|  | 		conNukeNoCacheSlot  = common.Hash{0xd0} | ||||||
|  | 		conNukeCache        = common.Hash{0xe} | ||||||
|  | 		conNukeCacheSlot    = common.Hash{0xe0} | ||||||
|  | 		baseRoot            = randomHash() | ||||||
|  | 		diffRoot            = randomHash() | ||||||
|  | 	) | ||||||
|  | 
 | ||||||
|  | 	rawdb.WriteAccountSnapshot(db, accNoModNoCache, accNoModNoCache[:]) | ||||||
|  | 	rawdb.WriteAccountSnapshot(db, accNoModCache, accNoModCache[:]) | ||||||
|  | 	rawdb.WriteAccountSnapshot(db, accModNoCache, accModNoCache[:]) | ||||||
|  | 	rawdb.WriteAccountSnapshot(db, accModCache, accModCache[:]) | ||||||
|  | 	rawdb.WriteAccountSnapshot(db, accDelNoCache, accDelNoCache[:]) | ||||||
|  | 	rawdb.WriteAccountSnapshot(db, accDelCache, accDelCache[:]) | ||||||
|  | 
 | ||||||
|  | 	rawdb.WriteAccountSnapshot(db, conNoModNoCache, conNoModNoCache[:]) | ||||||
|  | 	rawdb.WriteStorageSnapshot(db, conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:]) | ||||||
|  | 	rawdb.WriteAccountSnapshot(db, conNoModCache, conNoModCache[:]) | ||||||
|  | 	rawdb.WriteStorageSnapshot(db, conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:]) | ||||||
|  | 	rawdb.WriteAccountSnapshot(db, conModNoCache, conModNoCache[:]) | ||||||
|  | 	rawdb.WriteStorageSnapshot(db, conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:]) | ||||||
|  | 	rawdb.WriteAccountSnapshot(db, conModCache, conModCache[:]) | ||||||
|  | 	rawdb.WriteStorageSnapshot(db, conModCache, conModCacheSlot, conModCacheSlot[:]) | ||||||
|  | 	rawdb.WriteAccountSnapshot(db, conDelNoCache, conDelNoCache[:]) | ||||||
|  | 	rawdb.WriteStorageSnapshot(db, conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:]) | ||||||
|  | 	rawdb.WriteAccountSnapshot(db, conDelCache, conDelCache[:]) | ||||||
|  | 	rawdb.WriteStorageSnapshot(db, conDelCache, conDelCacheSlot, conDelCacheSlot[:]) | ||||||
|  | 
 | ||||||
|  | 	rawdb.WriteAccountSnapshot(db, conNukeNoCache, conNukeNoCache[:]) | ||||||
|  | 	rawdb.WriteStorageSnapshot(db, conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:]) | ||||||
|  | 	rawdb.WriteAccountSnapshot(db, conNukeCache, conNukeCache[:]) | ||||||
|  | 	rawdb.WriteStorageSnapshot(db, conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:]) | ||||||
|  | 
 | ||||||
|  | 	rawdb.WriteSnapshotRoot(db, baseRoot) | ||||||
|  | 
 | ||||||
|  | 	// Create a disk layer based on the above and cache in some data
 | ||||||
|  | 	snaps := &Tree{ | ||||||
|  | 		layers: map[common.Hash]snapshot{ | ||||||
|  | 			baseRoot: &diskLayer{ | ||||||
|  | 				diskdb: db, | ||||||
|  | 				cache:  fastcache.New(500 * 1024), | ||||||
|  | 				root:   baseRoot, | ||||||
|  | 			}, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 	base := snaps.Snapshot(baseRoot) | ||||||
|  | 	base.AccountRLP(accNoModCache) | ||||||
|  | 	base.AccountRLP(accModCache) | ||||||
|  | 	base.AccountRLP(accDelCache) | ||||||
|  | 	base.Storage(conNoModCache, conNoModCacheSlot) | ||||||
|  | 	base.Storage(conModCache, conModCacheSlot) | ||||||
|  | 	base.Storage(conDelCache, conDelCacheSlot) | ||||||
|  | 	base.Storage(conNukeCache, conNukeCacheSlot) | ||||||
|  | 
 | ||||||
|  | 	// Modify or delete some accounts, flatten everything onto disk
 | ||||||
|  | 	if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{ | ||||||
|  | 		accDelNoCache:  struct{}{}, | ||||||
|  | 		accDelCache:    struct{}{}, | ||||||
|  | 		conNukeNoCache: struct{}{}, | ||||||
|  | 		conNukeCache:   struct{}{}, | ||||||
|  | 	}, map[common.Hash][]byte{ | ||||||
|  | 		accModNoCache: reverse(accModNoCache[:]), | ||||||
|  | 		accModCache:   reverse(accModCache[:]), | ||||||
|  | 	}, map[common.Hash]map[common.Hash][]byte{ | ||||||
|  | 		conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])}, | ||||||
|  | 		conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])}, | ||||||
|  | 		conDelNoCache: {conDelNoCacheSlot: nil}, | ||||||
|  | 		conDelCache:   {conDelCacheSlot: nil}, | ||||||
|  | 	}); err != nil { | ||||||
|  | 		t.Fatalf("failed to update snapshot tree: %v", err) | ||||||
|  | 	} | ||||||
|  | 	if err := snaps.Cap(diffRoot, 0); err != nil { | ||||||
|  | 		t.Fatalf("failed to flatten snapshot tree: %v", err) | ||||||
|  | 	} | ||||||
|  | 	// Retrieve all the data through the disk layer and validate it
 | ||||||
|  | 	base = snaps.Snapshot(diffRoot) | ||||||
|  | 	if _, ok := base.(*diskLayer); !ok { | ||||||
|  | 		t.Fatalf("update not flattened into the disk layer") | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// assertAccount ensures that an account matches the given blob.
 | ||||||
|  | 	assertAccount := func(account common.Hash, data []byte) { | ||||||
|  | 		t.Helper() | ||||||
|  | 		blob, err := base.AccountRLP(account) | ||||||
|  | 		if err != nil { | ||||||
|  | 			t.Errorf("account access (%x) failed: %v", account, err) | ||||||
|  | 		} else if !bytes.Equal(blob, data) { | ||||||
|  | 			t.Errorf("account access (%x) mismatch: have %x, want %x", account, blob, data) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	assertAccount(accNoModNoCache, accNoModNoCache[:]) | ||||||
|  | 	assertAccount(accNoModCache, accNoModCache[:]) | ||||||
|  | 	assertAccount(accModNoCache, reverse(accModNoCache[:])) | ||||||
|  | 	assertAccount(accModCache, reverse(accModCache[:])) | ||||||
|  | 	assertAccount(accDelNoCache, nil) | ||||||
|  | 	assertAccount(accDelCache, nil) | ||||||
|  | 
 | ||||||
|  | 	// assertStorage ensures that a storage slot matches the given blob.
 | ||||||
|  | 	assertStorage := func(account common.Hash, slot common.Hash, data []byte) { | ||||||
|  | 		t.Helper() | ||||||
|  | 		blob, err := base.Storage(account, slot) | ||||||
|  | 		if err != nil { | ||||||
|  | 			t.Errorf("storage access (%x:%x) failed: %v", account, slot, err) | ||||||
|  | 		} else if !bytes.Equal(blob, data) { | ||||||
|  | 			t.Errorf("storage access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:]) | ||||||
|  | 	assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:]) | ||||||
|  | 	assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:])) | ||||||
|  | 	assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:])) | ||||||
|  | 	assertStorage(conDelNoCache, conDelNoCacheSlot, nil) | ||||||
|  | 	assertStorage(conDelCache, conDelCacheSlot, nil) | ||||||
|  | 	assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil) | ||||||
|  | 	assertStorage(conNukeCache, conNukeCacheSlot, nil) | ||||||
|  | 
 | ||||||
|  | 	// Retrieve all the data directly from the database and validate it
 | ||||||
|  | 
 | ||||||
|  | 	// assertDatabaseAccount ensures that an account from the database matches the given blob.
 | ||||||
|  | 	assertDatabaseAccount := func(account common.Hash, data []byte) { | ||||||
|  | 		t.Helper() | ||||||
|  | 		if blob := rawdb.ReadAccountSnapshot(db, account); !bytes.Equal(blob, data) { | ||||||
|  | 			t.Errorf("account database access (%x) mismatch: have %x, want %x", account, blob, data) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:]) | ||||||
|  | 	assertDatabaseAccount(accNoModCache, accNoModCache[:]) | ||||||
|  | 	assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:])) | ||||||
|  | 	assertDatabaseAccount(accModCache, reverse(accModCache[:])) | ||||||
|  | 	assertDatabaseAccount(accDelNoCache, nil) | ||||||
|  | 	assertDatabaseAccount(accDelCache, nil) | ||||||
|  | 
 | ||||||
|  | 	// assertDatabaseStorage ensures that a storage slot from the database matches the given blob.
 | ||||||
|  | 	assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) { | ||||||
|  | 		t.Helper() | ||||||
|  | 		if blob := rawdb.ReadStorageSnapshot(db, account, slot); !bytes.Equal(blob, data) { | ||||||
|  | 			t.Errorf("storage database access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:]) | ||||||
|  | 	assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:]) | ||||||
|  | 	assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:])) | ||||||
|  | 	assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:])) | ||||||
|  | 	assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil) | ||||||
|  | 	assertDatabaseStorage(conDelCache, conDelCacheSlot, nil) | ||||||
|  | 	assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil) | ||||||
|  | 	assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Tests that merging something into a disk layer persists it into the database
 | ||||||
|  | // and invalidates any previously written and cached values, discarding anything
 | ||||||
|  | // after the in-progress generation marker.
 | ||||||
|  | func TestDiskPartialMerge(t *testing.T) { | ||||||
|  | 	// Iterate the test a few times to ensure we pick various internal orderings
 | ||||||
|  | 	// for the data slots as well as the progress marker.
 | ||||||
|  | 	for i := 0; i < 1024; i++ { | ||||||
|  | 		// Create some accounts in the disk layer
 | ||||||
|  | 		db := memorydb.New() | ||||||
|  | 
 | ||||||
|  | 		var ( | ||||||
|  | 			accNoModNoCache     = randomHash() | ||||||
|  | 			accNoModCache       = randomHash() | ||||||
|  | 			accModNoCache       = randomHash() | ||||||
|  | 			accModCache         = randomHash() | ||||||
|  | 			accDelNoCache       = randomHash() | ||||||
|  | 			accDelCache         = randomHash() | ||||||
|  | 			conNoModNoCache     = randomHash() | ||||||
|  | 			conNoModNoCacheSlot = randomHash() | ||||||
|  | 			conNoModCache       = randomHash() | ||||||
|  | 			conNoModCacheSlot   = randomHash() | ||||||
|  | 			conModNoCache       = randomHash() | ||||||
|  | 			conModNoCacheSlot   = randomHash() | ||||||
|  | 			conModCache         = randomHash() | ||||||
|  | 			conModCacheSlot     = randomHash() | ||||||
|  | 			conDelNoCache       = randomHash() | ||||||
|  | 			conDelNoCacheSlot   = randomHash() | ||||||
|  | 			conDelCache         = randomHash() | ||||||
|  | 			conDelCacheSlot     = randomHash() | ||||||
|  | 			conNukeNoCache      = randomHash() | ||||||
|  | 			conNukeNoCacheSlot  = randomHash() | ||||||
|  | 			conNukeCache        = randomHash() | ||||||
|  | 			conNukeCacheSlot    = randomHash() | ||||||
|  | 			baseRoot            = randomHash() | ||||||
|  | 			diffRoot            = randomHash() | ||||||
|  | 			genMarker           = append(randomHash().Bytes(), randomHash().Bytes()...) | ||||||
|  | 		) | ||||||
|  | 
 | ||||||
|  | 		// insertAccount injects an account into the database if it's at or before the
 | ||||||
|  | 		// generator marker (i.e. already covered), dropping the write otherwise. This seeds the
 | ||||||
|  | 		// database with a valid starting snapshot.
 | ||||||
|  | 		insertAccount := func(account common.Hash, data []byte) { | ||||||
|  | 			if bytes.Compare(account[:], genMarker) <= 0 { | ||||||
|  | 				rawdb.WriteAccountSnapshot(db, account, data[:]) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		insertAccount(accNoModNoCache, accNoModNoCache[:]) | ||||||
|  | 		insertAccount(accNoModCache, accNoModCache[:]) | ||||||
|  | 		insertAccount(accModNoCache, accModNoCache[:]) | ||||||
|  | 		insertAccount(accModCache, accModCache[:]) | ||||||
|  | 		insertAccount(accDelNoCache, accDelNoCache[:]) | ||||||
|  | 		insertAccount(accDelCache, accDelCache[:]) | ||||||
|  | 
 | ||||||
|  | 		// insertStorage injects a storage slot into the database if it's at or before
 | ||||||
|  | 		// the generator marker (i.e. already covered), dropping the write otherwise.
 | ||||||
|  | 		// This seeds the database with a valid starting snapshot.
 | ||||||
|  | 		insertStorage := func(account common.Hash, slot common.Hash, data []byte) { | ||||||
|  | 			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 { | ||||||
|  | 				rawdb.WriteStorageSnapshot(db, account, slot, data[:]) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		insertAccount(conNoModNoCache, conNoModNoCache[:]) | ||||||
|  | 		insertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:]) | ||||||
|  | 		insertAccount(conNoModCache, conNoModCache[:]) | ||||||
|  | 		insertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:]) | ||||||
|  | 		insertAccount(conModNoCache, conModNoCache[:]) | ||||||
|  | 		insertStorage(conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:]) | ||||||
|  | 		insertAccount(conModCache, conModCache[:]) | ||||||
|  | 		insertStorage(conModCache, conModCacheSlot, conModCacheSlot[:]) | ||||||
|  | 		insertAccount(conDelNoCache, conDelNoCache[:]) | ||||||
|  | 		insertStorage(conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:]) | ||||||
|  | 		insertAccount(conDelCache, conDelCache[:]) | ||||||
|  | 		insertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:]) | ||||||
|  | 
 | ||||||
|  | 		insertAccount(conNukeNoCache, conNukeNoCache[:]) | ||||||
|  | 		insertStorage(conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:]) | ||||||
|  | 		insertAccount(conNukeCache, conNukeCache[:]) | ||||||
|  | 		insertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:]) | ||||||
|  | 
 | ||||||
|  | 		rawdb.WriteSnapshotRoot(db, baseRoot) | ||||||
|  | 
 | ||||||
|  | 		// Create a disk layer based on the above using a random progress marker
 | ||||||
|  | 		// and cache in some data.
 | ||||||
|  | 		snaps := &Tree{ | ||||||
|  | 			layers: map[common.Hash]snapshot{ | ||||||
|  | 				baseRoot: &diskLayer{ | ||||||
|  | 					diskdb: db, | ||||||
|  | 					cache:  fastcache.New(500 * 1024), | ||||||
|  | 					root:   baseRoot, | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 		} | ||||||
|  | 		snaps.layers[baseRoot].(*diskLayer).genMarker = genMarker | ||||||
|  | 		base := snaps.Snapshot(baseRoot) | ||||||
|  | 
 | ||||||
|  | 		// assertAccount ensures that an account matches the given blob if it's
 | ||||||
|  | 		// already covered by the disk snapshot, and errors out otherwise.
 | ||||||
|  | 		assertAccount := func(account common.Hash, data []byte) { | ||||||
|  | 			t.Helper() | ||||||
|  | 			blob, err := base.AccountRLP(account) | ||||||
|  | 			if bytes.Compare(account[:], genMarker) > 0 && err != ErrNotCoveredYet { | ||||||
|  | 				t.Fatalf("test %d: post-marker (%x) account access (%x) succeeded: %x", i, genMarker, account, blob) | ||||||
|  | 			} | ||||||
|  | 			if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) { | ||||||
|  | 				t.Fatalf("test %d: pre-marker (%x) account access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		assertAccount(accNoModCache, accNoModCache[:]) | ||||||
|  | 		assertAccount(accModCache, accModCache[:]) | ||||||
|  | 		assertAccount(accDelCache, accDelCache[:]) | ||||||
|  | 
 | ||||||
|  | 		// assertStorage ensures that a storage slot matches the given blob if
 | ||||||
|  | 		// it's already covered by the disk snapshot, and errors out otherwise.
 | ||||||
|  | 		assertStorage := func(account common.Hash, slot common.Hash, data []byte) { | ||||||
|  | 			t.Helper() | ||||||
|  | 			blob, err := base.Storage(account, slot) | ||||||
|  | 			if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && err != ErrNotCoveredYet { | ||||||
|  | 				t.Fatalf("test %d: post-marker (%x) storage access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob) | ||||||
|  | 			} | ||||||
|  | 			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) { | ||||||
|  | 				t.Fatalf("test %d: pre-marker (%x) storage access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:]) | ||||||
|  | 		assertStorage(conModCache, conModCacheSlot, conModCacheSlot[:]) | ||||||
|  | 		assertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:]) | ||||||
|  | 		assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:]) | ||||||
|  | 
 | ||||||
|  | 		// Modify or delete some accounts, flatten everything onto disk
 | ||||||
|  | 		if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{ | ||||||
|  | 			accDelNoCache:  struct{}{}, | ||||||
|  | 			accDelCache:    struct{}{}, | ||||||
|  | 			conNukeNoCache: struct{}{}, | ||||||
|  | 			conNukeCache:   struct{}{}, | ||||||
|  | 		}, map[common.Hash][]byte{ | ||||||
|  | 			accModNoCache: reverse(accModNoCache[:]), | ||||||
|  | 			accModCache:   reverse(accModCache[:]), | ||||||
|  | 		}, map[common.Hash]map[common.Hash][]byte{ | ||||||
|  | 			conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])}, | ||||||
|  | 			conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])}, | ||||||
|  | 			conDelNoCache: {conDelNoCacheSlot: nil}, | ||||||
|  | 			conDelCache:   {conDelCacheSlot: nil}, | ||||||
|  | 		}); err != nil { | ||||||
|  | 			t.Fatalf("test %d: failed to update snapshot tree: %v", i, err) | ||||||
|  | 		} | ||||||
|  | 		if err := snaps.Cap(diffRoot, 0); err != nil { | ||||||
|  | 			t.Fatalf("test %d: failed to flatten snapshot tree: %v", i, err) | ||||||
|  | 		} | ||||||
|  | 		// Retrieve all the data through the disk layer and validate it
 | ||||||
|  | 		base = snaps.Snapshot(diffRoot) | ||||||
|  | 		if _, ok := base.(*diskLayer); !ok { | ||||||
|  | 			t.Fatalf("test %d: update not flattened into the disk layer", i) | ||||||
|  | 		} | ||||||
|  | 		assertAccount(accNoModNoCache, accNoModNoCache[:]) | ||||||
|  | 		assertAccount(accNoModCache, accNoModCache[:]) | ||||||
|  | 		assertAccount(accModNoCache, reverse(accModNoCache[:])) | ||||||
|  | 		assertAccount(accModCache, reverse(accModCache[:])) | ||||||
|  | 		assertAccount(accDelNoCache, nil) | ||||||
|  | 		assertAccount(accDelCache, nil) | ||||||
|  | 
 | ||||||
|  | 		assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:]) | ||||||
|  | 		assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:]) | ||||||
|  | 		assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:])) | ||||||
|  | 		assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:])) | ||||||
|  | 		assertStorage(conDelNoCache, conDelNoCacheSlot, nil) | ||||||
|  | 		assertStorage(conDelCache, conDelCacheSlot, nil) | ||||||
|  | 		assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil) | ||||||
|  | 		assertStorage(conNukeCache, conNukeCacheSlot, nil) | ||||||
|  | 
 | ||||||
|  | 		// Retrieve all the data directly from the database and validate it
 | ||||||
|  | 
 | ||||||
|  | 		// assertDatabaseAccount ensures that an account inside the database matches
 | ||||||
|  | 		// the given blob if it's already covered by the disk snapshot, and does not
 | ||||||
|  | 		// exist otherwise.
 | ||||||
|  | 		assertDatabaseAccount := func(account common.Hash, data []byte) { | ||||||
|  | 			t.Helper() | ||||||
|  | 			blob := rawdb.ReadAccountSnapshot(db, account) | ||||||
|  | 			if bytes.Compare(account[:], genMarker) > 0 && blob != nil { | ||||||
|  | 				t.Fatalf("test %d: post-marker (%x) account database access (%x) succeeded: %x", i, genMarker, account, blob) | ||||||
|  | 			} | ||||||
|  | 			if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) { | ||||||
|  | 				t.Fatalf("test %d: pre-marker (%x) account database access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:]) | ||||||
|  | 		assertDatabaseAccount(accNoModCache, accNoModCache[:]) | ||||||
|  | 		assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:])) | ||||||
|  | 		assertDatabaseAccount(accModCache, reverse(accModCache[:])) | ||||||
|  | 		assertDatabaseAccount(accDelNoCache, nil) | ||||||
|  | 		assertDatabaseAccount(accDelCache, nil) | ||||||
|  | 
 | ||||||
|  | 		// assertDatabaseStorage ensures that a storage slot inside the database
 | ||||||
|  | 		// matches the given blob if it's already covered by the disk snapshot,
 | ||||||
|  | 		// and does not exist otherwise.
 | ||||||
|  | 		assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) { | ||||||
|  | 			t.Helper() | ||||||
|  | 			blob := rawdb.ReadStorageSnapshot(db, account, slot) | ||||||
|  | 			if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && blob != nil { | ||||||
|  | 				t.Fatalf("test %d: post-marker (%x) storage database access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob) | ||||||
|  | 			} | ||||||
|  | 			if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) { | ||||||
|  | 				t.Fatalf("test %d: pre-marker (%x) storage database access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:]) | ||||||
|  | 		assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:]) | ||||||
|  | 		assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:])) | ||||||
|  | 		assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:])) | ||||||
|  | 		assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil) | ||||||
|  | 		assertDatabaseStorage(conDelCache, conDelCacheSlot, nil) | ||||||
|  | 		assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil) | ||||||
|  | 		assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Tests that merging something into a disk layer persists it into the database
 | ||||||
|  | // and invalidates any previously written and cached values, discarding anything
 | ||||||
|  | // after the in-progress generation marker.
 | ||||||
|  | //
 | ||||||
|  | // This test case is a tiny specialized case of TestDiskPartialMerge, which tests
 | ||||||
|  | // some very specific corner cases that random tests won't ever trigger.
 | ||||||
|  | func TestDiskMidAccountPartialMerge(t *testing.T) { | ||||||
|  | } | ||||||
							
								
								
									
core/state/snapshot/generate.go (new file, 262 lines)
							| @ -0,0 +1,262 @@ | |||||||
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | package snapshot | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"encoding/binary" | ||||||
|  | 	"math/big" | ||||||
|  | 	"time" | ||||||
|  | 
 | ||||||
|  | 	"github.com/VictoriaMetrics/fastcache" | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | 	"github.com/ethereum/go-ethereum/common/math" | ||||||
|  | 	"github.com/ethereum/go-ethereum/core/rawdb" | ||||||
|  | 	"github.com/ethereum/go-ethereum/crypto" | ||||||
|  | 	"github.com/ethereum/go-ethereum/ethdb" | ||||||
|  | 	"github.com/ethereum/go-ethereum/log" | ||||||
|  | 	"github.com/ethereum/go-ethereum/rlp" | ||||||
|  | 	"github.com/ethereum/go-ethereum/trie" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | var ( | ||||||
|  | 	// emptyRoot is the known root hash of an empty trie.
 | ||||||
|  | 	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") | ||||||
|  | 
 | ||||||
|  | 	// emptyCode is the known hash of the empty EVM bytecode.
 | ||||||
|  | 	emptyCode = crypto.Keccak256Hash(nil) | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // generatorStats is a collection of statistics gathered by the snapshot generator
 | ||||||
|  | // for logging purposes.
 | ||||||
|  | type generatorStats struct { | ||||||
|  | 	wiping   chan struct{}      // Notification channel if wiping is in progress
 | ||||||
|  | 	origin   uint64             // Origin prefix where generation started
 | ||||||
|  | 	start    time.Time          // Timestamp when generation started
 | ||||||
|  | 	accounts uint64             // Number of accounts indexed
 | ||||||
|  | 	slots    uint64             // Number of storage slots indexed
 | ||||||
|  | 	storage  common.StorageSize // Account and storage slot size
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Log creates a contextual log with the given message and the context pulled
 | ||||||
|  | // from the internally maintained statistics.
 | ||||||
|  | func (gs *generatorStats) Log(msg string, marker []byte) { | ||||||
|  | 	var ctx []interface{} | ||||||
|  | 
 | ||||||
|  | 	// Figure out whether we're after or within an account
 | ||||||
|  | 	switch len(marker) { | ||||||
|  | 	case common.HashLength: | ||||||
|  | 		ctx = append(ctx, []interface{}{"at", common.BytesToHash(marker)}...) | ||||||
|  | 	case 2 * common.HashLength: | ||||||
|  | 		ctx = append(ctx, []interface{}{ | ||||||
|  | 			"in", common.BytesToHash(marker[:common.HashLength]), | ||||||
|  | 			"at", common.BytesToHash(marker[common.HashLength:]), | ||||||
|  | 		}...) | ||||||
|  | 	} | ||||||
|  | 	// Add the usual measurements
 | ||||||
|  | 	ctx = append(ctx, []interface{}{ | ||||||
|  | 		"accounts", gs.accounts, | ||||||
|  | 		"slots", gs.slots, | ||||||
|  | 		"storage", gs.storage, | ||||||
|  | 		"elapsed", common.PrettyDuration(time.Since(gs.start)), | ||||||
|  | 	}...) | ||||||
|  | 	// Calculate the estimated indexing time based on current stats
 | ||||||
|  | 	if len(marker) > 0 { | ||||||
|  | 		if done := binary.BigEndian.Uint64(marker[:8]) - gs.origin; done > 0 { | ||||||
|  | 			left := math.MaxUint64 - binary.BigEndian.Uint64(marker[:8]) | ||||||
|  | 
 | ||||||
|  | 			speed := done/uint64(time.Since(gs.start)/time.Millisecond+1) + 1 // +1 to avoid division by zero
 | ||||||
|  | 			ctx = append(ctx, []interface{}{ | ||||||
|  | 				"eta", common.PrettyDuration(time.Duration(left/speed) * time.Millisecond), | ||||||
|  | 			}...) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	log.Info(msg, ctx...) | ||||||
|  | } | ||||||
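A worked sketch of the ETA arithmetic above, pulled out into a hypothetical helper: the first eight bytes of the marker are interpreted as a position in the 2^64 hash space, and the remaining distance is divided by the observed indexing speed.

func etaFor(marker []byte, origin uint64, start time.Time) time.Duration {
	position := binary.BigEndian.Uint64(marker[:8])
	done := position - origin
	left := math.MaxUint64 - position
	speed := done/uint64(time.Since(start)/time.Millisecond+1) + 1 // +1 avoids division by zero
	return time.Duration(left/speed) * time.Millisecond
}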
|  | 
 | ||||||
|  | // generateSnapshot regenerates a brand new snapshot based on an existing state
 | ||||||
|  | // database and head block asynchronously. The snapshot is returned immediately
 | ||||||
|  | // and generation is continued in the background until done.
 | ||||||
|  | func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, wiper chan struct{}) *diskLayer { | ||||||
|  | 	// Wipe any previously existing snapshot from the database if no wiper is
 | ||||||
|  | 	// currently in progress.
 | ||||||
|  | 	if wiper == nil { | ||||||
|  | 		wiper = wipeSnapshot(diskdb, true) | ||||||
|  | 	} | ||||||
|  | 	// Create a new disk layer with an initialized state marker at zero
 | ||||||
|  | 	rawdb.WriteSnapshotRoot(diskdb, root) | ||||||
|  | 
 | ||||||
|  | 	base := &diskLayer{ | ||||||
|  | 		diskdb:     diskdb, | ||||||
|  | 		triedb:     triedb, | ||||||
|  | 		root:       root, | ||||||
|  | 		cache:      fastcache.New(cache * 1024 * 1024), | ||||||
|  | 		genMarker:  []byte{}, // Initialized but empty!
 | ||||||
|  | 		genPending: make(chan struct{}), | ||||||
|  | 		genAbort:   make(chan chan *generatorStats), | ||||||
|  | 	} | ||||||
|  | 	go base.generate(&generatorStats{wiping: wiper, start: time.Now()}) | ||||||
|  | 	return base | ||||||
|  | } | ||||||
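Usage sketch, assuming diskdb, triedb and headRoot are live handles into the node's state: generation proceeds in the background, and tests can block on the pending channel to wait for completion.

layer := generateSnapshot(diskdb, triedb, 256, headRoot, nil)
<-layer.genPending // closed once the snapshot is fully generated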
|  | 
 | ||||||
|  | // generate is a background thread that iterates over the state and storage tries,
 | ||||||
|  | // constructing the state snapshot. All the arguments are purely for statistics
 | ||||||
|  | // gathering and logging, since the method surfs the blocks as they arrive, often
 | ||||||
|  | // being restarted.
 | ||||||
|  | func (dl *diskLayer) generate(stats *generatorStats) { | ||||||
|  | 	// If a database wipe is in operation, wait until it's done
 | ||||||
|  | 	if stats.wiping != nil { | ||||||
|  | 		stats.Log("Wiper running, state snapshotting paused", dl.genMarker) | ||||||
|  | 		select { | ||||||
|  | 		// If wiper is done, resume normal mode of operation
 | ||||||
|  | 		case <-stats.wiping: | ||||||
|  | 			stats.wiping = nil | ||||||
|  | 			stats.start = time.Now() | ||||||
|  | 
 | ||||||
|  | 		// If the generator was aborted during the wipe, return
 | ||||||
|  | 		case abort := <-dl.genAbort: | ||||||
|  | 			abort <- stats | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// Create an account and state iterator pointing to the current generator marker
 | ||||||
|  | 	accTrie, err := trie.NewSecure(dl.root, dl.triedb) | ||||||
|  | 	if err != nil { | ||||||
|  | 		// The account trie is missing (GC), surf the chain until one becomes available
 | ||||||
|  | 		stats.Log("Trie missing, state snapshotting paused", dl.genMarker) | ||||||
|  | 
 | ||||||
|  | 		abort := <-dl.genAbort | ||||||
|  | 		abort <- stats | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	stats.Log("Resuming state snapshot generation", dl.genMarker) | ||||||
|  | 
 | ||||||
|  | 	var accMarker []byte | ||||||
|  | 	if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
 | ||||||
|  | 		accMarker = dl.genMarker[:common.HashLength] | ||||||
|  | 	} | ||||||
|  | 	accIt := trie.NewIterator(accTrie.NodeIterator(accMarker)) | ||||||
|  | 	batch := dl.diskdb.NewBatch() | ||||||
|  | 
 | ||||||
|  | 	// Iterate from the previous marker and continue generating the state snapshot
 | ||||||
|  | 	logged := time.Now() | ||||||
|  | 	for accIt.Next() { | ||||||
|  | 		// Retrieve the current account and flatten it into the internal format
 | ||||||
|  | 		accountHash := common.BytesToHash(accIt.Key) | ||||||
|  | 
 | ||||||
|  | 		var acc struct { | ||||||
|  | 			Nonce    uint64 | ||||||
|  | 			Balance  *big.Int | ||||||
|  | 			Root     common.Hash | ||||||
|  | 			CodeHash []byte | ||||||
|  | 		} | ||||||
|  | 		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil { | ||||||
|  | 			log.Crit("Invalid account encountered during snapshot creation", "err", err) | ||||||
|  | 		} | ||||||
|  | 		data := AccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash) | ||||||
|  | 
 | ||||||
|  | 		// If the account is not yet in-progress, write it out
 | ||||||
|  | 		if accMarker == nil || !bytes.Equal(accountHash[:], accMarker) { | ||||||
|  | 			rawdb.WriteAccountSnapshot(batch, accountHash, data) | ||||||
|  | 			stats.storage += common.StorageSize(1 + common.HashLength + len(data)) | ||||||
|  | 			stats.accounts++ | ||||||
|  | 		} | ||||||
|  | 		// If we've exceeded our batch allowance or termination was requested, flush to disk
 | ||||||
|  | 		var abort chan *generatorStats | ||||||
|  | 		select { | ||||||
|  | 		case abort = <-dl.genAbort: | ||||||
|  | 		default: | ||||||
|  | 		} | ||||||
|  | 		if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil { | ||||||
|  | 			// Only write and set the marker if we actually did something useful
 | ||||||
|  | 			if batch.ValueSize() > 0 { | ||||||
|  | 				batch.Write() | ||||||
|  | 				batch.Reset() | ||||||
|  | 
 | ||||||
|  | 				dl.lock.Lock() | ||||||
|  | 				dl.genMarker = accountHash[:] | ||||||
|  | 				dl.lock.Unlock() | ||||||
|  | 			} | ||||||
|  | 			if abort != nil { | ||||||
|  | 				stats.Log("Aborting state snapshot generation", accountHash[:]) | ||||||
|  | 				abort <- stats | ||||||
|  | 				return | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		// If the account is in-progress, continue where we left off (otherwise iterate all)
 | ||||||
|  | 		if acc.Root != emptyRoot { | ||||||
|  | 			storeTrie, err := trie.NewSecure(acc.Root, dl.triedb) | ||||||
|  | 			if err != nil { | ||||||
|  | 				log.Crit("Storage trie inaccessible for snapshot generation", "err", err) | ||||||
|  | 			} | ||||||
|  | 			var storeMarker []byte | ||||||
|  | 			if accMarker != nil && bytes.Equal(accountHash[:], accMarker) && len(dl.genMarker) > common.HashLength { | ||||||
|  | 				storeMarker = dl.genMarker[common.HashLength:] | ||||||
|  | 			} | ||||||
|  | 			storeIt := trie.NewIterator(storeTrie.NodeIterator(storeMarker)) | ||||||
|  | 			for storeIt.Next() { | ||||||
|  | 				rawdb.WriteStorageSnapshot(batch, accountHash, common.BytesToHash(storeIt.Key), storeIt.Value) | ||||||
|  | 				stats.storage += common.StorageSize(1 + 2*common.HashLength + len(storeIt.Value)) | ||||||
|  | 				stats.slots++ | ||||||
|  | 
 | ||||||
|  | 				// If we've exceeded our batch allowance or termination was requested, flush to disk
 | ||||||
|  | 				var abort chan *generatorStats | ||||||
|  | 				select { | ||||||
|  | 				case abort = <-dl.genAbort: | ||||||
|  | 				default: | ||||||
|  | 				} | ||||||
|  | 				if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil { | ||||||
|  | 					// Only write and set the marker if we actually did something useful
 | ||||||
|  | 					if batch.ValueSize() > 0 { | ||||||
|  | 						batch.Write() | ||||||
|  | 						batch.Reset() | ||||||
|  | 
 | ||||||
|  | 						dl.lock.Lock() | ||||||
|  | 						dl.genMarker = append(accountHash[:], storeIt.Key...) | ||||||
|  | 						dl.lock.Unlock() | ||||||
|  | 					} | ||||||
|  | 					if abort != nil { | ||||||
|  | 						stats.Log("Aborting state snapshot generation", append(accountHash[:], storeIt.Key...)) | ||||||
|  | 						abort <- stats | ||||||
|  | 						return | ||||||
|  | 					} | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		if time.Since(logged) > 8*time.Second { | ||||||
|  | 			stats.Log("Generating state snapshot", accIt.Key) | ||||||
|  | 			logged = time.Now() | ||||||
|  | 		} | ||||||
|  | 		// Some account processed, unmark the marker
 | ||||||
|  | 		accMarker = nil | ||||||
|  | 	} | ||||||
|  | 	// Snapshot fully generated, set the marker to nil
 | ||||||
|  | 	if batch.ValueSize() > 0 { | ||||||
|  | 		batch.Write() | ||||||
|  | 	} | ||||||
|  | 	log.Info("Generated state snapshot", "accounts", stats.accounts, "slots", stats.slots, | ||||||
|  | 		"storage", stats.storage, "elapsed", common.PrettyDuration(time.Since(stats.start))) | ||||||
|  | 
 | ||||||
|  | 	dl.lock.Lock() | ||||||
|  | 	dl.genMarker = nil | ||||||
|  | 	close(dl.genPending) | ||||||
|  | 	dl.lock.Unlock() | ||||||
|  | 
 | ||||||
|  | 	// Someone will be looking for us, wait it out
 | ||||||
|  | 	abort := <-dl.genAbort | ||||||
|  | 	abort <- nil | ||||||
|  | } | ||||||
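The shutdown path above is a channel-of-channels handshake. A caller-side sketch (hypothetical helper): send a fresh reply channel down genAbort and wait for the generator to answer with its statistics, which is nil if generation had already completed.

func stopGeneration(dl *diskLayer) *generatorStats {
	reply := make(chan *generatorStats)
	dl.genAbort <- reply
	return <-reply
}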
							
								
								
									
core/state/snapshot/iterator.go (new file, 204 lines)
							| @ -0,0 +1,204 @@ | |||||||
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | package snapshot | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"fmt" | ||||||
|  | 	"sort" | ||||||
|  | 
 | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | 	"github.com/ethereum/go-ethereum/core/rawdb" | ||||||
|  | 	"github.com/ethereum/go-ethereum/ethdb" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // AccountIterator is an iterator to step over all the accounts in a snapshot,
 | ||||||
|  | // which may or may not be composed of multiple layers.
 | ||||||
|  | type AccountIterator interface { | ||||||
|  | 	// Next steps the iterator forward one element, returning false if exhausted,
 | ||||||
|  | 	// or an error if iteration failed for some reason (e.g. root being iterated
 | ||||||
|  | 	// becomes stale and garbage collected).
 | ||||||
|  | 	Next() bool | ||||||
|  | 
 | ||||||
|  | 	// Error returns any failure that occurred during iteration, which might have
 | ||||||
|  | 	// caused a premature iteration exit (e.g. snapshot stack becoming stale).
 | ||||||
|  | 	Error() error | ||||||
|  | 
 | ||||||
|  | 	// Hash returns the hash of the account the iterator is currently at.
 | ||||||
|  | 	Hash() common.Hash | ||||||
|  | 
 | ||||||
|  | 	// Account returns the RLP encoded slim account the iterator is currently at.
 | ||||||
|  | 	// An error will be returned if the iterator becomes invalid (e.g. the
 | ||||||
|  | 	// snapshot stack becoming stale), retrievable via Error.
 | ||||||
|  | 	Account() []byte | ||||||
|  | 
 | ||||||
|  | 	// Release releases associated resources. Release should always succeed and
 | ||||||
|  | 	// can be called multiple times without causing error.
 | ||||||
|  | 	Release() | ||||||
|  | } | ||||||
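The interface follows the usual Go iterator contract: position with Next, read with Hash and Account, then check Error and Release when done. A minimal consumption sketch (the it value is assumed to come from one of the constructors below):

	for it.Next() {
		hash := it.Hash()    // hash the iterator is currently positioned on
		blob := it.Account() // RLP encoded slim account, nil if deleted below
		_, _ = hash, blob
	}
	if err := it.Error(); err != nil {
		// The snapshot stack went stale mid-iteration; retry on a fresh root.
	}
	it.Release()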
|  | 
 | ||||||
|  | // diffAccountIterator is an account iterator that steps over the accounts (both
 | ||||||
|  | // live and deleted) contained within a single diff layer. Higher order iterators
 | ||||||
|  | // will use the deleted accounts to skip deeper iterators.
 | ||||||
|  | type diffAccountIterator struct { | ||||||
|  | 	// curHash is the current hash the iterator is positioned on. The field is
 | ||||||
|  | 	// explicitly tracked since the referenced diff layer might go stale after
 | ||||||
|  | 	// the iterator was positioned and we don't want to fail accessing the old
 | ||||||
|  | 	// hash as long as the iterator is not touched any more.
 | ||||||
|  | 	curHash common.Hash | ||||||
|  | 
 | ||||||
|  | 	layer *diffLayer    // Live layer to retrieve values from
 | ||||||
|  | 	keys  []common.Hash // Keys left in the layer to iterate
 | ||||||
|  | 	fail  error         // Any failures encountered (stale)
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AccountIterator creates an account iterator over a single diff layer.
 | ||||||
|  | func (dl *diffLayer) AccountIterator(seek common.Hash) AccountIterator { | ||||||
|  | 	// Seek out the requested starting account
 | ||||||
|  | 	hashes := dl.AccountList() | ||||||
|  | 	index := sort.Search(len(hashes), func(i int) bool { | ||||||
|  | 		return bytes.Compare(seek[:], hashes[i][:]) < 0 | ||||||
|  | 	}) | ||||||
|  | 	// Assemble and return the already seeked iterator
 | ||||||
|  | 	return &diffAccountIterator{ | ||||||
|  | 		layer: dl, | ||||||
|  | 		keys:  hashes[index:], | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Next steps the iterator forward one element, returning false if exhausted.
 | ||||||
|  | func (it *diffAccountIterator) Next() bool { | ||||||
|  | 	// If the iterator was already stale, consider it a programmer error. Although
 | ||||||
|  | 	// we could just return false here, triggering this path would probably mean
 | ||||||
|  | 	// somebody forgot to check for Error, so let's blow up instead of undefined
 | ||||||
|  | 	// behavior that's hard to debug.
 | ||||||
|  | 	if it.fail != nil { | ||||||
|  | 		panic(fmt.Sprintf("called Next of failed iterator: %v", it.fail)) | ||||||
|  | 	} | ||||||
|  | 	// Stop iterating if all keys were exhausted
 | ||||||
|  | 	if len(it.keys) == 0 { | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 	if it.layer.Stale() { | ||||||
|  | 		it.fail, it.keys = ErrSnapshotStale, nil | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 	// Iterator seems to be still alive, retrieve and cache the live hash
 | ||||||
|  | 	it.curHash = it.keys[0] | ||||||
|  | 	// Key cached, shift the iterator and notify the user of success
 | ||||||
|  | 	it.keys = it.keys[1:] | ||||||
|  | 	return true | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Error returns any failure that occurred during iteration, which might have
 | ||||||
|  | // caused a premature iteration exit (e.g. snapshot stack becoming stale).
 | ||||||
|  | func (it *diffAccountIterator) Error() error { | ||||||
|  | 	return it.fail | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Hash returns the hash of the account the iterator is currently at.
 | ||||||
|  | func (it *diffAccountIterator) Hash() common.Hash { | ||||||
|  | 	return it.curHash | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Account returns the RLP encoded slim account the iterator is currently at.
 | ||||||
|  | // This method may _fail_, if the underlying layer has been flattened between
 | ||||||
|  | // the call to Next and Account. That type of error will set it.fail.
 | ||||||
|  | // This method assumes that flattening does not delete elements from
 | ||||||
|  | // the accountdata mapping (writing nil into it is fine though), and will panic
 | ||||||
|  | // if elements have been deleted.
 | ||||||
|  | func (it *diffAccountIterator) Account() []byte { | ||||||
|  | 	it.layer.lock.RLock() | ||||||
|  | 	blob, ok := it.layer.accountData[it.curHash] | ||||||
|  | 	if !ok { | ||||||
|  | 		if _, ok := it.layer.destructSet[it.curHash]; ok { | ||||||
|  | 			// Don't leak the held read lock on the destructed-account path
 | ||||||
|  | 			it.layer.lock.RUnlock() | ||||||
|  | 			return nil | ||||||
|  | 		} | ||||||
|  | 		panic(fmt.Sprintf("iterator referenced non-existent account: %x", it.curHash)) | ||||||
|  | 	} | ||||||
|  | 	it.layer.lock.RUnlock() | ||||||
|  | 	if it.layer.Stale() { | ||||||
|  | 		it.fail, it.keys = ErrSnapshotStale, nil | ||||||
|  | 	} | ||||||
|  | 	return blob | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Release is a noop for diff account iterators as there are no held resources.
 | ||||||
|  | func (it *diffAccountIterator) Release() {} | ||||||
|  | 
 | ||||||
|  | // diskAccountIterator is an account iterator that steps over the live accounts
 | ||||||
|  | // contained within a disk layer.
 | ||||||
|  | type diskAccountIterator struct { | ||||||
|  | 	layer *diskLayer | ||||||
|  | 	it    ethdb.Iterator | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AccountIterator creates an account iterator over a disk layer.
 | ||||||
|  | func (dl *diskLayer) AccountIterator(seek common.Hash) AccountIterator { | ||||||
|  | 	// TODO: Fix seek position, or remove seek parameter
 | ||||||
|  | 	return &diskAccountIterator{ | ||||||
|  | 		layer: dl, | ||||||
|  | 		it:    dl.diskdb.NewIteratorWithPrefix(rawdb.SnapshotAccountPrefix), | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Next steps the iterator forward one element, returning false if exhausted.
 | ||||||
|  | func (it *diskAccountIterator) Next() bool { | ||||||
|  | 	// If the iterator was already exhausted, don't bother
 | ||||||
|  | 	if it.it == nil { | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 	// Try to advance the iterator and release it if we reached the end
 | ||||||
|  | 	for { | ||||||
|  | 		if !it.it.Next() || !bytes.HasPrefix(it.it.Key(), rawdb.SnapshotAccountPrefix) { | ||||||
|  | 			it.it.Release() | ||||||
|  | 			it.it = nil | ||||||
|  | 			return false | ||||||
|  | 		} | ||||||
|  | 		if len(it.it.Key()) == len(rawdb.SnapshotAccountPrefix)+common.HashLength { | ||||||
|  | 			break | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return true | ||||||
|  | } | ||||||
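The length check in the loop is what separates well-formed account entries from any other keys that happen to share the table prefix: only keys of exactly prefix-plus-32-bytes are treated as accounts. The same filter as a standalone sketch (the helper name is illustrative, not part of the package):

	// isAccountKey reports whether a raw database key is a snapshot account entry.
	isAccountKey := func(key []byte) bool {
		return bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) &&
			len(key) == len(rawdb.SnapshotAccountPrefix)+common.HashLength
	}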
|  | 
 | ||||||
|  | // Error returns any failure that occurred during iteration, which might have
 | ||||||
|  | // caused a premature iteration exit (e.g. snapshot stack becoming stale).
 | ||||||
|  | //
 | ||||||
|  | // For the disk layer, the content cannot go stale mid-iteration; the only
 | ||||||
|  | // failures are those reported by the underlying database iterator.
 | ||||||
|  | func (it *diskAccountIterator) Error() error { | ||||||
|  | 	if it.it == nil { | ||||||
|  | 		return nil // Iterator exhausted and released, nothing can have failed | ||||||
|  | 	} | ||||||
|  | 	return it.it.Error() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Hash returns the hash of the account the iterator is currently at.
 | ||||||
|  | func (it *diskAccountIterator) Hash() common.Hash { | ||||||
|  | 	return common.BytesToHash(it.it.Key()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Account returns the RLP encoded slim account the iterator is currently at.
 | ||||||
|  | func (it *diskAccountIterator) Account() []byte { | ||||||
|  | 	return it.it.Value() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Release releases the database snapshot held during iteration.
 | ||||||
|  | func (it *diskAccountIterator) Release() { | ||||||
|  | 	// The iterator is auto-released on exhaustion, so make sure it's still alive
 | ||||||
|  | 	if it.it != nil { | ||||||
|  | 		it.it.Release() | ||||||
|  | 		it.it = nil | ||||||
|  | 	} | ||||||
|  | } | ||||||
115	core/state/snapshot/iterator_binary.go	Normal file
							| @ -0,0 +1,115 @@ | |||||||
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | package snapshot | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 
 | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // binaryAccountIterator is a simplistic iterator to step over the accounts in
 | ||||||
|  | // a snapshot, which may or may not be composed of multiple layers. Performance-
 | ||||||
|  | // wise this iterator is slow; it's meant for cross validating the fast one.
 | ||||||
|  | type binaryAccountIterator struct { | ||||||
|  | 	a     *diffAccountIterator | ||||||
|  | 	b     AccountIterator | ||||||
|  | 	aDone bool | ||||||
|  | 	bDone bool | ||||||
|  | 	k     common.Hash | ||||||
|  | 	fail  error | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // newBinaryAccountIterator creates a simplistic account iterator to step over
 | ||||||
|  | // all the accounts in a slow, but easily verifiable way.
 | ||||||
|  | func (dl *diffLayer) newBinaryAccountIterator() AccountIterator { | ||||||
|  | 	parent, ok := dl.parent.(*diffLayer) | ||||||
|  | 	if !ok { | ||||||
|  | 		// parent is the disk layer
 | ||||||
|  | 		return dl.AccountIterator(common.Hash{}) | ||||||
|  | 	} | ||||||
|  | 	l := &binaryAccountIterator{ | ||||||
|  | 		a: dl.AccountIterator(common.Hash{}).(*diffAccountIterator), | ||||||
|  | 		b: parent.newBinaryAccountIterator(), | ||||||
|  | 	} | ||||||
|  | 	l.aDone = !l.a.Next() | ||||||
|  | 	l.bDone = !l.b.Next() | ||||||
|  | 	return l | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Next steps the iterator forward one element, returning false if exhausted,
 | ||||||
|  | // or an error if iteration failed for some reason (e.g. root being iterated
 | ||||||
|  | // becomes stale and garbage collected).
 | ||||||
|  | func (it *binaryAccountIterator) Next() bool { | ||||||
|  | 	if it.aDone && it.bDone { | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 	nextB := it.b.Hash() | ||||||
|  | first: | ||||||
|  | 	nextA := it.a.Hash() | ||||||
|  | 	if it.aDone { | ||||||
|  | 		it.bDone = !it.b.Next() | ||||||
|  | 		it.k = nextB | ||||||
|  | 		return true | ||||||
|  | 	} | ||||||
|  | 	if it.bDone { | ||||||
|  | 		it.aDone = !it.a.Next() | ||||||
|  | 		it.k = nextA | ||||||
|  | 		return true | ||||||
|  | 	} | ||||||
|  | 	if diff := bytes.Compare(nextA[:], nextB[:]); diff < 0 { | ||||||
|  | 		it.aDone = !it.a.Next() | ||||||
|  | 		it.k = nextA | ||||||
|  | 		return true | ||||||
|  | 	} else if diff == 0 { | ||||||
|  | 		// Now we need to advance one of them
 | ||||||
|  | 		it.aDone = !it.a.Next() | ||||||
|  | 		goto first | ||||||
|  | 	} | ||||||
|  | 	it.bDone = !it.b.Next() | ||||||
|  | 	it.k = nextB | ||||||
|  | 	return true | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Error returns any failure that occurred during iteration, which might have
 | ||||||
|  | // caused a premature iteration exit (e.g. snapshot stack becoming stale).
 | ||||||
|  | func (it *binaryAccountIterator) Error() error { | ||||||
|  | 	return it.fail | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Hash returns the hash of the account the iterator is currently at.
 | ||||||
|  | func (it *binaryAccountIterator) Hash() common.Hash { | ||||||
|  | 	return it.k | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Account returns the RLP encoded slim account the iterator is currently at, or
 | ||||||
|  | // nil if the iterated snapshot stack became stale (you can check Error after
 | ||||||
|  | // to see if it failed or not).
 | ||||||
|  | func (it *binaryAccountIterator) Account() []byte { | ||||||
|  | 	blob, err := it.a.layer.AccountRLP(it.k) | ||||||
|  | 	if err != nil { | ||||||
|  | 		it.fail = err | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 	return blob | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Release recursively releases all the iterators in the stack.
 | ||||||
|  | func (it *binaryAccountIterator) Release() { | ||||||
|  | 	it.a.Release() | ||||||
|  | 	it.b.Release() | ||||||
|  | } | ||||||
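Conceptually the binary iterator is a merge of two sorted key streams in which the child layer wins on ties. A standalone illustration of that rule on plain hash slices (mergeSorted is illustrative, not part of the package):

	func mergeSorted(child, parent []common.Hash) []common.Hash {
		var out []common.Hash
		for len(child) > 0 && len(parent) > 0 {
			switch diff := bytes.Compare(child[0][:], parent[0][:]); {
			case diff < 0:
				out, child = append(out, child[0]), child[1:]
			case diff > 0:
				out, parent = append(out, parent[0]), parent[1:]
			default: // same key in both layers: the child's entry shadows the parent's
				out, child, parent = append(out, child[0]), child[1:], parent[1:]
			}
		}
		return append(append(out, child...), parent...)
	}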
302	core/state/snapshot/iterator_fast.go	Normal file
							| @ -0,0 +1,302 @@ | |||||||
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | package snapshot | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"fmt" | ||||||
|  | 	"sort" | ||||||
|  | 
 | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // weightedAccountIterator is an account iterator with an assigned weight. It is
 | ||||||
|  | // used to prioritise which account is the correct one if multiple iterators find
 | ||||||
|  | // the same one (modified in multiple consecutive blocks).
 | ||||||
|  | type weightedAccountIterator struct { | ||||||
|  | 	it       AccountIterator | ||||||
|  | 	priority int | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // weightedAccountIterators is a set of iterators implementing the sort.Interface.
 | ||||||
|  | type weightedAccountIterators []*weightedAccountIterator | ||||||
|  | 
 | ||||||
|  | // Len implements sort.Interface, returning the number of active iterators.
 | ||||||
|  | func (its weightedAccountIterators) Len() int { return len(its) } | ||||||
|  | 
 | ||||||
|  | // Less implements sort.Interface, returning which of two iterators in the stack
 | ||||||
|  | // is before the other.
 | ||||||
|  | func (its weightedAccountIterators) Less(i, j int) bool { | ||||||
|  | 	// Order the iterators primarily by the account hashes
 | ||||||
|  | 	hashI := its[i].it.Hash() | ||||||
|  | 	hashJ := its[j].it.Hash() | ||||||
|  | 
 | ||||||
|  | 	switch bytes.Compare(hashI[:], hashJ[:]) { | ||||||
|  | 	case -1: | ||||||
|  | 		return true | ||||||
|  | 	case 1: | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 	// Same account in multiple layers, split by priority
 | ||||||
|  | 	return its[i].priority < its[j].priority | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Swap implements sort.Interface, swapping two entries in the iterator stack.
 | ||||||
|  | func (its weightedAccountIterators) Swap(i, j int) { | ||||||
|  | 	its[i], its[j] = its[j], its[i] | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // fastAccountIterator is a more optimized multi-layer iterator which maintains a
 | ||||||
|  | // direct mapping of all iterators leading down to the bottom layer.
 | ||||||
|  | type fastAccountIterator struct { | ||||||
|  | 	tree       *Tree       // Snapshot tree to reinitialize stale sub-iterators with
 | ||||||
|  | 	root       common.Hash // Root hash to reinitialize stale sub-iterators through
 | ||||||
|  | 	curAccount []byte | ||||||
|  | 
 | ||||||
|  | 	iterators weightedAccountIterators | ||||||
|  | 	initiated bool | ||||||
|  | 	fail      error | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // newFastAccountIterator creates a new hierarchical account iterator with one
 | ||||||
|  | // element per diff layer. The returned combo iterator can be used to walk over
 | ||||||
|  | // the entire snapshot diff stack simultaneously.
 | ||||||
|  | func newFastAccountIterator(tree *Tree, root common.Hash, seek common.Hash) (AccountIterator, error) { | ||||||
|  | 	snap := tree.Snapshot(root) | ||||||
|  | 	if snap == nil { | ||||||
|  | 		return nil, fmt.Errorf("unknown snapshot: %x", root) | ||||||
|  | 	} | ||||||
|  | 	fi := &fastAccountIterator{ | ||||||
|  | 		tree: tree, | ||||||
|  | 		root: root, | ||||||
|  | 	} | ||||||
|  | 	current := snap.(snapshot) | ||||||
|  | 	for depth := 0; current != nil; depth++ { | ||||||
|  | 		fi.iterators = append(fi.iterators, &weightedAccountIterator{ | ||||||
|  | 			it:       current.AccountIterator(seek), | ||||||
|  | 			priority: depth, | ||||||
|  | 		}) | ||||||
|  | 		current = current.Parent() | ||||||
|  | 	} | ||||||
|  | 	fi.init() | ||||||
|  | 	return fi, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // init walks over all the iterators and resolves any clashes between them, after
 | ||||||
|  | // which it prepares the stack for step-by-step iteration.
 | ||||||
|  | func (fi *fastAccountIterator) init() { | ||||||
|  | 	// Track which account hashes the iterators are positioned on
 | ||||||
|  | 	var positioned = make(map[common.Hash]int) | ||||||
|  | 
 | ||||||
|  | 	// Position all iterators and track how many remain live
 | ||||||
|  | 	for i := 0; i < len(fi.iterators); i++ { | ||||||
|  | 		// Retrieve the first element and if it clashes with a previous iterator,
 | ||||||
|  | 		// advance either the current one or the old one. Repeat until nothing is
 | ||||||
|  | 		// clashing any more.
 | ||||||
|  | 		it := fi.iterators[i] | ||||||
|  | 		for { | ||||||
|  | 			// If the iterator is exhausted, drop it off the end
 | ||||||
|  | 			if !it.it.Next() { | ||||||
|  | 				it.it.Release() | ||||||
|  | 				last := len(fi.iterators) - 1 | ||||||
|  | 
 | ||||||
|  | 				fi.iterators[i] = fi.iterators[last] | ||||||
|  | 				fi.iterators[last] = nil | ||||||
|  | 				fi.iterators = fi.iterators[:last] | ||||||
|  | 
 | ||||||
|  | 				i-- | ||||||
|  | 				break | ||||||
|  | 			} | ||||||
|  | 			// The iterator is still alive, check for collisions with previous ones
 | ||||||
|  | 			hash := it.it.Hash() | ||||||
|  | 			if other, exist := positioned[hash]; !exist { | ||||||
|  | 				positioned[hash] = i | ||||||
|  | 				break | ||||||
|  | 			} else { | ||||||
|  | 				// Iterators collide, one needs to be progressed, use priority to
 | ||||||
|  | 				// determine which.
 | ||||||
|  | 				//
 | ||||||
|  | 				// This whole else-block can be avoided, if we instead
 | ||||||
|  | 				// do an initial priority-sort of the iterators. If we do that,
 | ||||||
|  | 				// then we'll only wind up here if a lower-priority (preferred) iterator
 | ||||||
|  | 				// has the same value, and then we will always just continue.
 | ||||||
|  | 				// However, it costs an extra sort, so it's probably not better
 | ||||||
|  | 				if fi.iterators[other].priority < it.priority { | ||||||
|  | 					// The 'it' should be progressed
 | ||||||
|  | 					continue | ||||||
|  | 				} else { | ||||||
|  | 					// The 'other' should be progressed, swap them
 | ||||||
|  | 					it = fi.iterators[other] | ||||||
|  | 					fi.iterators[other], fi.iterators[i] = fi.iterators[i], fi.iterators[other] | ||||||
|  | 					continue | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// Re-sort the entire list
 | ||||||
|  | 	sort.Sort(fi.iterators) | ||||||
|  | 	fi.initiated = false | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Next steps the iterator forward one element, returning false if exhausted.
 | ||||||
|  | func (fi *fastAccountIterator) Next() bool { | ||||||
|  | 	if len(fi.iterators) == 0 { | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 	if !fi.initiated { | ||||||
|  | 		// Don't forward first time -- we had to 'Next' once in order to
 | ||||||
|  | 		// do the sorting already
 | ||||||
|  | 		fi.initiated = true | ||||||
|  | 		fi.curAccount = fi.iterators[0].it.Account() | ||||||
|  | 		if innerErr := fi.iterators[0].it.Error(); innerErr != nil { | ||||||
|  | 			fi.fail = innerErr | ||||||
|  | 			return false | ||||||
|  | 		} | ||||||
|  | 		if fi.curAccount != nil { | ||||||
|  | 			return true | ||||||
|  | 		} | ||||||
|  | 		// Implicit else: we've hit a nil-account, and need to fall through to the
 | ||||||
|  | 		// loop below to land on something non-nil
 | ||||||
|  | 	} | ||||||
|  | 	// If an account is deleted in one of the layers, the key will still be there,
 | ||||||
|  | 	// but the actual value will be nil. However, the iterator should not
 | ||||||
|  | 	// export nil-values (but instead simply omit the key), so we need to loop
 | ||||||
|  | 	// here until we either
 | ||||||
|  | 	//  - get a non-nil value,
 | ||||||
|  | 	//  - hit an error,
 | ||||||
|  | 	//  - or exhaust the iterator
 | ||||||
|  | 	for { | ||||||
|  | 		if !fi.next(0) { | ||||||
|  | 			return false // exhausted
 | ||||||
|  | 		} | ||||||
|  | 		fi.curAccount = fi.iterators[0].it.Account() | ||||||
|  | 		if innerErr := fi.iterators[0].it.Error(); innerErr != nil { | ||||||
|  | 			fi.fail = innerErr | ||||||
|  | 			return false // error
 | ||||||
|  | 		} | ||||||
|  | 		if fi.curAccount != nil { | ||||||
|  | 			break // non-nil value found
 | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return true | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // next handles the next operation internally and should be invoked when we know
 | ||||||
|  | // that two elements in the list may have the same value.
 | ||||||
|  | //
 | ||||||
|  | // For example, if the iterated hashes become [2,3,5,5,8,9,10], then we should
 | ||||||
|  | // invoke next(3), which will call Next on elem 3 (the second '5') and will
 | ||||||
|  | // cascade along the list, applying the same operation if needed.
 | ||||||
|  | func (fi *fastAccountIterator) next(idx int) bool { | ||||||
|  | 	// If this particular iterator got exhausted, remove it and return true (the
 | ||||||
|  | 	// next one is surely not exhausted yet, otherwise it would have been removed
 | ||||||
|  | 	// already).
 | ||||||
|  | 	if it := fi.iterators[idx].it; !it.Next() { | ||||||
|  | 		it.Release() | ||||||
|  | 
 | ||||||
|  | 		fi.iterators = append(fi.iterators[:idx], fi.iterators[idx+1:]...) | ||||||
|  | 		return len(fi.iterators) > 0 | ||||||
|  | 	} | ||||||
|  | 	// If there's no one left to cascade into, return
 | ||||||
|  | 	if idx == len(fi.iterators)-1 { | ||||||
|  | 		return true | ||||||
|  | 	} | ||||||
|  | 	// We next-ed the iterator at 'idx', now we may have to re-sort that element
 | ||||||
|  | 	var ( | ||||||
|  | 		cur, next         = fi.iterators[idx], fi.iterators[idx+1] | ||||||
|  | 		curHash, nextHash = cur.it.Hash(), next.it.Hash() | ||||||
|  | 	) | ||||||
|  | 	if diff := bytes.Compare(curHash[:], nextHash[:]); diff < 0 { | ||||||
|  | 		// It is still in correct place
 | ||||||
|  | 		return true | ||||||
|  | 	} else if diff == 0 && cur.priority < next.priority { | ||||||
|  | 		// So still in correct place, but we need to iterate on the next
 | ||||||
|  | 		fi.next(idx + 1) | ||||||
|  | 		return true | ||||||
|  | 	} | ||||||
|  | 	// At this point, the iterator is in the wrong location, but the remaining
 | ||||||
|  | 	// list is sorted. Find out where to move the item.
 | ||||||
|  | 	clash := -1 | ||||||
|  | 	index := sort.Search(len(fi.iterators), func(n int) bool { | ||||||
|  | 		// The iterator always advances forward, so anything before the old slot
 | ||||||
|  | 		// is known to be behind us, so just skip them altogether. This actually
 | ||||||
|  | 		// is an important clause since the sort order got invalidated.
 | ||||||
|  | 		if n < idx { | ||||||
|  | 			return false | ||||||
|  | 		} | ||||||
|  | 		if n == len(fi.iterators)-1 { | ||||||
|  | 			// Can always place an elem last
 | ||||||
|  | 			return true | ||||||
|  | 		} | ||||||
|  | 		nextHash := fi.iterators[n+1].it.Hash() | ||||||
|  | 		if diff := bytes.Compare(curHash[:], nextHash[:]); diff < 0 { | ||||||
|  | 			return true | ||||||
|  | 		} else if diff > 0 { | ||||||
|  | 			return false | ||||||
|  | 		} | ||||||
|  | 		// The elem we're placing it next to has the same value,
 | ||||||
|  | 		// so whichever winds up on n+1 will need further iteration
 | ||||||
|  | 		clash = n + 1 | ||||||
|  | 
 | ||||||
|  | 		return cur.priority < fi.iterators[n+1].priority | ||||||
|  | 	}) | ||||||
|  | 	fi.move(idx, index) | ||||||
|  | 	if clash != -1 { | ||||||
|  | 		fi.next(clash) | ||||||
|  | 	} | ||||||
|  | 	return true | ||||||
|  | } | ||||||
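To make the cascade concrete, a worked trace of the example from the comment above (values stand in for hashes, indices for iterators):

	// Positions hold [2, 3, 5, 5, 8, 9, 10] and next(3) is invoked:
	//  1. iterator 3 (the second '5') advances, say to 11
	//  2. 11 is now out of order, so sort.Search scans slots 3..6 and settles
	//     on the last position, since 11 is larger than 8, 9 and 10
	//  3. move(3, 6) slides [8, 9, 10] one slot left: [2, 3, 5, 8, 9, 10, 11]
	//  4. no equal neighbour was created, so clash stays -1 and the cascade ends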
|  | 
 | ||||||
|  | // move advances an iterator to another position in the list.
 | ||||||
|  | func (fi *fastAccountIterator) move(index, newpos int) { | ||||||
|  | 	elem := fi.iterators[index] | ||||||
|  | 	copy(fi.iterators[index:], fi.iterators[index+1:newpos+1]) | ||||||
|  | 	fi.iterators[newpos] = elem | ||||||
|  | } | ||||||
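move is the standard extract-slide-reinsert rotation; the same manipulation on a plain int slice (illustrative only):

	vals := []int{10, 20, 30, 40, 50}
	elem := vals[1]           // move(1, 3): extract the element at index 1
	copy(vals[1:], vals[2:4]) // slide the displaced elements one slot left
	vals[3] = elem            // re-insert at the new position
	// vals is now [10, 30, 40, 20, 50]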
|  | 
 | ||||||
|  | // Error returns any failure that occurred during iteration, which might have
 | ||||||
|  | // caused a premature iteration exit (e.g. snapshot stack becoming stale).
 | ||||||
|  | func (fi *fastAccountIterator) Error() error { | ||||||
|  | 	return fi.fail | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Hash returns the hash of the account the iterator is currently at.
 | ||||||
|  | func (fi *fastAccountIterator) Hash() common.Hash { | ||||||
|  | 	return fi.iterators[0].it.Hash() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Account returns the RLP encoded slim account the iterator is currently at.
 | ||||||
|  | func (fi *fastAccountIterator) Account() []byte { | ||||||
|  | 	return fi.curAccount | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Release iterates over all the remaining live layer iterators and releases each
 | ||||||
|  | // of them individually.
 | ||||||
|  | func (fi *fastAccountIterator) Release() { | ||||||
|  | 	for _, it := range fi.iterators { | ||||||
|  | 		it.it.Release() | ||||||
|  | 	} | ||||||
|  | 	fi.iterators = nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Debug is a convenience helper during testing
 | ||||||
|  | func (fi *fastAccountIterator) Debug() { | ||||||
|  | 	for _, it := range fi.iterators { | ||||||
|  | 		fmt.Printf("[p=%v v=%v] ", it.priority, it.it.Hash()[0]) | ||||||
|  | 	} | ||||||
|  | 	fmt.Println() | ||||||
|  | } | ||||||
662	core/state/snapshot/iterator_test.go	Normal file
							| @ -0,0 +1,662 @@ | |||||||
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | package snapshot | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"encoding/binary" | ||||||
|  | 	"fmt" | ||||||
|  | 	"math/rand" | ||||||
|  | 	"testing" | ||||||
|  | 
 | ||||||
|  | 	"github.com/VictoriaMetrics/fastcache" | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | 	"github.com/ethereum/go-ethereum/core/rawdb" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // TestAccountIteratorBasics tests some simple single-layer iteration
 | ||||||
|  | func TestAccountIteratorBasics(t *testing.T) { | ||||||
|  | 	var ( | ||||||
|  | 		destructs = make(map[common.Hash]struct{}) | ||||||
|  | 		accounts  = make(map[common.Hash][]byte) | ||||||
|  | 		storage   = make(map[common.Hash]map[common.Hash][]byte) | ||||||
|  | 	) | ||||||
|  | 	// Fill up a parent
 | ||||||
|  | 	for i := 0; i < 100; i++ { | ||||||
|  | 		h := randomHash() | ||||||
|  | 		data := randomAccount() | ||||||
|  | 
 | ||||||
|  | 		accounts[h] = data | ||||||
|  | 		if rand.Intn(4) == 0 { | ||||||
|  | 			destructs[h] = struct{}{} | ||||||
|  | 		} | ||||||
|  | 		if rand.Intn(2) == 0 { | ||||||
|  | 			accStorage := make(map[common.Hash][]byte) | ||||||
|  | 			value := make([]byte, 32) | ||||||
|  | 			rand.Read(value) | ||||||
|  | 			accStorage[randomHash()] = value | ||||||
|  | 			storage[h] = accStorage | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// Add some (identical) layers on top
 | ||||||
|  | 	parent := newDiffLayer(emptyLayer(), common.Hash{}, copyDestructs(destructs), copyAccounts(accounts), copyStorage(storage)) | ||||||
|  | 	it := parent.AccountIterator(common.Hash{}) | ||||||
|  | 	verifyIterator(t, 100, it) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type testIterator struct { | ||||||
|  | 	values []byte | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func newTestIterator(values ...byte) *testIterator { | ||||||
|  | 	return &testIterator{values} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (ti *testIterator) Seek(common.Hash) { | ||||||
|  | 	panic("implement me") | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (ti *testIterator) Next() bool { | ||||||
|  | 	ti.values = ti.values[1:] | ||||||
|  | 	return len(ti.values) > 0 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (ti *testIterator) Error() error { | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (ti *testIterator) Hash() common.Hash { | ||||||
|  | 	return common.BytesToHash([]byte{ti.values[0]}) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (ti *testIterator) Account() []byte { | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (ti *testIterator) Release() {} | ||||||
|  | 
 | ||||||
|  | func TestFastIteratorBasics(t *testing.T) { | ||||||
|  | 	type testCase struct { | ||||||
|  | 		lists   [][]byte | ||||||
|  | 		expKeys []byte | ||||||
|  | 	} | ||||||
|  | 	for i, tc := range []testCase{ | ||||||
|  | 		{lists: [][]byte{{0, 1, 8}, {1, 2, 8}, {2, 9}, {4}, | ||||||
|  | 			{7, 14, 15}, {9, 13, 15, 16}}, | ||||||
|  | 			expKeys: []byte{0, 1, 2, 4, 7, 8, 9, 13, 14, 15, 16}}, | ||||||
|  | 		{lists: [][]byte{{0, 8}, {1, 2, 8}, {7, 14, 15}, {8, 9}, | ||||||
|  | 			{9, 10}, {10, 13, 15, 16}}, | ||||||
|  | 			expKeys: []byte{0, 1, 2, 7, 8, 9, 10, 13, 14, 15, 16}}, | ||||||
|  | 	} { | ||||||
|  | 		var iterators []*weightedAccountIterator | ||||||
|  | 		for i, data := range tc.lists { | ||||||
|  | 			it := newTestIterator(data...) | ||||||
|  | 			iterators = append(iterators, &weightedAccountIterator{it, i}) | ||||||
|  | 
 | ||||||
|  | 		} | ||||||
|  | 		fi := &fastAccountIterator{ | ||||||
|  | 			iterators: iterators, | ||||||
|  | 			initiated: false, | ||||||
|  | 		} | ||||||
|  | 		count := 0 | ||||||
|  | 		for fi.Next() { | ||||||
|  | 			if got, exp := fi.Hash()[31], tc.expKeys[count]; exp != got { | ||||||
|  | 				t.Errorf("tc %d, [%d]: got %d exp %d", i, count, got, exp) | ||||||
|  | 			} | ||||||
|  | 			count++ | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func verifyIterator(t *testing.T, expCount int, it AccountIterator) { | ||||||
|  | 	t.Helper() | ||||||
|  | 
 | ||||||
|  | 	var ( | ||||||
|  | 		count = 0 | ||||||
|  | 		last  = common.Hash{} | ||||||
|  | 	) | ||||||
|  | 	for it.Next() { | ||||||
|  | 		hash := it.Hash() | ||||||
|  | 		if bytes.Compare(last[:], hash[:]) >= 0 { | ||||||
|  | 			t.Errorf("wrong order: %x >= %x", last, hash) | ||||||
|  | 		} | ||||||
|  | 		if it.Account() == nil { | ||||||
|  | 			t.Errorf("iterator returned nil-value for hash %x", hash) | ||||||
|  | 		} | ||||||
|  | 		count++ | ||||||
|  | 	} | ||||||
|  | 	if count != expCount { | ||||||
|  | 		t.Errorf("iterator count mismatch: have %d, want %d", count, expCount) | ||||||
|  | 	} | ||||||
|  | 	if err := it.Error(); err != nil { | ||||||
|  | 		t.Errorf("iterator failed: %v", err) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // TestAccountIteratorTraversal tests some simple multi-layer iteration.
 | ||||||
|  | func TestAccountIteratorTraversal(t *testing.T) { | ||||||
|  | 	// Create an empty base layer and a snapshot tree out of it
 | ||||||
|  | 	base := &diskLayer{ | ||||||
|  | 		diskdb: rawdb.NewMemoryDatabase(), | ||||||
|  | 		root:   common.HexToHash("0x01"), | ||||||
|  | 		cache:  fastcache.New(1024 * 500), | ||||||
|  | 	} | ||||||
|  | 	snaps := &Tree{ | ||||||
|  | 		layers: map[common.Hash]snapshot{ | ||||||
|  | 			base.root: base, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 	// Stack three diff layers on top with various overlaps
 | ||||||
|  | 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, | ||||||
|  | 		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil) | ||||||
|  | 
 | ||||||
|  | 	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, | ||||||
|  | 		randomAccountSet("0xbb", "0xdd", "0xf0"), nil) | ||||||
|  | 
 | ||||||
|  | 	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, | ||||||
|  | 		randomAccountSet("0xcc", "0xf0", "0xff"), nil) | ||||||
|  | 
 | ||||||
|  | 	// Verify the single and multi-layer iterators
 | ||||||
|  | 	head := snaps.Snapshot(common.HexToHash("0x04")) | ||||||
|  | 
 | ||||||
|  | 	verifyIterator(t, 3, head.(snapshot).AccountIterator(common.Hash{})) | ||||||
|  | 	verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator()) | ||||||
|  | 
 | ||||||
|  | 	it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) | ||||||
|  | 	defer it.Release() | ||||||
|  | 
 | ||||||
|  | 	verifyIterator(t, 7, it) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // TestAccountIteratorTraversalValues tests some multi-layer iteration, where we
 | ||||||
|  | // also expect the correct values to show up.
 | ||||||
|  | func TestAccountIteratorTraversalValues(t *testing.T) { | ||||||
|  | 	// Create an empty base layer and a snapshot tree out of it
 | ||||||
|  | 	base := &diskLayer{ | ||||||
|  | 		diskdb: rawdb.NewMemoryDatabase(), | ||||||
|  | 		root:   common.HexToHash("0x01"), | ||||||
|  | 		cache:  fastcache.New(1024 * 500), | ||||||
|  | 	} | ||||||
|  | 	snaps := &Tree{ | ||||||
|  | 		layers: map[common.Hash]snapshot{ | ||||||
|  | 			base.root: base, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 	// Create a batch of account sets to seed subsequent layers with
 | ||||||
|  | 	var ( | ||||||
|  | 		a = make(map[common.Hash][]byte) | ||||||
|  | 		b = make(map[common.Hash][]byte) | ||||||
|  | 		c = make(map[common.Hash][]byte) | ||||||
|  | 		d = make(map[common.Hash][]byte) | ||||||
|  | 		e = make(map[common.Hash][]byte) | ||||||
|  | 		f = make(map[common.Hash][]byte) | ||||||
|  | 		g = make(map[common.Hash][]byte) | ||||||
|  | 		h = make(map[common.Hash][]byte) | ||||||
|  | 	) | ||||||
|  | 	for i := byte(2); i < 0xff; i++ { | ||||||
|  | 		a[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 0, i)) | ||||||
|  | 		if i > 20 && i%2 == 0 { | ||||||
|  | 			b[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 1, i)) | ||||||
|  | 		} | ||||||
|  | 		if i%4 == 0 { | ||||||
|  | 			c[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 2, i)) | ||||||
|  | 		} | ||||||
|  | 		if i%7 == 0 { | ||||||
|  | 			d[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 3, i)) | ||||||
|  | 		} | ||||||
|  | 		if i%8 == 0 { | ||||||
|  | 			e[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 4, i)) | ||||||
|  | 		} | ||||||
|  | 		if i > 50 && i < 85 { | ||||||
|  | 			f[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 5, i)) | ||||||
|  | 		} | ||||||
|  | 		if i%64 == 0 { | ||||||
|  | 			g[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 6, i)) | ||||||
|  | 		} | ||||||
|  | 		if i%128 == 0 { | ||||||
|  | 			h[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 7, i)) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// Assemble a stack of snapshots from the account layers
 | ||||||
|  | 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, a, nil) | ||||||
|  | 	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, b, nil) | ||||||
|  | 	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, c, nil) | ||||||
|  | 	snaps.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), nil, d, nil) | ||||||
|  | 	snaps.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), nil, e, nil) | ||||||
|  | 	snaps.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), nil, f, nil) | ||||||
|  | 	snaps.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), nil, g, nil) | ||||||
|  | 	snaps.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), nil, h, nil) | ||||||
|  | 
 | ||||||
|  | 	it, _ := snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{}) | ||||||
|  | 	defer it.Release() | ||||||
|  | 
 | ||||||
|  | 	head := snaps.Snapshot(common.HexToHash("0x09")) | ||||||
|  | 	for it.Next() { | ||||||
|  | 		hash := it.Hash() | ||||||
|  | 		want, err := head.AccountRLP(hash) | ||||||
|  | 		if err != nil { | ||||||
|  | 			t.Fatalf("failed to retrieve expected account: %v", err) | ||||||
|  | 		} | ||||||
|  | 		if have := it.Account(); !bytes.Equal(want, have) { | ||||||
|  | 			t.Fatalf("hash %x: account mismatch: have %x, want %x", hash, have, want) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // This testcase is notorious: all layers contain the exact same 200 accounts.
 | ||||||
|  | func TestAccountIteratorLargeTraversal(t *testing.T) { | ||||||
|  | 	// Create a custom account factory to recreate the same addresses
 | ||||||
|  | 	makeAccounts := func(num int) map[common.Hash][]byte { | ||||||
|  | 		accounts := make(map[common.Hash][]byte) | ||||||
|  | 		for i := 0; i < num; i++ { | ||||||
|  | 			h := common.Hash{} | ||||||
|  | 			binary.BigEndian.PutUint64(h[:], uint64(i+1)) | ||||||
|  | 			accounts[h] = randomAccount() | ||||||
|  | 		} | ||||||
|  | 		return accounts | ||||||
|  | 	} | ||||||
|  | 	// Build up a large stack of snapshots
 | ||||||
|  | 	base := &diskLayer{ | ||||||
|  | 		diskdb: rawdb.NewMemoryDatabase(), | ||||||
|  | 		root:   common.HexToHash("0x01"), | ||||||
|  | 		cache:  fastcache.New(1024 * 500), | ||||||
|  | 	} | ||||||
|  | 	snaps := &Tree{ | ||||||
|  | 		layers: map[common.Hash]snapshot{ | ||||||
|  | 			base.root: base, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 	for i := 1; i < 128; i++ { | ||||||
|  | 		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil) | ||||||
|  | 	} | ||||||
|  | 	// Iterate the entire stack and ensure everything is hit only once
 | ||||||
|  | 	head := snaps.Snapshot(common.HexToHash("0x80")) | ||||||
|  | 	verifyIterator(t, 200, head.(snapshot).AccountIterator(common.Hash{})) | ||||||
|  | 	verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator()) | ||||||
|  | 
 | ||||||
|  | 	it, _ := snaps.AccountIterator(common.HexToHash("0x80"), common.Hash{}) | ||||||
|  | 	defer it.Release() | ||||||
|  | 
 | ||||||
|  | 	verifyIterator(t, 200, it) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // TestAccountIteratorFlattening tests what happens when we
 | ||||||
|  | // - have a live iterator on child C (parent C1 -> C2 .. CN)
 | ||||||
|  | // - flatten C2 all the way into CN
 | ||||||
|  | // - continue iterating
 | ||||||
|  | func TestAccountIteratorFlattening(t *testing.T) { | ||||||
|  | 	// Create an empty base layer and a snapshot tree out of it
 | ||||||
|  | 	base := &diskLayer{ | ||||||
|  | 		diskdb: rawdb.NewMemoryDatabase(), | ||||||
|  | 		root:   common.HexToHash("0x01"), | ||||||
|  | 		cache:  fastcache.New(1024 * 500), | ||||||
|  | 	} | ||||||
|  | 	snaps := &Tree{ | ||||||
|  | 		layers: map[common.Hash]snapshot{ | ||||||
|  | 			base.root: base, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 	// Create a stack of diffs on top
 | ||||||
|  | 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, | ||||||
|  | 		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil) | ||||||
|  | 
 | ||||||
|  | 	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, | ||||||
|  | 		randomAccountSet("0xbb", "0xdd", "0xf0"), nil) | ||||||
|  | 
 | ||||||
|  | 	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, | ||||||
|  | 		randomAccountSet("0xcc", "0xf0", "0xff"), nil) | ||||||
|  | 
 | ||||||
|  | 	// Create an iterator and flatten the data from underneath it
 | ||||||
|  | 	it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) | ||||||
|  | 	defer it.Release() | ||||||
|  | 
 | ||||||
|  | 	if err := snaps.Cap(common.HexToHash("0x04"), 1); err != nil { | ||||||
|  | 		t.Fatalf("failed to flatten snapshot stack: %v", err) | ||||||
|  | 	} | ||||||
|  | 	//verifyIterator(t, 7, it)
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func TestAccountIteratorSeek(t *testing.T) { | ||||||
|  | 	// Create a snapshot stack with some initial data
 | ||||||
|  | 	base := &diskLayer{ | ||||||
|  | 		diskdb: rawdb.NewMemoryDatabase(), | ||||||
|  | 		root:   common.HexToHash("0x01"), | ||||||
|  | 		cache:  fastcache.New(1024 * 500), | ||||||
|  | 	} | ||||||
|  | 	snaps := &Tree{ | ||||||
|  | 		layers: map[common.Hash]snapshot{ | ||||||
|  | 			base.root: base, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, | ||||||
|  | 		randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil) | ||||||
|  | 
 | ||||||
|  | 	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, | ||||||
|  | 		randomAccountSet("0xbb", "0xdd", "0xf0"), nil) | ||||||
|  | 
 | ||||||
|  | 	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, | ||||||
|  | 		randomAccountSet("0xcc", "0xf0", "0xff"), nil) | ||||||
|  | 
 | ||||||
|  | 	// Construct various iterators and ensure their traversal is correct
 | ||||||
|  | 	it, _ := snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xdd")) | ||||||
|  | 	defer it.Release() | ||||||
|  | 	verifyIterator(t, 3, it) // expected: ee, f0, ff
 | ||||||
|  | 
 | ||||||
|  | 	it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xaa")) | ||||||
|  | 	defer it.Release() | ||||||
|  | 	verifyIterator(t, 3, it) // expected: ee, f0, ff
 | ||||||
|  | 
 | ||||||
|  | 	it, _ = snaps.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xff")) | ||||||
|  | 	defer it.Release() | ||||||
|  | 	verifyIterator(t, 0, it) // expected: nothing
 | ||||||
|  | 
 | ||||||
|  | 	it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xbb")) | ||||||
|  | 	defer it.Release() | ||||||
|  | 	verifyIterator(t, 5, it) // expected: cc, dd, ee, f0, ff
 | ||||||
|  | 
 | ||||||
|  | 	it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xef")) | ||||||
|  | 	defer it.Release() | ||||||
|  | 	verifyIterator(t, 2, it) // expected: f0, ff
 | ||||||
|  | 
 | ||||||
|  | 	it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xf0")) | ||||||
|  | 	defer it.Release() | ||||||
|  | 	verifyIterator(t, 1, it) // expected: ff
 | ||||||
|  | 
 | ||||||
|  | 	it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xff")) | ||||||
|  | 	defer it.Release() | ||||||
|  | 	verifyIterator(t, 0, it) // expected: nothing
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // TestIteratorDeletions tests that the iterator behaves correctly when there are
 | ||||||
|  | // deleted accounts (where the Account() value is nil). The iterator
 | ||||||
|  | // should not output any accounts or nil-values for those cases.
 | ||||||
|  | func TestIteratorDeletions(t *testing.T) { | ||||||
|  | 	// Create an empty base layer and a snapshot tree out of it
 | ||||||
|  | 	base := &diskLayer{ | ||||||
|  | 		diskdb: rawdb.NewMemoryDatabase(), | ||||||
|  | 		root:   common.HexToHash("0x01"), | ||||||
|  | 		cache:  fastcache.New(1024 * 500), | ||||||
|  | 	} | ||||||
|  | 	snaps := &Tree{ | ||||||
|  | 		layers: map[common.Hash]snapshot{ | ||||||
|  | 			base.root: base, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 	// Stack three diff layers on top with various overlaps
 | ||||||
|  | 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), | ||||||
|  | 		nil, randomAccountSet("0x11", "0x22", "0x33"), nil) | ||||||
|  | 
 | ||||||
|  | 	deleted := common.HexToHash("0x22") | ||||||
|  | 	destructed := map[common.Hash]struct{}{ | ||||||
|  | 		deleted: struct{}{}, | ||||||
|  | 	} | ||||||
|  | 	snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), | ||||||
|  | 		destructed, randomAccountSet("0x11", "0x33"), nil) | ||||||
|  | 
 | ||||||
|  | 	snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), | ||||||
|  | 		nil, randomAccountSet("0x33", "0x44", "0x55"), nil) | ||||||
|  | 
 | ||||||
|  | 	// The output should be 11,33,44,55
 | ||||||
|  | 	it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) | ||||||
|  | 	// Do a quick check
 | ||||||
|  | 	verifyIterator(t, 4, it) | ||||||
|  | 	it.Release() | ||||||
|  | 
 | ||||||
|  | 	// And a more detailed verification that we indeed do not see '0x22'
 | ||||||
|  | 	it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) | ||||||
|  | 	defer it.Release() | ||||||
|  | 	for it.Next() { | ||||||
|  | 		hash := it.Hash() | ||||||
|  | 		if it.Account() == nil { | ||||||
|  | 			t.Errorf("iterator returned nil-value for hash %x", hash) | ||||||
|  | 		} | ||||||
|  | 		if hash == deleted { | ||||||
|  | 			t.Errorf("expected deleted elem %x to not be returned by iterator", deleted) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // BenchmarkAccountIteratorTraversal is a bit notorious -- all layers contain the
 | ||||||
|  | // exact same 200 accounts. That means that we need to process 2000 items, but
 | ||||||
|  | // only spit out 200 values eventually.
 | ||||||
|  | //
 | ||||||
|  | // The value-fetching benchmark is easy on the binary iterator, since it never has to reach
 | ||||||
|  | // down at any depth for retrieving the values -- all are on the topmost layer.
 | ||||||
|  | //
 | ||||||
|  | // BenchmarkAccountIteratorTraversal/binary_iterator_keys-6         	    2239	    483674 ns/op
 | ||||||
|  | // BenchmarkAccountIteratorTraversal/binary_iterator_values-6       	    2403	    501810 ns/op
 | ||||||
|  | // BenchmarkAccountIteratorTraversal/fast_iterator_keys-6           	    1923	    677966 ns/op
 | ||||||
|  | // BenchmarkAccountIteratorTraversal/fast_iterator_values-6         	    1741	    649967 ns/op
 | ||||||
|  | func BenchmarkAccountIteratorTraversal(b *testing.B) { | ||||||
|  | 	// Create a custom account factory to recreate the same addresses
 | ||||||
|  | 	makeAccounts := func(num int) map[common.Hash][]byte { | ||||||
|  | 		accounts := make(map[common.Hash][]byte) | ||||||
|  | 		for i := 0; i < num; i++ { | ||||||
|  | 			h := common.Hash{} | ||||||
|  | 			binary.BigEndian.PutUint64(h[:], uint64(i+1)) | ||||||
|  | 			accounts[h] = randomAccount() | ||||||
|  | 		} | ||||||
|  | 		return accounts | ||||||
|  | 	} | ||||||
|  | 	// Build up a large stack of snapshots
 | ||||||
|  | 	base := &diskLayer{ | ||||||
|  | 		diskdb: rawdb.NewMemoryDatabase(), | ||||||
|  | 		root:   common.HexToHash("0x01"), | ||||||
|  | 		cache:  fastcache.New(1024 * 500), | ||||||
|  | 	} | ||||||
|  | 	snaps := &Tree{ | ||||||
|  | 		layers: map[common.Hash]snapshot{ | ||||||
|  | 			base.root: base, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 	for i := 1; i <= 100; i++ { | ||||||
|  | 		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(200), nil) | ||||||
|  | 	} | ||||||
|  | 	// We call this once before the benchmark, so the creation of
 | ||||||
|  | 	// sorted account lists is not included in the results.
 | ||||||
|  | 	head := snaps.Snapshot(common.HexToHash("0x65")) | ||||||
|  | 	head.(*diffLayer).newBinaryAccountIterator() | ||||||
|  | 
 | ||||||
|  | 	b.Run("binary iterator keys", func(b *testing.B) { | ||||||
|  | 		for i := 0; i < b.N; i++ { | ||||||
|  | 			got := 0 | ||||||
|  | 			it := head.(*diffLayer).newBinaryAccountIterator() | ||||||
|  | 			for it.Next() { | ||||||
|  | 				got++ | ||||||
|  | 			} | ||||||
|  | 			if exp := 200; got != exp { | ||||||
|  | 				b.Errorf("iterator len wrong, expected %d, got %d", exp, got) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	}) | ||||||
|  | 	b.Run("binary iterator values", func(b *testing.B) { | ||||||
|  | 		for i := 0; i < b.N; i++ { | ||||||
|  | 			got := 0 | ||||||
|  | 			it := head.(*diffLayer).newBinaryAccountIterator() | ||||||
|  | 			for it.Next() { | ||||||
|  | 				got++ | ||||||
|  | 				head.(*diffLayer).accountRLP(it.Hash(), 0) | ||||||
|  | 			} | ||||||
|  | 			if exp := 200; got != exp { | ||||||
|  | 				b.Errorf("iterator len wrong, expected %d, got %d", exp, got) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	}) | ||||||
|  | 	b.Run("fast iterator keys", func(b *testing.B) { | ||||||
|  | 		for i := 0; i < b.N; i++ { | ||||||
|  | 			it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{}) | ||||||
|  | 			defer it.Release() | ||||||
|  | 
 | ||||||
|  | 			got := 0 | ||||||
|  | 			for it.Next() { | ||||||
|  | 				got++ | ||||||
|  | 			} | ||||||
|  | 			if exp := 200; got != exp { | ||||||
|  | 				b.Errorf("iterator len wrong, expected %d, got %d", exp, got) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	}) | ||||||
|  | 	b.Run("fast iterator values", func(b *testing.B) { | ||||||
|  | 		for i := 0; i < b.N; i++ { | ||||||
|  | 			it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{}) | ||||||
|  | 			defer it.Release() | ||||||
|  | 
 | ||||||
|  | 			got := 0 | ||||||
|  | 			for it.Next() { | ||||||
|  | 				got++ | ||||||
|  | 				it.Account() | ||||||
|  | 			} | ||||||
|  | 			if exp := 200; got != exp { | ||||||
|  | 				b.Errorf("iterator len wrong, expected %d, got %d", exp, got) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	}) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // BenchmarkAccountIteratorLargeBaselayer is a pretty realistic benchmark, where
 | ||||||
|  | // the baselayer is a lot larger than the upper layer.
 | ||||||
|  | //
 | ||||||
|  | // This is heavy on the binary iterator, which in most cases will have to
 | ||||||
|  | // call recursively 100 times for the majority of the values
 | ||||||
|  | //
 | ||||||
|  | // BenchmarkAccountIteratorLargeBaselayer/binary_iterator_(keys)-6         	     514	   1971999 ns/op
 | ||||||
|  | // BenchmarkAccountIteratorLargeBaselayer/binary_iterator_(values)-6       	      61	  18997492 ns/op
 | ||||||
|  | // BenchmarkAccountIteratorLargeBaselayer/fast_iterator_(keys)-6           	   10000	    114385 ns/op
 | ||||||
|  | // BenchmarkAccountIteratorLargeBaselayer/fast_iterator_(values)-6         	    4047	    296823 ns/op
 | ||||||
|  | func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) { | ||||||
|  | 	// Create a custom account factory to recreate the same addresses
 | ||||||
|  | 	makeAccounts := func(num int) map[common.Hash][]byte { | ||||||
|  | 		accounts := make(map[common.Hash][]byte) | ||||||
|  | 		for i := 0; i < num; i++ { | ||||||
|  | 			h := common.Hash{} | ||||||
|  | 			binary.BigEndian.PutUint64(h[:], uint64(i+1)) | ||||||
|  | 			accounts[h] = randomAccount() | ||||||
|  | 		} | ||||||
|  | 		return accounts | ||||||
|  | 	} | ||||||
|  | 	// Build up a large stack of snapshots
 | ||||||
|  | 	base := &diskLayer{ | ||||||
|  | 		diskdb: rawdb.NewMemoryDatabase(), | ||||||
|  | 		root:   common.HexToHash("0x01"), | ||||||
|  | 		cache:  fastcache.New(1024 * 500), | ||||||
|  | 	} | ||||||
|  | 	snaps := &Tree{ | ||||||
|  | 		layers: map[common.Hash]snapshot{ | ||||||
|  | 			base.root: base, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 	snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, makeAccounts(2000), nil) | ||||||
|  | 	for i := 2; i <= 100; i++ { | ||||||
|  | 		snaps.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), nil, makeAccounts(20), nil) | ||||||
|  | 	} | ||||||
|  | 	// We call this once before the benchmark, so the creation of
 | ||||||
|  | 	// the sorted account lists is not included in the results.
 | ||||||
|  | 	head := snaps.Snapshot(common.HexToHash("0x65")) | ||||||
|  | 	head.(*diffLayer).newBinaryAccountIterator() | ||||||
|  | 
 | ||||||
|  | 	b.Run("binary iterator (keys)", func(b *testing.B) { | ||||||
|  | 		for i := 0; i < b.N; i++ { | ||||||
|  | 			got := 0 | ||||||
|  | 			it := head.(*diffLayer).newBinaryAccountIterator() | ||||||
|  | 			for it.Next() { | ||||||
|  | 				got++ | ||||||
|  | 			} | ||||||
|  | 			if exp := 2000; got != exp { | ||||||
|  | 				b.Errorf("iterator len wrong, expected %d, got %d", exp, got) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	}) | ||||||
|  | 	b.Run("binary iterator (values)", func(b *testing.B) { | ||||||
|  | 		for i := 0; i < b.N; i++ { | ||||||
|  | 			got := 0 | ||||||
|  | 			it := head.(*diffLayer).newBinaryAccountIterator() | ||||||
|  | 			for it.Next() { | ||||||
|  | 				got++ | ||||||
|  | 				v := it.Hash() | ||||||
|  | 				head.(*diffLayer).accountRLP(v, 0) | ||||||
|  | 			} | ||||||
|  | 			if exp := 2000; got != exp { | ||||||
|  | 				b.Errorf("iterator len wrong, expected %d, got %d", exp, got) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	}) | ||||||
|  | 	b.Run("fast iterator (keys)", func(b *testing.B) { | ||||||
|  | 		for i := 0; i < b.N; i++ { | ||||||
|  | 			it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{}) | ||||||
|  | 			defer it.Release() | ||||||
|  | 
 | ||||||
|  | 			got := 0 | ||||||
|  | 			for it.Next() { | ||||||
|  | 				got++ | ||||||
|  | 			} | ||||||
|  | 			if exp := 2000; got != exp { | ||||||
|  | 				b.Errorf("iterator len wrong, expected %d, got %d", exp, got) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	}) | ||||||
|  | 	b.Run("fast iterator (values)", func(b *testing.B) { | ||||||
|  | 		for i := 0; i < b.N; i++ { | ||||||
|  | 			it, _ := snaps.AccountIterator(common.HexToHash("0x65"), common.Hash{}) | ||||||
|  | 			defer it.Release() | ||||||
|  | 
 | ||||||
|  | 			got := 0 | ||||||
|  | 			for it.Next() { | ||||||
|  | 				it.Account() | ||||||
|  | 				got++ | ||||||
|  | 			} | ||||||
|  | 			if exp := 2000; got != exp { | ||||||
|  | 				b.Errorf("iterator len wrong, expected %d, got %d", exp, got) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	}) | ||||||
|  | } | ||||||
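The two flavours benchmarked above differ mainly in how they merge the per-layer sorted account lists. The sketch below is not the library's implementation, only the underlying idea of the flat merge: keep one cursor per layer, always emit the lexicographically smallest pending key, and advance every cursor sitting on that key so upper layers shadow lower ones.

```go
package main

import "fmt"

// mergeSorted walks several per-layer sorted key lists in lockstep: it emits
// the smallest pending key exactly once and advances every cursor pointing at
// it, so keys overwritten in upper layers never surface twice.
func mergeSorted(layers [][]string) []string {
	cursors := make([]int, len(layers))
	var out []string
	for {
		best := -1
		for i, layer := range layers {
			if cursors[i] >= len(layer) {
				continue // this layer is exhausted
			}
			if best == -1 || layer[cursors[i]] < layers[best][cursors[best]] {
				best = i
			}
		}
		if best == -1 {
			return out // all layers exhausted
		}
		key := layers[best][cursors[best]]
		for i, layer := range layers {
			if cursors[i] < len(layer) && layer[cursors[i]] == key {
				cursors[i]++ // skip shadowed copies of the same key
			}
		}
		out = append(out, key)
	}
}

func main() {
	fmt.Println(mergeSorted([][]string{
		{"0xa1", "0xc3"}, // upper diff layer
		{"0xb2", "0xc3"}, // lower diff layer: its 0xc3 is shadowed
	})) // [0xa1 0xb2 0xc3]
}
```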
|  | 
 | ||||||
|  | /* | ||||||
|  | func BenchmarkBinaryAccountIteration(b *testing.B) { | ||||||
|  | 	benchmarkAccountIteration(b, func(snap snapshot) AccountIterator { | ||||||
|  | 		return snap.(*diffLayer).newBinaryAccountIterator() | ||||||
|  | 	}) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func BenchmarkFastAccountIteration(b *testing.B) { | ||||||
|  | 	benchmarkAccountIteration(b, newFastAccountIterator) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func benchmarkAccountIteration(b *testing.B, iterator func(snap snapshot) AccountIterator) { | ||||||
|  | 	// Create a diff stack and randomize the accounts across them
 | ||||||
|  | 	layers := make([]map[common.Hash][]byte, 128) | ||||||
|  | 	for i := 0; i < len(layers); i++ { | ||||||
|  | 		layers[i] = make(map[common.Hash][]byte) | ||||||
|  | 	} | ||||||
|  | 	for i := 0; i < b.N; i++ { | ||||||
|  | 		depth := rand.Intn(len(layers)) | ||||||
|  | 		layers[depth][randomHash()] = randomAccount() | ||||||
|  | 	} | ||||||
|  | 	stack := snapshot(emptyLayer()) | ||||||
|  | 	for _, layer := range layers { | ||||||
|  | 		stack = stack.Update(common.Hash{}, layer, nil, nil) | ||||||
|  | 	} | ||||||
|  | 	// Reset the timers and report all the stats
 | ||||||
|  | 	it := iterator(stack) | ||||||
|  | 
 | ||||||
|  | 	b.ResetTimer() | ||||||
|  | 	b.ReportAllocs() | ||||||
|  | 
 | ||||||
|  | 	for it.Next() { | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | */ | ||||||
262  core/state/snapshot/journal.go  Normal file
							| @ -0,0 +1,262 @@ | |||||||
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | package snapshot | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"encoding/binary" | ||||||
|  | 	"errors" | ||||||
|  | 	"fmt" | ||||||
|  | 	"io" | ||||||
|  | 	"time" | ||||||
|  | 
 | ||||||
|  | 	"github.com/VictoriaMetrics/fastcache" | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | 	"github.com/ethereum/go-ethereum/core/rawdb" | ||||||
|  | 	"github.com/ethereum/go-ethereum/ethdb" | ||||||
|  | 	"github.com/ethereum/go-ethereum/log" | ||||||
|  | 	"github.com/ethereum/go-ethereum/rlp" | ||||||
|  | 	"github.com/ethereum/go-ethereum/trie" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // journalGenerator is a disk layer entry containing the generator progress marker.
 | ||||||
|  | type journalGenerator struct { | ||||||
|  | 	Wiping   bool // Whether the database was in progress of being wiped
 | ||||||
|  | 	Done     bool // Whether the generator finished creating the snapshot
 | ||||||
|  | 	Marker   []byte | ||||||
|  | 	Accounts uint64 | ||||||
|  | 	Slots    uint64 | ||||||
|  | 	Storage  uint64 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // journalDestruct is an account deletion entry in a diffLayer's disk journal.
 | ||||||
|  | type journalDestruct struct { | ||||||
|  | 	Hash common.Hash | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // journalAccount is an account entry in a diffLayer's disk journal.
 | ||||||
|  | type journalAccount struct { | ||||||
|  | 	Hash common.Hash | ||||||
|  | 	Blob []byte | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // journalStorage is an account's storage map in a diffLayer's disk journal.
 | ||||||
|  | type journalStorage struct { | ||||||
|  | 	Hash common.Hash | ||||||
|  | 	Keys []common.Hash | ||||||
|  | 	Vals [][]byte | ||||||
|  | } | ||||||
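Taken together, these four record types define the entire journal format: a single RLP stream holding one journalGenerator entry for the disk layer, followed by one (root, destructs, accounts, storage) quadruple per diff layer, bottom-most first, until EOF. Below is a rough sketch of emitting such a stream by hand, assumed to live inside this package; the hashes and account blob are placeholder values, and the record order mirrors what loadSnapshot/loadDiffLayer further down expect.

```go
// writeTinyJournal is a hypothetical helper, not part of this change; it
// journals a single diff layer on top of a fully generated disk layer.
func writeTinyJournal(diskdb ethdb.KeyValueStore) error {
	buf := new(bytes.Buffer)
	// Disk layer record: generation already finished, so no marker needed.
	if err := rlp.Encode(buf, journalGenerator{Done: true}); err != nil {
		return err
	}
	// One (root, destructs, accounts, storage) quadruple per diff layer.
	for _, entry := range []interface{}{
		common.HexToHash("0x02"), // diff layer root (placeholder)
		[]journalDestruct{},      // no deleted accounts
		[]journalAccount{{Hash: common.HexToHash("0xa1"), Blob: []byte{0x01}}},
		[]journalStorage{},       // no storage changes
	} {
		if err := rlp.Encode(buf, entry); err != nil {
			return err
		}
	}
	rawdb.WriteSnapshotJournal(diskdb, buf.Bytes())
	return nil
}
```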
|  | 
 | ||||||
|  | // loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
 | ||||||
|  | func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) (snapshot, error) { | ||||||
|  | 	// Retrieve the block number and hash of the snapshot, failing if no snapshot
 | ||||||
|  | 	// is present in the database (or crashed mid-update).
 | ||||||
|  | 	baseRoot := rawdb.ReadSnapshotRoot(diskdb) | ||||||
|  | 	if baseRoot == (common.Hash{}) { | ||||||
|  | 		return nil, errors.New("missing or corrupted snapshot") | ||||||
|  | 	} | ||||||
|  | 	base := &diskLayer{ | ||||||
|  | 		diskdb: diskdb, | ||||||
|  | 		triedb: triedb, | ||||||
|  | 		cache:  fastcache.New(cache * 1024 * 1024), | ||||||
|  | 		root:   baseRoot, | ||||||
|  | 	} | ||||||
|  | 	// Retrieve the journal; it must exist, since even with zero diff layers it
 | ||||||
|  | 	// stores whether the snapshot was fully generated or is still in progress
 | ||||||
|  | 	journal := rawdb.ReadSnapshotJournal(diskdb) | ||||||
|  | 	if len(journal) == 0 { | ||||||
|  | 		return nil, errors.New("missing or corrupted snapshot journal") | ||||||
|  | 	} | ||||||
|  | 	r := rlp.NewStream(bytes.NewReader(journal), 0) | ||||||
|  | 
 | ||||||
|  | 	// Read the snapshot generation progress for the disk layer
 | ||||||
|  | 	var generator journalGenerator | ||||||
|  | 	if err := r.Decode(&generator); err != nil { | ||||||
|  | 		return nil, fmt.Errorf("failed to load snapshot progress marker: %v", err) | ||||||
|  | 	} | ||||||
|  | 	// Load all the snapshot diffs from the journal
 | ||||||
|  | 	snapshot, err := loadDiffLayer(base, r) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	// Entire snapshot journal loaded, sanity check the head and return
 | ||||||
|  | 	if head := snapshot.Root(); head != root { | ||||||
|  | 		return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root) | ||||||
|  | 	} | ||||||
|  | 	// Everything loaded correctly, resume any suspended operations
 | ||||||
|  | 	if !generator.Done { | ||||||
|  | 		// If the generator was still wiping, restart one from scratch (fine for
 | ||||||
|  | 		// now as it's rare and the wiper deletes the stuff it touches anyway, so
 | ||||||
|  | 		// restarting won't incur a lot of extra database hops).
 | ||||||
|  | 		var wiper chan struct{} | ||||||
|  | 		if generator.Wiping { | ||||||
|  | 			log.Info("Resuming previous snapshot wipe") | ||||||
|  | 			wiper = wipeSnapshot(diskdb, false) | ||||||
|  | 		} | ||||||
|  | 		// Whether or not wiping was in progress, load any generator progress too
 | ||||||
|  | 		base.genMarker = generator.Marker | ||||||
|  | 		if base.genMarker == nil { | ||||||
|  | 			base.genMarker = []byte{} | ||||||
|  | 		} | ||||||
|  | 		base.genPending = make(chan struct{}) | ||||||
|  | 		base.genAbort = make(chan chan *generatorStats) | ||||||
|  | 
 | ||||||
|  | 		var origin uint64 | ||||||
|  | 		if len(generator.Marker) >= 8 { | ||||||
|  | 			origin = binary.BigEndian.Uint64(generator.Marker) | ||||||
|  | 		} | ||||||
|  | 		go base.generate(&generatorStats{ | ||||||
|  | 			wiping:   wiper, | ||||||
|  | 			origin:   origin, | ||||||
|  | 			start:    time.Now(), | ||||||
|  | 			accounts: generator.Accounts, | ||||||
|  | 			slots:    generator.Slots, | ||||||
|  | 			storage:  common.StorageSize(generator.Storage), | ||||||
|  | 		}) | ||||||
|  | 	} | ||||||
|  | 	return snapshot, nil | ||||||
|  | } | ||||||
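For orientation: the marker decoded above is simply the last snapshot key the generator wrote, i.e. the 32-byte account hash, optionally followed by a 32-byte storage slot hash when generation stopped mid-account (the same layout that diffToDisk in snapshot.go slices apart). A tiny illustrative fragment, with hypothetical progress hashes:

```go
// Resume point encoding: 32-byte account hash, plus 32 more bytes when the
// generator stopped inside that account's storage. Values are hypothetical.
marker := append(accountHash.Bytes(), storageHash.Bytes()...)
origin := binary.BigEndian.Uint64(marker[:8]) // coarse progress estimate only
_ = origin
```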
|  | 
 | ||||||
|  | // loadDiffLayer reads the next sections of a snapshot journal, reconstructing a new
 | ||||||
|  | // diff and verifying that it can be linked to the requested parent.
 | ||||||
|  | func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) { | ||||||
|  | 	// Read the next diff journal entry
 | ||||||
|  | 	var root common.Hash | ||||||
|  | 	if err := r.Decode(&root); err != nil { | ||||||
|  | 		// The first read may fail with EOF, marking the end of the journal
 | ||||||
|  | 		if err == io.EOF { | ||||||
|  | 			return parent, nil | ||||||
|  | 		} | ||||||
|  | 		return nil, fmt.Errorf("load diff root: %v", err) | ||||||
|  | 	} | ||||||
|  | 	var destructs []journalDestruct | ||||||
|  | 	if err := r.Decode(&destructs); err != nil { | ||||||
|  | 		return nil, fmt.Errorf("load diff destructs: %v", err) | ||||||
|  | 	} | ||||||
|  | 	destructSet := make(map[common.Hash]struct{}) | ||||||
|  | 	for _, entry := range destructs { | ||||||
|  | 		destructSet[entry.Hash] = struct{}{} | ||||||
|  | 	} | ||||||
|  | 	var accounts []journalAccount | ||||||
|  | 	if err := r.Decode(&accounts); err != nil { | ||||||
|  | 		return nil, fmt.Errorf("load diff accounts: %v", err) | ||||||
|  | 	} | ||||||
|  | 	accountData := make(map[common.Hash][]byte) | ||||||
|  | 	for _, entry := range accounts { | ||||||
|  | 		accountData[entry.Hash] = entry.Blob | ||||||
|  | 	} | ||||||
|  | 	var storage []journalStorage | ||||||
|  | 	if err := r.Decode(&storage); err != nil { | ||||||
|  | 		return nil, fmt.Errorf("load diff storage: %v", err) | ||||||
|  | 	} | ||||||
|  | 	storageData := make(map[common.Hash]map[common.Hash][]byte) | ||||||
|  | 	for _, entry := range storage { | ||||||
|  | 		slots := make(map[common.Hash][]byte) | ||||||
|  | 		for i, key := range entry.Keys { | ||||||
|  | 			slots[key] = entry.Vals[i] | ||||||
|  | 		} | ||||||
|  | 		storageData[entry.Hash] = slots | ||||||
|  | 	} | ||||||
|  | 	return loadDiffLayer(newDiffLayer(parent, root, destructSet, accountData, storageData), r) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Journal writes the persistent layer generator stats into a buffer to be stored
 | ||||||
|  | // in the database as the snapshot journal.
 | ||||||
|  | func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) { | ||||||
|  | 	// If the snapshot is currently being generated, abort it
 | ||||||
|  | 	var stats *generatorStats | ||||||
|  | 	if dl.genAbort != nil { | ||||||
|  | 		abort := make(chan *generatorStats) | ||||||
|  | 		dl.genAbort <- abort | ||||||
|  | 
 | ||||||
|  | 		if stats = <-abort; stats != nil { | ||||||
|  | 			stats.Log("Journalling in-progress snapshot", dl.genMarker) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// Ensure the layer didn't get stale
 | ||||||
|  | 	dl.lock.RLock() | ||||||
|  | 	defer dl.lock.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	if dl.stale { | ||||||
|  | 		return common.Hash{}, ErrSnapshotStale | ||||||
|  | 	} | ||||||
|  | 	// Write out the generator marker
 | ||||||
|  | 	entry := journalGenerator{ | ||||||
|  | 		Done:   dl.genMarker == nil, | ||||||
|  | 		Marker: dl.genMarker, | ||||||
|  | 	} | ||||||
|  | 	if stats != nil { | ||||||
|  | 		entry.Wiping = (stats.wiping != nil) | ||||||
|  | 		entry.Accounts = stats.accounts | ||||||
|  | 		entry.Slots = stats.slots | ||||||
|  | 		entry.Storage = uint64(stats.storage) | ||||||
|  | 	} | ||||||
|  | 	if err := rlp.Encode(buffer, entry); err != nil { | ||||||
|  | 		return common.Hash{}, err | ||||||
|  | 	} | ||||||
|  | 	return dl.root, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Journal writes the memory layer contents into a buffer to be stored in the
 | ||||||
|  | // database as the snapshot journal.
 | ||||||
|  | func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) { | ||||||
|  | 	// Journal the parent first
 | ||||||
|  | 	base, err := dl.parent.Journal(buffer) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return common.Hash{}, err | ||||||
|  | 	} | ||||||
|  | 	// Ensure the layer didn't get stale
 | ||||||
|  | 	dl.lock.RLock() | ||||||
|  | 	defer dl.lock.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	if dl.Stale() { | ||||||
|  | 		return common.Hash{}, ErrSnapshotStale | ||||||
|  | 	} | ||||||
|  | 	// Everything below was journalled, persist this layer too
 | ||||||
|  | 	if err := rlp.Encode(buffer, dl.root); err != nil { | ||||||
|  | 		return common.Hash{}, err | ||||||
|  | 	} | ||||||
|  | 	destructs := make([]journalDestruct, 0, len(dl.destructSet)) | ||||||
|  | 	for hash := range dl.destructSet { | ||||||
|  | 		destructs = append(destructs, journalDestruct{Hash: hash}) | ||||||
|  | 	} | ||||||
|  | 	if err := rlp.Encode(buffer, destructs); err != nil { | ||||||
|  | 		return common.Hash{}, err | ||||||
|  | 	} | ||||||
|  | 	accounts := make([]journalAccount, 0, len(dl.accountData)) | ||||||
|  | 	for hash, blob := range dl.accountData { | ||||||
|  | 		accounts = append(accounts, journalAccount{Hash: hash, Blob: blob}) | ||||||
|  | 	} | ||||||
|  | 	if err := rlp.Encode(buffer, accounts); err != nil { | ||||||
|  | 		return common.Hash{}, err | ||||||
|  | 	} | ||||||
|  | 	storage := make([]journalStorage, 0, len(dl.storageData)) | ||||||
|  | 	for hash, slots := range dl.storageData { | ||||||
|  | 		keys := make([]common.Hash, 0, len(slots)) | ||||||
|  | 		vals := make([][]byte, 0, len(slots)) | ||||||
|  | 		for key, val := range slots { | ||||||
|  | 			keys = append(keys, key) | ||||||
|  | 			vals = append(vals, val) | ||||||
|  | 		} | ||||||
|  | 		storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals}) | ||||||
|  | 	} | ||||||
|  | 	if err := rlp.Encode(buffer, storage); err != nil { | ||||||
|  | 		return common.Hash{}, err | ||||||
|  | 	} | ||||||
|  | 	return base, nil | ||||||
|  | } | ||||||
603  core/state/snapshot/snapshot.go  Normal file
							| @ -0,0 +1,603 @@ | |||||||
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | // Package snapshot implements a journalled, dynamic state dump.
 | ||||||
|  | package snapshot | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"errors" | ||||||
|  | 	"fmt" | ||||||
|  | 	"sync" | ||||||
|  | 	"sync/atomic" | ||||||
|  | 
 | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | 	"github.com/ethereum/go-ethereum/core/rawdb" | ||||||
|  | 	"github.com/ethereum/go-ethereum/ethdb" | ||||||
|  | 	"github.com/ethereum/go-ethereum/log" | ||||||
|  | 	"github.com/ethereum/go-ethereum/metrics" | ||||||
|  | 	"github.com/ethereum/go-ethereum/trie" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | var ( | ||||||
|  | 	snapshotCleanAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/account/hit", nil) | ||||||
|  | 	snapshotCleanAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/miss", nil) | ||||||
|  | 	snapshotCleanAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/inex", nil) | ||||||
|  | 	snapshotCleanAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/read", nil) | ||||||
|  | 	snapshotCleanAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/write", nil) | ||||||
|  | 
 | ||||||
|  | 	snapshotCleanStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/storage/hit", nil) | ||||||
|  | 	snapshotCleanStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/miss", nil) | ||||||
|  | 	snapshotCleanStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/inex", nil) | ||||||
|  | 	snapshotCleanStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/read", nil) | ||||||
|  | 	snapshotCleanStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/write", nil) | ||||||
|  | 
 | ||||||
|  | 	snapshotDirtyAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/account/hit", nil) | ||||||
|  | 	snapshotDirtyAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/miss", nil) | ||||||
|  | 	snapshotDirtyAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/inex", nil) | ||||||
|  | 	snapshotDirtyAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/read", nil) | ||||||
|  | 	snapshotDirtyAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/write", nil) | ||||||
|  | 
 | ||||||
|  | 	snapshotDirtyStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/hit", nil) | ||||||
|  | 	snapshotDirtyStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/miss", nil) | ||||||
|  | 	snapshotDirtyStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/inex", nil) | ||||||
|  | 	snapshotDirtyStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/read", nil) | ||||||
|  | 	snapshotDirtyStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/write", nil) | ||||||
|  | 
 | ||||||
|  | 	snapshotDirtyAccountHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/account/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015)) | ||||||
|  | 	snapshotDirtyStorageHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/storage/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015)) | ||||||
|  | 
 | ||||||
|  | 	snapshotFlushAccountItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/item", nil) | ||||||
|  | 	snapshotFlushAccountSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/size", nil) | ||||||
|  | 	snapshotFlushStorageItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/item", nil) | ||||||
|  | 	snapshotFlushStorageSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/size", nil) | ||||||
|  | 
 | ||||||
|  | 	snapshotBloomIndexTimer = metrics.NewRegisteredResettingTimer("state/snapshot/bloom/index", nil) | ||||||
|  | 	snapshotBloomErrorGauge = metrics.NewRegisteredGaugeFloat64("state/snapshot/bloom/error", nil) | ||||||
|  | 
 | ||||||
|  | 	snapshotBloomAccountTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/account/truehit", nil) | ||||||
|  | 	snapshotBloomAccountFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/account/falsehit", nil) | ||||||
|  | 	snapshotBloomAccountMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/account/miss", nil) | ||||||
|  | 
 | ||||||
|  | 	snapshotBloomStorageTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/truehit", nil) | ||||||
|  | 	snapshotBloomStorageFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/falsehit", nil) | ||||||
|  | 	snapshotBloomStorageMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/miss", nil) | ||||||
|  | 
 | ||||||
|  | 	// ErrSnapshotStale is returned from data accessors if the underlying snapshot
 | ||||||
|  | 	// layer had been invalidated due to the chain progressing forward far enough
 | ||||||
|  | 	// to not maintain the layer's original state.
 | ||||||
|  | 	ErrSnapshotStale = errors.New("snapshot stale") | ||||||
|  | 
 | ||||||
|  | 	// ErrNotCoveredYet is returned from data accessors if the underlying snapshot
 | ||||||
|  | 	// is being generated currently and the requested data item is not yet in the
 | ||||||
|  | 	// range of accounts covered.
 | ||||||
|  | 	ErrNotCoveredYet = errors.New("not covered yet") | ||||||
|  | 
 | ||||||
|  | 	// errSnapshotCycle is returned if a snapshot is attempted to be inserted
 | ||||||
|  | 	// that forms a cycle in the snapshot tree.
 | ||||||
|  | 	errSnapshotCycle = errors.New("snapshot cycle") | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // Snapshot represents the functionality supported by a snapshot storage layer.
 | ||||||
|  | type Snapshot interface { | ||||||
|  | 	// Root returns the root hash for which this snapshot was made.
 | ||||||
|  | 	Root() common.Hash | ||||||
|  | 
 | ||||||
|  | 	// Account directly retrieves the account associated with a particular hash in
 | ||||||
|  | 	// the snapshot slim data format.
 | ||||||
|  | 	Account(hash common.Hash) (*Account, error) | ||||||
|  | 
 | ||||||
|  | 	// AccountRLP directly retrieves the account RLP associated with a particular
 | ||||||
|  | 	// hash in the snapshot slim data format.
 | ||||||
|  | 	AccountRLP(hash common.Hash) ([]byte, error) | ||||||
|  | 
 | ||||||
|  | 	// Storage directly retrieves the storage data associated with a particular hash,
 | ||||||
|  | 	// within a particular account.
 | ||||||
|  | 	Storage(accountHash, storageHash common.Hash) ([]byte, error) | ||||||
|  | } | ||||||
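A minimal read sketch against this public interface, assuming snaps is a previously built *Tree and the three hashes are known values; note that reads may also fail with the ErrSnapshotStale or ErrNotCoveredYet errors defined above.

```go
// Direct state reads via the snapshot, bypassing the trie entirely.
if snap := snaps.Snapshot(headRoot); snap != nil {
	if acc, err := snap.Account(accountHash); err == nil && acc != nil {
		fmt.Println("nonce:", acc.Nonce) // decoded slim-format account
	}
	if slot, err := snap.Storage(accountHash, slotHash); err == nil {
		fmt.Printf("slot value: %x\n", slot) // raw storage slot bytes
	}
}
```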
|  | 
 | ||||||
|  | // snapshot is the internal version of the snapshot data layer that supports some
 | ||||||
|  | // additional methods compared to the public API.
 | ||||||
|  | type snapshot interface { | ||||||
|  | 	Snapshot | ||||||
|  | 
 | ||||||
|  | 	// Parent returns the layer below this snapshot, or nil if the base was
 | ||||||
|  | 	// reached.
 | ||||||
|  | 	//
 | ||||||
|  | 	// Note, the method is an internal helper to avoid type switching between the
 | ||||||
|  | 	// disk and diff layers. There is no locking involved.
 | ||||||
|  | 	Parent() snapshot | ||||||
|  | 
 | ||||||
|  | 	// Update creates a new layer on top of the existing snapshot diff tree with
 | ||||||
|  | 	// the specified data items.
 | ||||||
|  | 	//
 | ||||||
|  | 	// Note, the maps are retained by the method to avoid copying everything.
 | ||||||
|  | 	Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer | ||||||
|  | 
 | ||||||
|  | 	// Journal commits an entire diff hierarchy to disk into a single journal entry.
 | ||||||
|  | 	// This is meant to be used during shutdown to persist the snapshot without
 | ||||||
|  | 	// flattening everything down (bad for reorgs).
 | ||||||
|  | 	Journal(buffer *bytes.Buffer) (common.Hash, error) | ||||||
|  | 
 | ||||||
|  | 	// Stale returns whether this layer has become stale (was flattened across) or
 | ||||||
|  | 	// if it's still live.
 | ||||||
|  | 	Stale() bool | ||||||
|  | 
 | ||||||
|  | 	// AccountIterator creates an account iterator over an arbitrary layer.
 | ||||||
|  | 	AccountIterator(seek common.Hash) AccountIterator | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Tree is an Ethereum state snapshot tree. It consists of one persistent base
 | ||||||
|  | // layer backed by a key-value store, on top of which arbitrarily many in-memory
 | ||||||
|  | // diff layers are stacked. The memory diffs can form a tree with branching, but
 | ||||||
|  | // the disk layer is a singleton shared by all. If a reorg goes deeper than the
 | ||||||
|  | // disk layer, everything needs to be deleted.
 | ||||||
|  | //
 | ||||||
|  | // The goal of a state snapshot is twofold: to allow direct access to account and
 | ||||||
|  | // storage data to avoid expensive multi-level trie lookups; and to allow sorted,
 | ||||||
|  | // cheap iteration of the account/storage tries for sync aid.
 | ||||||
|  | type Tree struct { | ||||||
|  | 	diskdb ethdb.KeyValueStore      // Persistent database to store the snapshot
 | ||||||
|  | 	triedb *trie.Database           // In-memory cache to access the trie through
 | ||||||
|  | 	cache  int                      // Megabytes permitted to use for read caches
 | ||||||
|  | 	layers map[common.Hash]snapshot // Collection of all known layers
 | ||||||
|  | 	lock   sync.RWMutex | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // New attempts to load an already existing snapshot from a persistent key-value
 | ||||||
|  | // store (with a number of memory layers from a journal), ensuring that the head
 | ||||||
|  | // of the snapshot matches the expected one.
 | ||||||
|  | //
 | ||||||
|  | // If the snapshot is missing or inconsistent, the entirety is deleted and will
 | ||||||
|  | // be reconstructed from scratch based on the tries in the key-value store, on a
 | ||||||
|  | // background thread.
 | ||||||
|  | func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool) *Tree { | ||||||
|  | 	// Create a new, empty snapshot tree
 | ||||||
|  | 	snap := &Tree{ | ||||||
|  | 		diskdb: diskdb, | ||||||
|  | 		triedb: triedb, | ||||||
|  | 		cache:  cache, | ||||||
|  | 		layers: make(map[common.Hash]snapshot), | ||||||
|  | 	} | ||||||
|  | 	if !async { | ||||||
|  | 		defer snap.waitBuild() | ||||||
|  | 	} | ||||||
|  | 	// Attempt to load a previously persisted snapshot and rebuild one if failed
 | ||||||
|  | 	head, err := loadSnapshot(diskdb, triedb, cache, root) | ||||||
|  | 	if err != nil { | ||||||
|  | 		log.Warn("Failed to load snapshot, regenerating", "err", err) | ||||||
|  | 		snap.Rebuild(root) | ||||||
|  | 		return snap | ||||||
|  | 	} | ||||||
|  | 	// Existing snapshot loaded, seed all the layers
 | ||||||
|  | 	for head != nil { | ||||||
|  | 		snap.layers[head.Root()] = head | ||||||
|  | 		head = head.Parent() | ||||||
|  | 	} | ||||||
|  | 	return snap | ||||||
|  | } | ||||||
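From a caller's perspective, wiring the tree up at startup might look like the fragment below; diskdb and triedb are assumed to exist already, and 256 is an arbitrary read-cache budget in megabytes.

```go
// async=false blocks until any interrupted generation has finished (the mode
// the tests rely on); callers that must not block can pass true instead.
snaps := snapshot.New(diskdb, triedb, 256, headRoot, false)
_ = snaps
```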
|  | 
 | ||||||
|  | // waitBuild blocks until the snapshot finishes rebuilding. This method is meant
 | ||||||
|  | // to be used by tests to ensure we're testing what we believe we are.
 | ||||||
|  | func (t *Tree) waitBuild() { | ||||||
|  | 	// Find the rebuild termination channel
 | ||||||
|  | 	var done chan struct{} | ||||||
|  | 
 | ||||||
|  | 	t.lock.RLock() | ||||||
|  | 	for _, layer := range t.layers { | ||||||
|  | 		if layer, ok := layer.(*diskLayer); ok { | ||||||
|  | 			done = layer.genPending | ||||||
|  | 			break | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	t.lock.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	// Wait until the snapshot is generated
 | ||||||
|  | 	if done != nil { | ||||||
|  | 		<-done | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Snapshot retrieves a snapshot belonging to the given block root, or nil if no
 | ||||||
|  | // snapshot is maintained for that block.
 | ||||||
|  | func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot { | ||||||
|  | 	t.lock.RLock() | ||||||
|  | 	defer t.lock.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	return t.layers[blockRoot] | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Update adds a new snapshot into the tree, if it can be linked to an existing
 | ||||||
|  | // parent. It is disallowed to insert a disk layer (the origin of all).
 | ||||||
|  | func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error { | ||||||
|  | 	// Reject noop updates to avoid self-loops in the snapshot tree. This is a
 | ||||||
|  | 	// special case that can only happen for Clique networks where empty blocks
 | ||||||
|  | 	// don't modify the state (0 block subsidy).
 | ||||||
|  | 	//
 | ||||||
|  | 	// Although we could silently ignore this internally, it should be the caller's
 | ||||||
|  | 	// responsibility to avoid even attempting to insert such a snapshot.
 | ||||||
|  | 	if blockRoot == parentRoot { | ||||||
|  | 		return errSnapshotCycle | ||||||
|  | 	} | ||||||
|  | 	// Generate a new snapshot on top of the parent
 | ||||||
|  | 	parent := t.Snapshot(parentRoot).(snapshot) | ||||||
|  | 	if parent == nil { | ||||||
|  | 		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot) | ||||||
|  | 	} | ||||||
|  | 	snap := parent.Update(blockRoot, destructs, accounts, storage) | ||||||
|  | 
 | ||||||
|  | 	// Save the new snapshot for later
 | ||||||
|  | 	t.lock.Lock() | ||||||
|  | 	defer t.lock.Unlock() | ||||||
|  | 
 | ||||||
|  | 	t.layers[snap.root] = snap | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
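A per-block update sketch with hypothetical values: the account blobs are slim-RLP encodings, destructs lists accounts deleted in the block, and storage maps the changed slots per account.

```go
destructs := map[common.Hash]struct{}{
	common.HexToHash("0xdd"): {}, // account self-destructed in this block
}
accounts := map[common.Hash][]byte{
	common.HexToHash("0xa1"): accountBlob, // slim-RLP account data (assumed)
}
storage := map[common.Hash]map[common.Hash][]byte{
	common.HexToHash("0xa1"): {common.HexToHash("0x01"): []byte{0x11}},
}
if err := snaps.Update(childRoot, parentRoot, destructs, accounts, storage); err != nil {
	log.Crit("Failed to update snapshot tree", "err", err)
}
```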
|  | 
 | ||||||
|  | // Cap traverses the snapshot tree downwards from a head block hash until the
 | ||||||
|  | // number of allowed layers is crossed. All layers beyond the permitted number
 | ||||||
|  | // are flattened downwards.
 | ||||||
|  | func (t *Tree) Cap(root common.Hash, layers int) error { | ||||||
|  | 	// Retrieve the head snapshot to cap from
 | ||||||
|  | 	snap := t.Snapshot(root) | ||||||
|  | 	if snap == nil { | ||||||
|  | 		return fmt.Errorf("snapshot [%#x] missing", root) | ||||||
|  | 	} | ||||||
|  | 	diff, ok := snap.(*diffLayer) | ||||||
|  | 	if !ok { | ||||||
|  | 		return fmt.Errorf("snapshot [%#x] is disk layer", root) | ||||||
|  | 	} | ||||||
|  | 	// Run the internal capping and discard all stale layers
 | ||||||
|  | 	t.lock.Lock() | ||||||
|  | 	defer t.lock.Unlock() | ||||||
|  | 
 | ||||||
|  | 	// Flattening the bottom-most diff layer requires special casing since there's
 | ||||||
|  | 	// no child to rewire to the grandparent. In that case we can fake a temporary
 | ||||||
|  | 	// child for the capping and then remove it.
 | ||||||
|  | 	var persisted *diskLayer | ||||||
|  | 
 | ||||||
|  | 	switch layers { | ||||||
|  | 	case 0: | ||||||
|  | 		// If full commit was requested, flatten the diffs and merge onto disk
 | ||||||
|  | 		diff.lock.RLock() | ||||||
|  | 		base := diffToDisk(diff.flatten().(*diffLayer)) | ||||||
|  | 		diff.lock.RUnlock() | ||||||
|  | 
 | ||||||
|  | 		// Replace the entire snapshot tree with the flat base
 | ||||||
|  | 		t.layers = map[common.Hash]snapshot{base.root: base} | ||||||
|  | 		return nil | ||||||
|  | 
 | ||||||
|  | 	case 1: | ||||||
|  | 		// If full flattening was requested, flatten the diffs but only merge if the
 | ||||||
|  | 		// memory limit was reached
 | ||||||
|  | 		var ( | ||||||
|  | 			bottom *diffLayer | ||||||
|  | 			base   *diskLayer | ||||||
|  | 		) | ||||||
|  | 		diff.lock.RLock() | ||||||
|  | 		bottom = diff.flatten().(*diffLayer) | ||||||
|  | 		if bottom.memory >= aggregatorMemoryLimit { | ||||||
|  | 			base = diffToDisk(bottom) | ||||||
|  | 		} | ||||||
|  | 		diff.lock.RUnlock() | ||||||
|  | 
 | ||||||
|  | 		// If all diff layers were removed, replace the entire snapshot tree
 | ||||||
|  | 		if base != nil { | ||||||
|  | 			t.layers = map[common.Hash]snapshot{base.root: base} | ||||||
|  | 			return nil | ||||||
|  | 		} | ||||||
|  | 		// Merge the new aggregated layer into the snapshot tree, clean stales below
 | ||||||
|  | 		t.layers[bottom.root] = bottom | ||||||
|  | 
 | ||||||
|  | 	default: | ||||||
|  | 		// Many layers requested to be retained, cap normally
 | ||||||
|  | 		persisted = t.cap(diff, layers) | ||||||
|  | 	} | ||||||
|  | 	// Remove any layer that is stale or links into a stale layer
 | ||||||
|  | 	children := make(map[common.Hash][]common.Hash) | ||||||
|  | 	for root, snap := range t.layers { | ||||||
|  | 		if diff, ok := snap.(*diffLayer); ok { | ||||||
|  | 			parent := diff.parent.Root() | ||||||
|  | 			children[parent] = append(children[parent], root) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	var remove func(root common.Hash) | ||||||
|  | 	remove = func(root common.Hash) { | ||||||
|  | 		delete(t.layers, root) | ||||||
|  | 		for _, child := range children[root] { | ||||||
|  | 			remove(child) | ||||||
|  | 		} | ||||||
|  | 		delete(children, root) | ||||||
|  | 	} | ||||||
|  | 	for root, snap := range t.layers { | ||||||
|  | 		if snap.Stale() { | ||||||
|  | 			remove(root) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// If the disk layer was modified, regenerate all the cumulative blooms
 | ||||||
|  | 	if persisted != nil { | ||||||
|  | 		var rebloom func(root common.Hash) | ||||||
|  | 		rebloom = func(root common.Hash) { | ||||||
|  | 			if diff, ok := t.layers[root].(*diffLayer); ok { | ||||||
|  | 				diff.rebloom(persisted) | ||||||
|  | 			} | ||||||
|  | 			for _, child := range children[root] { | ||||||
|  | 				rebloom(child) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		rebloom(persisted.root) | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
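Usage-wise, Cap is the periodic maintenance call; a sketch assuming snaps and childRoot from the Update example above:

```go
// layers semantics per the switch above: 0 flattens everything onto disk,
// 1 keeps only the bottom accumulator (flushing it once past the memory
// limit), and larger values (128 here, arbitrary) retain that many diff
// layers for reorg protection.
if err := snaps.Cap(childRoot, 128); err != nil {
	log.Crit("Failed to cap snapshot tree", "err", err)
}
```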
|  | 
 | ||||||
|  | // cap traverses the diff tree downwards until the number of allowed layers is
 | ||||||
|  | // crossed. All diffs beyond the permitted number are flattened downwards. If the
 | ||||||
|  | // layer limit is reached, memory cap is also enforced (but not before).
 | ||||||
|  | //
 | ||||||
|  | // The method returns the new disk layer if diffs were persisted into it.
 | ||||||
|  | func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer { | ||||||
|  | 	// Dive until we run out of layers or reach the persistent database
 | ||||||
|  | 	for ; layers > 2; layers-- { | ||||||
|  | 		// If we still have diff layers below, continue down
 | ||||||
|  | 		if parent, ok := diff.parent.(*diffLayer); ok { | ||||||
|  | 			diff = parent | ||||||
|  | 		} else { | ||||||
|  | 			// Diff stack too shallow, return without modifications
 | ||||||
|  | 			return nil | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// We're out of layers, flatten anything below, stopping if it's the disk or if
 | ||||||
|  | 	// the memory limit is not yet exceeded.
 | ||||||
|  | 	switch parent := diff.parent.(type) { | ||||||
|  | 	case *diskLayer: | ||||||
|  | 		return nil | ||||||
|  | 
 | ||||||
|  | 	case *diffLayer: | ||||||
|  | 		// Flatten the parent into the grandparent. The flattening internally obtains a
 | ||||||
|  | 		// write lock on the grandparent.
 | ||||||
|  | 		flattened := parent.flatten().(*diffLayer) | ||||||
|  | 		t.layers[flattened.root] = flattened | ||||||
|  | 
 | ||||||
|  | 		diff.lock.Lock() | ||||||
|  | 		defer diff.lock.Unlock() | ||||||
|  | 
 | ||||||
|  | 		diff.parent = flattened | ||||||
|  | 		if flattened.memory < aggregatorMemoryLimit { | ||||||
|  | 			// Accumulator layer is smaller than the limit, so we can abort, unless
 | ||||||
|  | 			// there's a snapshot being generated currently. In that case, the trie
 | ||||||
|  | 			// will move from underneath the generator so we **must** merge all the
 | ||||||
|  | 			// partial data down into the snapshot and restart the generation.
 | ||||||
|  | 			if flattened.parent.(*diskLayer).genAbort == nil { | ||||||
|  | 				return nil | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	default: | ||||||
|  | 		panic(fmt.Sprintf("unknown data layer: %T", parent)) | ||||||
|  | 	} | ||||||
|  | 	// If the bottom-most layer is larger than our memory cap, persist to disk
 | ||||||
|  | 	bottom := diff.parent.(*diffLayer) | ||||||
|  | 
 | ||||||
|  | 	bottom.lock.RLock() | ||||||
|  | 	base := diffToDisk(bottom) | ||||||
|  | 	bottom.lock.RUnlock() | ||||||
|  | 
 | ||||||
|  | 	t.layers[base.root] = base | ||||||
|  | 	diff.parent = base | ||||||
|  | 	return base | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // diffToDisk merges a bottom-most diff into the persistent disk layer underneath
 | ||||||
|  | // it. The method will panic if called onto a non-bottom-most diff layer.
 | ||||||
|  | func diffToDisk(bottom *diffLayer) *diskLayer { | ||||||
|  | 	var ( | ||||||
|  | 		base  = bottom.parent.(*diskLayer) | ||||||
|  | 		batch = base.diskdb.NewBatch() | ||||||
|  | 		stats *generatorStats | ||||||
|  | 	) | ||||||
|  | 	// If the disk layer is running a snapshot generator, abort it
 | ||||||
|  | 	if base.genAbort != nil { | ||||||
|  | 		abort := make(chan *generatorStats) | ||||||
|  | 		base.genAbort <- abort | ||||||
|  | 		stats = <-abort | ||||||
|  | 	} | ||||||
|  | 	// Start by temporarily deleting the current snapshot block marker. This
 | ||||||
|  | 	// ensures that in the case of a crash, the entire snapshot is invalidated.
 | ||||||
|  | 	rawdb.DeleteSnapshotRoot(batch) | ||||||
|  | 
 | ||||||
|  | 	// Mark the original base as stale as we're going to create a new wrapper
 | ||||||
|  | 	base.lock.Lock() | ||||||
|  | 	if base.stale { | ||||||
|  | 		panic("parent disk layer is stale") // we've committed into the same base from two children, boo
 | ||||||
|  | 	} | ||||||
|  | 	base.stale = true | ||||||
|  | 	base.lock.Unlock() | ||||||
|  | 
 | ||||||
|  | 	// Destroy all the destructed accounts from the database
 | ||||||
|  | 	for hash := range bottom.destructSet { | ||||||
|  | 		// Skip any account not covered yet by the snapshot
 | ||||||
|  | 		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 { | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 		// Remove all storage slots
 | ||||||
|  | 		rawdb.DeleteAccountSnapshot(batch, hash) | ||||||
|  | 		base.cache.Set(hash[:], nil) | ||||||
|  | 
 | ||||||
|  | 		it := rawdb.IterateStorageSnapshots(base.diskdb, hash) | ||||||
|  | 		for it.Next() { | ||||||
|  | 			if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
 | ||||||
|  | 				batch.Delete(key) | ||||||
|  | 				base.cache.Del(key[1:]) | ||||||
|  | 
 | ||||||
|  | 				snapshotFlushStorageItemMeter.Mark(1) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		it.Release() | ||||||
|  | 	} | ||||||
|  | 	// Push all updated accounts into the database
 | ||||||
|  | 	for hash, data := range bottom.accountData { | ||||||
|  | 		// Skip any account not covered yet by the snapshot
 | ||||||
|  | 		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 { | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 		// Push the account to disk
 | ||||||
|  | 		rawdb.WriteAccountSnapshot(batch, hash, data) | ||||||
|  | 		base.cache.Set(hash[:], data) | ||||||
|  | 		snapshotCleanAccountWriteMeter.Mark(int64(len(data))) | ||||||
|  | 
 | ||||||
|  | 		if batch.ValueSize() > ethdb.IdealBatchSize { | ||||||
|  | 			if err := batch.Write(); err != nil { | ||||||
|  | 				log.Crit("Failed to write account snapshot", "err", err) | ||||||
|  | 			} | ||||||
|  | 			batch.Reset() | ||||||
|  | 		} | ||||||
|  | 		snapshotFlushAccountItemMeter.Mark(1) | ||||||
|  | 		snapshotFlushAccountSizeMeter.Mark(int64(len(data))) | ||||||
|  | 	} | ||||||
|  | 	// Push all the storage slots into the database
 | ||||||
|  | 	for accountHash, storage := range bottom.storageData { | ||||||
|  | 		// Skip any account not covered yet by the snapshot
 | ||||||
|  | 		if base.genMarker != nil && bytes.Compare(accountHash[:], base.genMarker) > 0 { | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 		// Generation might be mid-account, track that case too
 | ||||||
|  | 		midAccount := base.genMarker != nil && bytes.Equal(accountHash[:], base.genMarker[:common.HashLength]) | ||||||
|  | 
 | ||||||
|  | 		for storageHash, data := range storage { | ||||||
|  | 			// Skip any slot not covered yet by the snapshot
 | ||||||
|  | 			if midAccount && bytes.Compare(storageHash[:], base.genMarker[common.HashLength:]) > 0 { | ||||||
|  | 				continue | ||||||
|  | 			} | ||||||
|  | 			if len(data) > 0 { | ||||||
|  | 				rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data) | ||||||
|  | 				base.cache.Set(append(accountHash[:], storageHash[:]...), data) | ||||||
|  | 				snapshotCleanStorageWriteMeter.Mark(int64(len(data))) | ||||||
|  | 			} else { | ||||||
|  | 				rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash) | ||||||
|  | 				base.cache.Set(append(accountHash[:], storageHash[:]...), nil) | ||||||
|  | 			} | ||||||
|  | 			snapshotFlushStorageItemMeter.Mark(1) | ||||||
|  | 			snapshotFlushStorageSizeMeter.Mark(int64(len(data))) | ||||||
|  | 		} | ||||||
|  | 		if batch.ValueSize() > ethdb.IdealBatchSize { | ||||||
|  | 			if err := batch.Write(); err != nil { | ||||||
|  | 				log.Crit("Failed to write storage snapshot", "err", err) | ||||||
|  | 			} | ||||||
|  | 			batch.Reset() | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// Update the snapshot block marker and write any remainder data
 | ||||||
|  | 	rawdb.WriteSnapshotRoot(batch, bottom.root) | ||||||
|  | 	if err := batch.Write(); err != nil { | ||||||
|  | 		log.Crit("Failed to write leftover snapshot", "err", err) | ||||||
|  | 	} | ||||||
|  | 	res := &diskLayer{ | ||||||
|  | 		root:       bottom.root, | ||||||
|  | 		cache:      base.cache, | ||||||
|  | 		diskdb:     base.diskdb, | ||||||
|  | 		triedb:     base.triedb, | ||||||
|  | 		genMarker:  base.genMarker, | ||||||
|  | 		genPending: base.genPending, | ||||||
|  | 	} | ||||||
|  | 	// If snapshot generation hasn't finished yet, port over all the stats and
 | ||||||
|  | 	// continue where the previous round left off.
 | ||||||
|  | 	//
 | ||||||
|  | 	// Note, the `base.genAbort` comparison is not used normally, it's checked
 | ||||||
|  | 	// to allow the tests to play with the marker without triggering this path.
 | ||||||
|  | 	if base.genMarker != nil && base.genAbort != nil { | ||||||
|  | 		res.genMarker = base.genMarker | ||||||
|  | 		res.genAbort = make(chan chan *generatorStats) | ||||||
|  | 		go res.generate(stats) | ||||||
|  | 	} | ||||||
|  | 	return res | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Journal commits an entire diff hierarchy to disk into a single journal entry.
 | ||||||
|  | // This is meant to be used during shutdown to persist the snapshot without
 | ||||||
|  | // flattening everything down (bad for reorgs).
 | ||||||
|  | //
 | ||||||
|  | // The method returns the root hash of the base layer that needs to be persisted
 | ||||||
|  | // to disk as a trie too to allow continuing any pending generation op.
 | ||||||
|  | func (t *Tree) Journal(root common.Hash) (common.Hash, error) { | ||||||
|  | 	// Retrieve the head snapshot to journal from var snap snapshot
 | ||||||
|  | 	snap := t.Snapshot(root) | ||||||
|  | 	if snap == nil { | ||||||
|  | 		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root) | ||||||
|  | 	} | ||||||
|  | 	// Run the journaling
 | ||||||
|  | 	t.lock.Lock() | ||||||
|  | 	defer t.lock.Unlock() | ||||||
|  | 
 | ||||||
|  | 	journal := new(bytes.Buffer) | ||||||
|  | 	base, err := snap.(snapshot).Journal(journal) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return common.Hash{}, err | ||||||
|  | 	} | ||||||
|  | 	// Store the journal into the database and return
 | ||||||
|  | 	rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes()) | ||||||
|  | 	return base, nil | ||||||
|  | } | ||||||
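A shutdown sketch, assuming headRoot is the current head state root; per the doc comment, the caller must also make sure the trie for the returned base root is committed so a suspended generation can resume after restart.

```go
base, err := snaps.Journal(headRoot)
if err != nil {
	log.Crit("Failed to journal snapshot tree", "err", err)
}
// Hypothetical follow-up: persist the trie rooted at `base` as well before
// shutting down, e.g. through the trie database's commit path.
_ = base
```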
|  | 
 | ||||||
|  | // Rebuild wipes all available snapshot data from the persistent database and
 | ||||||
|  | // discards all caches and diff layers. Afterwards, it starts a new snapshot
 | ||||||
|  | // generator with the given root hash.
 | ||||||
|  | func (t *Tree) Rebuild(root common.Hash) { | ||||||
|  | 	t.lock.Lock() | ||||||
|  | 	defer t.lock.Unlock() | ||||||
|  | 
 | ||||||
|  | 	// Track whether there's a wipe currently running and keep it alive if so
 | ||||||
|  | 	var wiper chan struct{} | ||||||
|  | 
 | ||||||
|  | 	// Iterate over and mark all layers stale
 | ||||||
|  | 	for _, layer := range t.layers { | ||||||
|  | 		switch layer := layer.(type) { | ||||||
|  | 		case *diskLayer: | ||||||
|  | 			// If the base layer is generating, abort it and save
 | ||||||
|  | 			if layer.genAbort != nil { | ||||||
|  | 				abort := make(chan *generatorStats) | ||||||
|  | 				layer.genAbort <- abort | ||||||
|  | 
 | ||||||
|  | 				if stats := <-abort; stats != nil { | ||||||
|  | 					wiper = stats.wiping | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			// Layer should be inactive now, mark it as stale
 | ||||||
|  | 			layer.lock.Lock() | ||||||
|  | 			layer.stale = true | ||||||
|  | 			layer.lock.Unlock() | ||||||
|  | 
 | ||||||
|  | 		case *diffLayer: | ||||||
|  | 			// If the layer is a simple diff, simply mark as stale
 | ||||||
|  | 			layer.lock.Lock() | ||||||
|  | 			atomic.StoreUint32(&layer.stale, 1) | ||||||
|  | 			layer.lock.Unlock() | ||||||
|  | 
 | ||||||
|  | 		default: | ||||||
|  | 			panic(fmt.Sprintf("unknown layer type: %T", layer)) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// Start generating a new snapshot from scratch on a background thread. The
 | ||||||
|  | 	// generator will run a wiper first if there's not one running right now.
 | ||||||
|  | 	log.Info("Rebuilding state snapshot") | ||||||
|  | 	t.layers = map[common.Hash]snapshot{ | ||||||
|  | 		root: generateSnapshot(t.diskdb, t.triedb, t.cache, root, wiper), | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AccountIterator creates a new account iterator for the specified root hash and
 | ||||||
|  | // seeks to a starting account hash.
 | ||||||
|  | func (t *Tree) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) { | ||||||
|  | 	return newFastAccountIterator(t, root, seek) | ||||||
|  | } | ||||||
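Consuming the iterator mirrors the pattern already used by the benchmarks earlier in this change; a sketch assuming snaps and headRoot exist:

```go
it, err := snaps.AccountIterator(headRoot, common.Hash{}) // seek from the start
if err != nil {
	log.Crit("Failed to open account iterator", "err", err)
}
defer it.Release()

for it.Next() {
	hash, blob := it.Hash(), it.Account() // account hash and slim-RLP blob
	_, _ = hash, blob
}
```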
348  core/state/snapshot/snapshot_test.go  Normal file
							| @ -0,0 +1,348 @@ | |||||||
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | package snapshot | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"fmt" | ||||||
|  | 	"math/big" | ||||||
|  | 	"math/rand" | ||||||
|  | 	"testing" | ||||||
|  | 
 | ||||||
|  | 	"github.com/VictoriaMetrics/fastcache" | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | 	"github.com/ethereum/go-ethereum/core/rawdb" | ||||||
|  | 	"github.com/ethereum/go-ethereum/rlp" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // randomHash generates a random blob of data and returns it as a hash.
 | ||||||
|  | func randomHash() common.Hash { | ||||||
|  | 	var hash common.Hash | ||||||
|  | 	if n, err := rand.Read(hash[:]); n != common.HashLength || err != nil { | ||||||
|  | 		panic(err) | ||||||
|  | 	} | ||||||
|  | 	return hash | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // randomAccount generates a random account and returns it RLP encoded.
 | ||||||
|  | func randomAccount() []byte { | ||||||
|  | 	root := randomHash() | ||||||
|  | 	a := Account{ | ||||||
|  | 		Balance:  big.NewInt(rand.Int63()), | ||||||
|  | 		Nonce:    rand.Uint64(), | ||||||
|  | 		Root:     root[:], | ||||||
|  | 		CodeHash: emptyCode[:], | ||||||
|  | 	} | ||||||
|  | 	data, _ := rlp.EncodeToBytes(a) | ||||||
|  | 	return data | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // randomAccountSet generates a set of random accounts with the given strings as
 | ||||||
|  | // the account address hashes.
 | ||||||
|  | func randomAccountSet(hashes ...string) map[common.Hash][]byte { | ||||||
|  | 	accounts := make(map[common.Hash][]byte) | ||||||
|  | 	for _, hash := range hashes { | ||||||
|  | 		accounts[common.HexToHash(hash)] = randomAccount() | ||||||
|  | 	} | ||||||
|  | 	return accounts | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Tests that if a disk layer becomes stale, no active external references will
 | ||||||
|  | // be returned with junk data. This version of the test flattens every diff layer
 | ||||||
|  | // to check an internal corner case around the bottom-most memory accumulator.
 | ||||||
|  | func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) { | ||||||
|  | 	// Create an empty base layer and a snapshot tree out of it
 | ||||||
|  | 	base := &diskLayer{ | ||||||
|  | 		diskdb: rawdb.NewMemoryDatabase(), | ||||||
|  | 		root:   common.HexToHash("0x01"), | ||||||
|  | 		cache:  fastcache.New(1024 * 500), | ||||||
|  | 	} | ||||||
|  | 	snaps := &Tree{ | ||||||
|  | 		layers: map[common.Hash]snapshot{ | ||||||
|  | 			base.root: base, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 	// Retrieve a reference to the base and commit a diff on top
 | ||||||
|  | 	ref := snaps.Snapshot(base.root) | ||||||
|  | 
 | ||||||
|  | 	accounts := map[common.Hash][]byte{ | ||||||
|  | 		common.HexToHash("0xa1"): randomAccount(), | ||||||
|  | 	} | ||||||
|  | 	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil { | ||||||
|  | 		t.Fatalf("failed to create a diff layer: %v", err) | ||||||
|  | 	} | ||||||
|  | 	if n := len(snaps.layers); n != 2 { | ||||||
|  | 		t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 2) | ||||||
|  | 	} | ||||||
|  | 	// Commit the diff layer onto the disk and ensure it's persisted
 | ||||||
|  | 	if err := snaps.Cap(common.HexToHash("0x02"), 0); err != nil { | ||||||
|  | 		t.Fatalf("failed to merge diff layer onto disk: %v", err) | ||||||
|  | 	} | ||||||
|  | 	// Since the base layer was modified, ensure that data retrieval on the external reference fails
 | ||||||
|  | 	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale { | ||||||
|  | 		t.Errorf("stale reference returned account: %#x (err: %v)", acc, err) | ||||||
|  | 	} | ||||||
|  | 	if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale { | ||||||
|  | 		t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err) | ||||||
|  | 	} | ||||||
|  | 	if n := len(snaps.layers); n != 1 { | ||||||
|  | 		t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 1) | ||||||
|  | 		fmt.Println(snaps.layers) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Tests that if a disk layer becomes stale, no active external references will
 | ||||||
|  | // be returned with junk data. This version of the test retains the bottom diff
 | ||||||
|  | // layer to check the usual mode of operation where the accumulator is retained.
 | ||||||
|  | func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) { | ||||||
|  | 	// Create an empty base layer and a snapshot tree out of it
 | ||||||
|  | 	base := &diskLayer{ | ||||||
|  | 		diskdb: rawdb.NewMemoryDatabase(), | ||||||
|  | 		root:   common.HexToHash("0x01"), | ||||||
|  | 		cache:  fastcache.New(1024 * 500), | ||||||
|  | 	} | ||||||
|  | 	snaps := &Tree{ | ||||||
|  | 		layers: map[common.Hash]snapshot{ | ||||||
|  | 			base.root: base, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 	// Retrieve a reference to the base and commit two diffs on top
 | ||||||
|  | 	ref := snaps.Snapshot(base.root) | ||||||
|  | 
 | ||||||
|  | 	accounts := map[common.Hash][]byte{ | ||||||
|  | 		common.HexToHash("0xa1"): randomAccount(), | ||||||
|  | 	} | ||||||
|  | 	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil { | ||||||
|  | 		t.Fatalf("failed to create a diff layer: %v", err) | ||||||
|  | 	} | ||||||
|  | 	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil { | ||||||
|  | 		t.Fatalf("failed to create a diff layer: %v", err) | ||||||
|  | 	} | ||||||
|  | 	if n := len(snaps.layers); n != 3 { | ||||||
|  | 		t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 3) | ||||||
|  | 	} | ||||||
|  | 	// Commit the diff layer onto the disk and ensure it's persisted
 | ||||||
|  | 	defer func(memcap uint64) { aggregatorMemoryLimit = memcap }(aggregatorMemoryLimit) | ||||||
|  | 	aggregatorMemoryLimit = 0 | ||||||
|  | 
 | ||||||
|  | 	if err := snaps.Cap(common.HexToHash("0x03"), 2); err != nil { | ||||||
|  | 		t.Fatalf("failed to merge diff layer onto disk: %v", err) | ||||||
|  | 	} | ||||||
|  | 	// Since the base layer was modified, ensure that data retrieval on the external reference fails
 | ||||||
|  | 	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale { | ||||||
|  | 		t.Errorf("stale reference returned account: %#x (err: %v)", acc, err) | ||||||
|  | 	} | ||||||
|  | 	if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale { | ||||||
|  | 		t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err) | ||||||
|  | 	} | ||||||
|  | 	if n := len(snaps.layers); n != 2 { | ||||||
|  | 		t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 2) | ||||||
|  | 		fmt.Println(snaps.layers) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Tests that if a diff layer becomes stale, no active external references will
 | ||||||
|  | // be returned with junk data. This version of the test flattens every diff layer
 | ||||||
|  | // to check an internal corner case around the bottom-most memory accumulator.
 | ||||||
|  | func TestDiffLayerExternalInvalidationFullFlatten(t *testing.T) { | ||||||
|  | 	// Create an empty base layer and a snapshot tree out of it
 | ||||||
|  | 	base := &diskLayer{ | ||||||
|  | 		diskdb: rawdb.NewMemoryDatabase(), | ||||||
|  | 		root:   common.HexToHash("0x01"), | ||||||
|  | 		cache:  fastcache.New(1024 * 500), | ||||||
|  | 	} | ||||||
|  | 	snaps := &Tree{ | ||||||
|  | 		layers: map[common.Hash]snapshot{ | ||||||
|  | 			base.root: base, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 	// Commit two diffs on top and retrieve a reference to the bottommost
 | ||||||
|  | 	accounts := map[common.Hash][]byte{ | ||||||
|  | 		common.HexToHash("0xa1"): randomAccount(), | ||||||
|  | 	} | ||||||
|  | 	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil { | ||||||
|  | 		t.Fatalf("failed to create a diff layer: %v", err) | ||||||
|  | 	} | ||||||
|  | 	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil { | ||||||
|  | 		t.Fatalf("failed to create a diff layer: %v", err) | ||||||
|  | 	} | ||||||
|  | 	if n := len(snaps.layers); n != 3 { | ||||||
|  | 		t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 3) | ||||||
|  | 	} | ||||||
|  | 	ref := snaps.Snapshot(common.HexToHash("0x02")) | ||||||
|  | 
 | ||||||
|  | 	// Flatten the diff layer into the bottom accumulator
 | ||||||
|  | 	if err := snaps.Cap(common.HexToHash("0x03"), 1); err != nil { | ||||||
|  | 		t.Fatalf("failed to flatten diff layer into accumulator: %v", err) | ||||||
|  | 	} | ||||||
|  | 	// Since the accumulator diff layer was modified, ensure that data retrieval on the external reference fails
 | ||||||
|  | 	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale { | ||||||
|  | 		t.Errorf("stale reference returned account: %#x (err: %v)", acc, err) | ||||||
|  | 	} | ||||||
|  | 	if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale { | ||||||
|  | 		t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err) | ||||||
|  | 	} | ||||||
|  | 	if n := len(snaps.layers); n != 2 { | ||||||
|  | 		t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 2) | ||||||
|  | 		fmt.Println(snaps.layers) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Tests that if a diff layer becomes stale, no active external references will
 | ||||||
|  | // be returned with junk data. This version of the test retains the bottom diff
 | ||||||
|  | // layer to check the usual mode of operation where the accumulator is retained.
 | ||||||
|  | func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) { | ||||||
|  | 	// Create an empty base layer and a snapshot tree out of it
 | ||||||
|  | 	base := &diskLayer{ | ||||||
|  | 		diskdb: rawdb.NewMemoryDatabase(), | ||||||
|  | 		root:   common.HexToHash("0x01"), | ||||||
|  | 		cache:  fastcache.New(1024 * 500), | ||||||
|  | 	} | ||||||
|  | 	snaps := &Tree{ | ||||||
|  | 		layers: map[common.Hash]snapshot{ | ||||||
|  | 			base.root: base, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 	// Commit three diffs on top and retrieve a reference to the bottommost
 | ||||||
|  | 	accounts := map[common.Hash][]byte{ | ||||||
|  | 		common.HexToHash("0xa1"): randomAccount(), | ||||||
|  | 	} | ||||||
|  | 	if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil { | ||||||
|  | 		t.Fatalf("failed to create a diff layer: %v", err) | ||||||
|  | 	} | ||||||
|  | 	if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil { | ||||||
|  | 		t.Fatalf("failed to create a diff layer: %v", err) | ||||||
|  | 	} | ||||||
|  | 	if err := snaps.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), nil, accounts, nil); err != nil { | ||||||
|  | 		t.Fatalf("failed to create a diff layer: %v", err) | ||||||
|  | 	} | ||||||
|  | 	if n := len(snaps.layers); n != 4 { | ||||||
|  | 		t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 4) | ||||||
|  | 	} | ||||||
|  | 	ref := snaps.Snapshot(common.HexToHash("0x02")) | ||||||
|  | 
 | ||||||
|  | 	// Doing a Cap operation with many allowed layers should be a no-op
 | ||||||
|  | 	exp := len(snaps.layers) | ||||||
|  | 	if err := snaps.Cap(common.HexToHash("0x04"), 2000); err != nil { | ||||||
|  | 		t.Fatalf("failed to flatten diff layer into accumulator: %v", err) | ||||||
|  | 	} | ||||||
|  | 	if got := len(snaps.layers); got != exp { | ||||||
|  | 		t.Errorf("layers modified, got %d exp %d", got, exp) | ||||||
|  | 	} | ||||||
|  | 	// Flatten the diff layer into the bottom accumulator
 | ||||||
|  | 	if err := snaps.Cap(common.HexToHash("0x04"), 2); err != nil { | ||||||
|  | 		t.Fatalf("failed to flatten diff layer into accumulator: %v", err) | ||||||
|  | 	} | ||||||
|  | 	// Since the accumulator diff layer was modified, ensure that data retrieval on the external reference fails
 | ||||||
|  | 	if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale { | ||||||
|  | 		t.Errorf("stale reference returned account: %#x (err: %v)", acc, err) | ||||||
|  | 	} | ||||||
|  | 	if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale { | ||||||
|  | 		t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err) | ||||||
|  | 	} | ||||||
|  | 	if n := len(snaps.layers); n != 3 { | ||||||
|  | 		t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 3) | ||||||
|  | 		fmt.Println(snaps.layers) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // TestPostCapBasicDataAccess tests that account data stays accessible, or errors as expected, after capping/flattening.
 | ||||||
|  | func TestPostCapBasicDataAccess(t *testing.T) { | ||||||
|  | 	// setAccount is a helper to construct a random account entry and assign it to
 | ||||||
|  | 	// an account slot in a snapshot
 | ||||||
|  | 	setAccount := func(accKey string) map[common.Hash][]byte { | ||||||
|  | 		return map[common.Hash][]byte{ | ||||||
|  | 			common.HexToHash(accKey): randomAccount(), | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// Create a starting base layer and a snapshot tree out of it
 | ||||||
|  | 	base := &diskLayer{ | ||||||
|  | 		diskdb: rawdb.NewMemoryDatabase(), | ||||||
|  | 		root:   common.HexToHash("0x01"), | ||||||
|  | 		cache:  fastcache.New(1024 * 500), | ||||||
|  | 	} | ||||||
|  | 	snaps := &Tree{ | ||||||
|  | 		layers: map[common.Hash]snapshot{ | ||||||
|  | 			base.root: base, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 	// The lowest difflayer
 | ||||||
|  | 	snaps.Update(common.HexToHash("0xa1"), common.HexToHash("0x01"), nil, setAccount("0xa1"), nil) | ||||||
|  | 	snaps.Update(common.HexToHash("0xa2"), common.HexToHash("0xa1"), nil, setAccount("0xa2"), nil) | ||||||
|  | 	snaps.Update(common.HexToHash("0xb2"), common.HexToHash("0xa1"), nil, setAccount("0xb2"), nil) | ||||||
|  | 
 | ||||||
|  | 	snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil) | ||||||
|  | 	snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil) | ||||||
|  | 
 | ||||||
|  | 	// checkExist verifies if an account exists in a snapshot
 | ||||||
|  | 	checkExist := func(layer *diffLayer, key string) error { | ||||||
|  | 		if data, _ := layer.Account(common.HexToHash(key)); data == nil { | ||||||
|  | 			return fmt.Errorf("expected %x to exist, got nil", common.HexToHash(key)) | ||||||
|  | 		} | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 	// shouldErr checks that an account access errors as expected
 | ||||||
|  | 	shouldErr := func(layer *diffLayer, key string) error { | ||||||
|  | 		if data, err := layer.Account(common.HexToHash(key)); err == nil { | ||||||
|  | 			return fmt.Errorf("expected error, got data %x", data) | ||||||
|  | 		} | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 	// check basics
 | ||||||
|  | 	snap := snaps.Snapshot(common.HexToHash("0xb3")).(*diffLayer) | ||||||
|  | 
 | ||||||
|  | 	if err := checkExist(snap, "0xa1"); err != nil { | ||||||
|  | 		t.Error(err) | ||||||
|  | 	} | ||||||
|  | 	if err := checkExist(snap, "0xb2"); err != nil { | ||||||
|  | 		t.Error(err) | ||||||
|  | 	} | ||||||
|  | 	if err := checkExist(snap, "0xb3"); err != nil { | ||||||
|  | 		t.Error(err) | ||||||
|  | 	} | ||||||
|  | 	// Cap to a bad root should fail
 | ||||||
|  | 	if err := snaps.Cap(common.HexToHash("0x1337"), 0); err == nil { | ||||||
|  | 		t.Errorf("expected error, got none") | ||||||
|  | 	} | ||||||
|  | 	// Now, merge the a-chain
 | ||||||
|  | 	snaps.Cap(common.HexToHash("0xa3"), 0) | ||||||
|  | 
 | ||||||
|  | 	// At this point, a2 got merged into a1. Thus, a1 is now modified, and as a1 is
 | ||||||
|  | 	// the parent of b2, b2 should no longer be able to iterate into its parent.
 | ||||||
|  | 
 | ||||||
|  | 	// These should still be accessible
 | ||||||
|  | 	if err := checkExist(snap, "0xb2"); err != nil { | ||||||
|  | 		t.Error(err) | ||||||
|  | 	} | ||||||
|  | 	if err := checkExist(snap, "0xb3"); err != nil { | ||||||
|  | 		t.Error(err) | ||||||
|  | 	} | ||||||
|  | 	// But these would need iteration into the modified parent
 | ||||||
|  | 	if err := shouldErr(snap, "0xa1"); err != nil { | ||||||
|  | 		t.Error(err) | ||||||
|  | 	} | ||||||
|  | 	if err := shouldErr(snap, "0xa2"); err != nil { | ||||||
|  | 		t.Error(err) | ||||||
|  | 	} | ||||||
|  | 	if err := shouldErr(snap, "0xa3"); err != nil { | ||||||
|  | 		t.Error(err) | ||||||
|  | 	} | ||||||
|  | 	// Now, merge it again, just for fun. It should now error, since a3
 | ||||||
|  | 	// is a disk layer
 | ||||||
|  | 	if err := snaps.Cap(common.HexToHash("0xa3"), 0); err == nil { | ||||||
|  | 		t.Error("expected error capping the disk layer, got none") | ||||||
|  | 	} | ||||||
|  | } | ||||||
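Taken together, these tests pin down the contract for external snapshot references: once Cap flattens the layer a reference points at, every read on it returns ErrSnapshotStale, and the holder is expected to re-resolve the layer from the tree. A minimal consumer-side sketch of that pattern (illustrative only, written as in-package code; readWithRetry is not part of this change):

    // readWithRetry resolves the layer for root and retries once if the
    // reference was invalidated by a concurrent Cap. Hedged sketch.
    func readWithRetry(snaps *Tree, root, accHash common.Hash) (*Account, error) {
    	ref := snaps.Snapshot(root)
    	if ref == nil {
    		return nil, fmt.Errorf("no snapshot layer for root %x", root)
    	}
    	acc, err := ref.Account(accHash)
    	if err == ErrSnapshotStale {
    		// The old layer was flattened away; the tree may still serve this
    		// root from the new disk layer, so resolve and retry once.
    		if ref = snaps.Snapshot(root); ref != nil {
    			acc, err = ref.Account(accHash)
    		}
    	}
    	return acc, err
    }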
							
								
								
									
36  core/state/snapshot/sort.go  Normal file
							| @ -0,0 +1,36 @@ | |||||||
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | package snapshot | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 
 | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // hashes is a helper to implement sort.Interface.
 | ||||||
|  | type hashes []common.Hash | ||||||
|  | 
 | ||||||
|  | // Len is the number of elements in the collection.
 | ||||||
|  | func (hs hashes) Len() int { return len(hs) } | ||||||
|  | 
 | ||||||
|  | // Less reports whether the element with index i should sort before the element
 | ||||||
|  | // with index j.
 | ||||||
|  | func (hs hashes) Less(i, j int) bool { return bytes.Compare(hs[i][:], hs[j][:]) < 0 } | ||||||
|  | 
 | ||||||
|  | // Swap swaps the elements with indexes i and j.
 | ||||||
|  | func (hs hashes) Swap(i, j int) { hs[i], hs[j] = hs[j], hs[i] } | ||||||
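Since hashes only implements sort.Interface, ordering still goes through the standard library. A usage sketch (hypothetical call site inside the package, with "sort" imported):

    list := []common.Hash{
    	common.HexToHash("0x03"),
    	common.HexToHash("0x01"),
    	common.HexToHash("0x02"),
    }
    sort.Sort(hashes(list)) // list is now 0x01, 0x02, 0x03 in ascending byte order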
							
								
								
									
130  core/state/snapshot/wipe.go  Normal file
							| @ -0,0 +1,130 @@ | |||||||
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | package snapshot | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"time" | ||||||
|  | 
 | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | 	"github.com/ethereum/go-ethereum/core/rawdb" | ||||||
|  | 	"github.com/ethereum/go-ethereum/ethdb" | ||||||
|  | 	"github.com/ethereum/go-ethereum/log" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // wipeSnapshot starts a goroutine to iterate over the entire key-value database
 | ||||||
|  | // and delete all the data associated with the snapshot (accounts, storage,
 | ||||||
|  | // metadata). After all is done, the snapshot range of the database is compacted
 | ||||||
|  | // to free up unused data blocks.
 | ||||||
|  | func wipeSnapshot(db ethdb.KeyValueStore, full bool) chan struct{} { | ||||||
|  | 	// Wipe the snapshot root marker synchronously
 | ||||||
|  | 	if full { | ||||||
|  | 		rawdb.DeleteSnapshotRoot(db) | ||||||
|  | 	} | ||||||
|  | 	// Wipe everything else asynchronously
 | ||||||
|  | 	wiper := make(chan struct{}, 1) | ||||||
|  | 	go func() { | ||||||
|  | 		if err := wipeContent(db); err != nil { | ||||||
|  | 			log.Error("Failed to wipe state snapshot", "err", err) // Database close will trigger this
 | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		close(wiper) | ||||||
|  | 	}() | ||||||
|  | 	return wiper | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // wipeContent iterates over the entire key-value database and deletes all the
 | ||||||
|  | // data associated with the snapshot (accounts, storage), but not the root hash
 | ||||||
|  | // as the wiper is meant to run on a background thread, whereas the root needs to be
 | ||||||
|  | // removed in sync to avoid data races. After all is done, the snapshot range of
 | ||||||
|  | // the database is compacted to free up unused data blocks.
 | ||||||
|  | func wipeContent(db ethdb.KeyValueStore) error { | ||||||
|  | 	if err := wipeKeyRange(db, "accounts", rawdb.SnapshotAccountPrefix, len(rawdb.SnapshotAccountPrefix)+common.HashLength); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	if err := wipeKeyRange(db, "storage", rawdb.SnapshotStoragePrefix, len(rawdb.SnapshotStoragePrefix)+2*common.HashLength); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	// Compact the snapshot section of the database to get rid of unused space
 | ||||||
|  | 	start := time.Now() | ||||||
|  | 
 | ||||||
|  | 	log.Info("Compacting snapshot account area ") | ||||||
|  | 	end := common.CopyBytes(rawdb.SnapshotAccountPrefix) | ||||||
|  | 	end[len(end)-1]++ | ||||||
|  | 
 | ||||||
|  | 	if err := db.Compact(rawdb.SnapshotAccountPrefix, end); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	log.Info("Compacting snapshot storage area ") | ||||||
|  | 	end = common.CopyBytes(rawdb.SnapshotStoragePrefix) | ||||||
|  | 	end[len(end)-1]++ | ||||||
|  | 
 | ||||||
|  | 	if err := db.Compact(rawdb.SnapshotStoragePrefix, end); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	log.Info("Compacted snapshot area in database", "elapsed", common.PrettyDuration(time.Since(start))) | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // wipeKeyRange deletes a range of keys from the database starting with prefix
 | ||||||
|  | // and having a specific total key length.
 | ||||||
|  | func wipeKeyRange(db ethdb.KeyValueStore, kind string, prefix []byte, keylen int) error { | ||||||
|  | 	// Batch deletions together to avoid holding an iterator for too long
 | ||||||
|  | 	var ( | ||||||
|  | 		batch = db.NewBatch() | ||||||
|  | 		items int | ||||||
|  | 	) | ||||||
|  | 	// Iterate over the key range and delete every key within it
 | ||||||
|  | 	start, logged := time.Now(), time.Now() | ||||||
|  | 
 | ||||||
|  | 	it := db.NewIteratorWithStart(prefix) | ||||||
|  | 	for it.Next() { | ||||||
|  | 		// Skip any keys with the correct prefix but wrong length (trie nodes)
 | ||||||
|  | 		key := it.Key() | ||||||
|  | 		if !bytes.HasPrefix(key, prefix) { | ||||||
|  | 			break | ||||||
|  | 		} | ||||||
|  | 		if len(key) != keylen { | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 		// Delete the key and periodically recreate the batch and iterator
 | ||||||
|  | 		batch.Delete(key) | ||||||
|  | 		items++ | ||||||
|  | 
 | ||||||
|  | 		if items%10000 == 0 { | ||||||
|  | 			// Batch too large (or iterator too long lived), flush and recreate
 | ||||||
|  | 			it.Release() | ||||||
|  | 			if err := batch.Write(); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 			batch.Reset() | ||||||
|  | 			it = db.NewIteratorWithStart(key) | ||||||
|  | 
 | ||||||
|  | 			if time.Since(logged) > 8*time.Second { | ||||||
|  | 				log.Info("Deleting state snapshot leftovers", "kind", kind, "wiped", items, "elapsed", common.PrettyDuration(time.Since(start))) | ||||||
|  | 				logged = time.Now() | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	it.Release() | ||||||
|  | 	if err := batch.Write(); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	log.Info("Deleted state snapshot leftovers", "kind", kind, "wiped", items, "elapsed", common.PrettyDuration(time.Since(start))) | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
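Note the channel semantics: wipeSnapshot returns immediately, and the returned channel is closed only when the background wipe succeeds; on an error the goroutine logs and exits without closing it. A blocking usage sketch, mirroring what the test below does:

    // Wipe synchronously by waiting on the returned channel. Hedged sketch;
    // a production caller may prefer a select with a timeout, since a failed
    // wipe never closes the channel.
    done := wipeSnapshot(db, true) // true also removes the snapshot root marker
    <-done                         // blocks until the wipe and compaction finish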
							
								
								
									
124  core/state/snapshot/wipe_test.go  Normal file
							| @ -0,0 +1,124 @@ | |||||||
|  | // Copyright 2019 The go-ethereum Authors
 | ||||||
|  | // This file is part of the go-ethereum library.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||||
|  | // it under the terms of the GNU Lesser General Public License as published by
 | ||||||
|  | // the Free Software Foundation, either version 3 of the License, or
 | ||||||
|  | // (at your option) any later version.
 | ||||||
|  | //
 | ||||||
|  | // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||||
|  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||||
|  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||||
|  | // GNU Lesser General Public License for more details.
 | ||||||
|  | //
 | ||||||
|  | // You should have received a copy of the GNU Lesser General Public License
 | ||||||
|  | // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||||
|  | 
 | ||||||
|  | package snapshot | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"math/rand" | ||||||
|  | 	"testing" | ||||||
|  | 
 | ||||||
|  | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | 	"github.com/ethereum/go-ethereum/core/rawdb" | ||||||
|  | 	"github.com/ethereum/go-ethereum/ethdb/memorydb" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // Tests that given a database with random data content, all parts of a snapshot
 | ||||||
|  | // can be correctly wiped without touching anything else.
 | ||||||
|  | func TestWipe(t *testing.T) { | ||||||
|  | 	// Create a database with some random snapshot data
 | ||||||
|  | 	db := memorydb.New() | ||||||
|  | 
 | ||||||
|  | 	for i := 0; i < 128; i++ { | ||||||
|  | 		account := randomHash() | ||||||
|  | 		rawdb.WriteAccountSnapshot(db, account, randomHash().Bytes()) | ||||||
|  | 		for j := 0; j < 1024; j++ { | ||||||
|  | 			rawdb.WriteStorageSnapshot(db, account, randomHash(), randomHash().Bytes()) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	rawdb.WriteSnapshotRoot(db, randomHash()) | ||||||
|  | 
 | ||||||
|  | 	// Add some random non-snapshot data too to make wiping harder
 | ||||||
|  | 	for i := 0; i < 65536; i++ { | ||||||
|  | 		// Generate a key that's the wrong length for a state snapshot item
 | ||||||
|  | 		var keysize int | ||||||
|  | 		for keysize == 0 || keysize == 32 || keysize == 64 { | ||||||
|  | 			keysize = 8 + rand.Intn(64) // +8 to ensure we will "never" randomize duplicates
 | ||||||
|  | 		} | ||||||
|  | 		// Randomize the suffix, dedup and inject it under the snapshot namespace
 | ||||||
|  | 		keysuffix := make([]byte, keysize) | ||||||
|  | 		rand.Read(keysuffix) | ||||||
|  | 
 | ||||||
|  | 		if rand.Int31n(2) == 0 { | ||||||
|  | 			db.Put(append(rawdb.SnapshotAccountPrefix, keysuffix...), randomHash().Bytes()) | ||||||
|  | 		} else { | ||||||
|  | 			db.Put(append(rawdb.SnapshotStoragePrefix, keysuffix...), randomHash().Bytes()) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// Sanity check that all the keys are present
 | ||||||
|  | 	var items int | ||||||
|  | 
 | ||||||
|  | 	it := db.NewIteratorWithPrefix(rawdb.SnapshotAccountPrefix) | ||||||
|  | 	defer it.Release() | ||||||
|  | 
 | ||||||
|  | 	for it.Next() { | ||||||
|  | 		key := it.Key() | ||||||
|  | 		if len(key) == len(rawdb.SnapshotAccountPrefix)+common.HashLength { | ||||||
|  | 			items++ | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	it = db.NewIteratorWithPrefix(rawdb.SnapshotStoragePrefix) | ||||||
|  | 	defer it.Release() | ||||||
|  | 
 | ||||||
|  | 	for it.Next() { | ||||||
|  | 		key := it.Key() | ||||||
|  | 		if len(key) == len(rawdb.SnapshotStoragePrefix)+2*common.HashLength { | ||||||
|  | 			items++ | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	if items != 128+128*1024 { | ||||||
|  | 		t.Fatalf("snapshot size mismatch: have %d, want %d", items, 128+128*1024) | ||||||
|  | 	} | ||||||
|  | 	if hash := rawdb.ReadSnapshotRoot(db); hash == (common.Hash{}) { | ||||||
|  | 		t.Errorf("snapshot block marker mismatch: have %#x, want <not-nil>", hash) | ||||||
|  | 	} | ||||||
|  | 	// Wipe all snapshot entries from the database
 | ||||||
|  | 	<-wipeSnapshot(db, true) | ||||||
|  | 
 | ||||||
|  | 	// Iterate over the database and ensure no snapshot information remains
 | ||||||
|  | 	it = db.NewIteratorWithPrefix(rawdb.SnapshotAccountPrefix) | ||||||
|  | 	defer it.Release() | ||||||
|  | 
 | ||||||
|  | 	for it.Next() { | ||||||
|  | 		key := it.Key() | ||||||
|  | 		if len(key) == len(rawdb.SnapshotAccountPrefix)+common.HashLength { | ||||||
|  | 			t.Errorf("snapshot entry remained after wipe: %x", key) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	it = db.NewIteratorWithPrefix(rawdb.SnapshotStoragePrefix) | ||||||
|  | 	defer it.Release() | ||||||
|  | 
 | ||||||
|  | 	for it.Next() { | ||||||
|  | 		key := it.Key() | ||||||
|  | 		if len(key) == len(rawdb.SnapshotStoragePrefix)+2*common.HashLength { | ||||||
|  | 			t.Errorf("snapshot entry remained after wipe: %x", key) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	if hash := rawdb.ReadSnapshotRoot(db); hash != (common.Hash{}) { | ||||||
|  | 		t.Errorf("snapshot block marker remained after wipe: %#x", hash) | ||||||
|  | 	} | ||||||
|  | 	// Iterate over the database and ensure miscellaneous items are present
 | ||||||
|  | 	items = 0 | ||||||
|  | 
 | ||||||
|  | 	it = db.NewIterator() | ||||||
|  | 	defer it.Release() | ||||||
|  | 
 | ||||||
|  | 	for it.Next() { | ||||||
|  | 		items++ | ||||||
|  | 	} | ||||||
|  | 	if items != 65536 { | ||||||
|  | 		t.Fatalf("misc item count mismatch: have %d, want %d", items, 65536) | ||||||
|  | 	} | ||||||
|  | } | ||||||
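The length filters in this test encode the snapshot key schema: an account entry is the account prefix plus a 32-byte account hash, and a storage entry is the storage prefix plus a 32-byte account hash plus a 32-byte slot hash. A sketch of that raw layout (accountHash and slotHash are hypothetical common.Hash values; rawdb has its own write helpers):

    // The key shapes wipeKeyRange filters on; anything else under the same
    // prefixes is skipped as a trie node.
    accountKey := append(common.CopyBytes(rawdb.SnapshotAccountPrefix), accountHash.Bytes()...)
    storageKey := append(append(common.CopyBytes(rawdb.SnapshotStoragePrefix),
    	accountHash.Bytes()...), slotHash.Bytes()...)

    // len(accountKey) == len(rawdb.SnapshotAccountPrefix) + common.HashLength
    // len(storageKey) == len(rawdb.SnapshotStoragePrefix) + 2*common.HashLength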
| @ -195,15 +195,35 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has | |||||||
| 	if value, cached := s.originStorage[key]; cached { | 	if value, cached := s.originStorage[key]; cached { | ||||||
| 		return value | 		return value | ||||||
| 	} | 	} | ||||||
| 	// Track the amount of time wasted on reading the storage trie
 | 	// If no live objects are available, attempt to use snapshots
 | ||||||
| 	if metrics.EnabledExpensive { | 	var ( | ||||||
| 		defer func(start time.Time) { s.db.StorageReads += time.Since(start) }(time.Now()) | 		enc []byte | ||||||
|  | 		err error | ||||||
|  | 	) | ||||||
|  | 	if s.db.snap != nil { | ||||||
|  | 		if metrics.EnabledExpensive { | ||||||
|  | 			defer func(start time.Time) { s.db.SnapshotStorageReads += time.Since(start) }(time.Now()) | ||||||
|  | 		} | ||||||
|  | 		// If the object was destructed in *this* block (and potentially resurrected),
 | ||||||
|  | 		// the storage has been cleared out, and we should *not* consult the previous
 | ||||||
|  | 		// snapshot about any storage values. The only possible alternatives are:
 | ||||||
|  | 		//   1) resurrect happened, and new slot values were set -- those should
 | ||||||
|  | 		//      have been handled via pendingStorage above.
 | ||||||
|  | 		//   2) we don't have new values, and can deliver an empty response back
 | ||||||
|  | 		if _, destructed := s.db.snapDestructs[s.addrHash]; destructed { | ||||||
|  | 			return common.Hash{} | ||||||
|  | 		} | ||||||
|  | 		enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key[:])) | ||||||
| 	} | 	} | ||||||
| 	// Otherwise load the value from the database
 | 	// If snapshot unavailable or reading from it failed, load from the database
 | ||||||
| 	enc, err := s.getTrie(db).TryGet(key[:]) | 	if s.db.snap == nil || err != nil { | ||||||
| 	if err != nil { | 		if metrics.EnabledExpensive { | ||||||
| 		s.setError(err) | 			defer func(start time.Time) { s.db.StorageReads += time.Since(start) }(time.Now()) | ||||||
| 		return common.Hash{} | 		} | ||||||
|  | 		if enc, err = s.getTrie(db).TryGet(key[:]); err != nil { | ||||||
|  | 			s.setError(err) | ||||||
|  | 			return common.Hash{} | ||||||
|  | 		} | ||||||
| 	} | 	} | ||||||
| 	var value common.Hash | 	var value common.Hash | ||||||
| 	if len(enc) > 0 { | 	if len(enc) > 0 { | ||||||
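This hunk establishes the read pattern used throughout the change: consult the flat snapshot first, and fall back to the trie when no snapshot covers the root or the snapshot read fails. Note the key handling: the snapshot is addressed by the Keccak256 hash of the slot key, while the secure trie takes the raw key and hashes it internally. Distilled into a standalone sketch (the function parameters are illustrative, not this PR's API):

    // Snapshot-first, trie-fallback storage read. Hedged sketch.
    func readSlot(snapRead func(common.Hash) ([]byte, error),
    	trieRead func([]byte) ([]byte, error), key common.Hash) ([]byte, error) {
    	if snapRead != nil {
    		if enc, err := snapRead(crypto.Keccak256Hash(key[:])); err == nil {
    			return enc, nil // flat lookup hit (possibly an empty value)
    		}
    		// Stale or missing snapshot: fall through to the authoritative trie.
    	}
    	return trieRead(key[:])
    }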
| @ -283,6 +303,16 @@ func (s *stateObject) updateTrie(db Database) Trie { | |||||||
| 	if metrics.EnabledExpensive { | 	if metrics.EnabledExpensive { | ||||||
| 		defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now()) | 		defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now()) | ||||||
| 	} | 	} | ||||||
|  | 	// Retrieve the snapshot storage map for the object
 | ||||||
|  | 	var storage map[common.Hash][]byte | ||||||
|  | 	if s.db.snap != nil { | ||||||
|  | 		// Retrieve the old storage map, if available, create a new one otherwise
 | ||||||
|  | 		storage = s.db.snapStorage[s.addrHash] | ||||||
|  | 		if storage == nil { | ||||||
|  | 			storage = make(map[common.Hash][]byte) | ||||||
|  | 			s.db.snapStorage[s.addrHash] = storage | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
| 	// Insert all the pending updates into the trie
 | 	// Insert all the pending updates into the trie
 | ||||||
| 	tr := s.getTrie(db) | 	tr := s.getTrie(db) | ||||||
| 	for key, value := range s.pendingStorage { | 	for key, value := range s.pendingStorage { | ||||||
| @ -292,13 +322,18 @@ func (s *stateObject) updateTrie(db Database) Trie { | |||||||
| 		} | 		} | ||||||
| 		s.originStorage[key] = value | 		s.originStorage[key] = value | ||||||
| 
 | 
 | ||||||
|  | 		var v []byte | ||||||
| 		if (value == common.Hash{}) { | 		if (value == common.Hash{}) { | ||||||
| 			s.setError(tr.TryDelete(key[:])) | 			s.setError(tr.TryDelete(key[:])) | ||||||
| 			continue | 		} else { | ||||||
|  | 			// Encoding []byte cannot fail, ok to ignore the error.
 | ||||||
|  | 			v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:])) | ||||||
|  | 			s.setError(tr.TryUpdate(key[:], v)) | ||||||
|  | 		} | ||||||
|  | 		// If state snapshotting is active, cache the data until commit
 | ||||||
|  | 		if storage != nil { | ||||||
|  | 			storage[crypto.Keccak256Hash(key[:])] = v // v will be nil if value is 0x00
 | ||||||
| 		} | 		} | ||||||
| 		// Encoding []byte cannot fail, ok to ignore the error.
 |  | ||||||
| 		v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(value[:])) |  | ||||||
| 		s.setError(tr.TryUpdate(key[:], v)) |  | ||||||
| 	} | 	} | ||||||
| 	if len(s.pendingStorage) > 0 { | 	if len(s.pendingStorage) > 0 { | ||||||
| 		s.pendingStorage = make(Storage) | 		s.pendingStorage = make(Storage) | ||||||
|  | |||||||
| @ -36,7 +36,7 @@ type stateTest struct { | |||||||
| 
 | 
 | ||||||
| func newStateTest() *stateTest { | func newStateTest() *stateTest { | ||||||
| 	db := rawdb.NewMemoryDatabase() | 	db := rawdb.NewMemoryDatabase() | ||||||
| 	sdb, _ := New(common.Hash{}, NewDatabase(db)) | 	sdb, _ := New(common.Hash{}, NewDatabase(db), nil) | ||||||
| 	return &stateTest{db: db, state: sdb} | 	return &stateTest{db: db, state: sdb} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| @ -146,7 +146,7 @@ func TestSnapshotEmpty(t *testing.T) { | |||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func TestSnapshot2(t *testing.T) { | func TestSnapshot2(t *testing.T) { | ||||||
| 	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase())) | 	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 
 | 
 | ||||||
| 	stateobjaddr0 := toAddr([]byte("so0")) | 	stateobjaddr0 := toAddr([]byte("so0")) | ||||||
| 	stateobjaddr1 := toAddr([]byte("so1")) | 	stateobjaddr1 := toAddr([]byte("so1")) | ||||||
|  | |||||||
| @ -25,6 +25,7 @@ import ( | |||||||
| 	"time" | 	"time" | ||||||
| 
 | 
 | ||||||
| 	"github.com/ethereum/go-ethereum/common" | 	"github.com/ethereum/go-ethereum/common" | ||||||
|  | 	"github.com/ethereum/go-ethereum/core/state/snapshot" | ||||||
| 	"github.com/ethereum/go-ethereum/core/types" | 	"github.com/ethereum/go-ethereum/core/types" | ||||||
| 	"github.com/ethereum/go-ethereum/crypto" | 	"github.com/ethereum/go-ethereum/crypto" | ||||||
| 	"github.com/ethereum/go-ethereum/log" | 	"github.com/ethereum/go-ethereum/log" | ||||||
| @ -66,6 +67,12 @@ type StateDB struct { | |||||||
| 	db   Database | 	db   Database | ||||||
| 	trie Trie | 	trie Trie | ||||||
| 
 | 
 | ||||||
|  | 	snaps         *snapshot.Tree | ||||||
|  | 	snap          snapshot.Snapshot | ||||||
|  | 	snapDestructs map[common.Hash]struct{} | ||||||
|  | 	snapAccounts  map[common.Hash][]byte | ||||||
|  | 	snapStorage   map[common.Hash]map[common.Hash][]byte | ||||||
|  | 
 | ||||||
| 	// This map holds 'live' objects, which will get modified while processing a state transition.
 | 	// This map holds 'live' objects, which will get modified while processing a state transition.
 | ||||||
| 	stateObjects        map[common.Address]*stateObject | 	stateObjects        map[common.Address]*stateObject | ||||||
| 	stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
 | 	stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
 | ||||||
| @ -95,32 +102,44 @@ type StateDB struct { | |||||||
| 	nextRevisionId int | 	nextRevisionId int | ||||||
| 
 | 
 | ||||||
| 	// Measurements gathered during execution for debugging purposes
 | 	// Measurements gathered during execution for debugging purposes
 | ||||||
| 	AccountReads   time.Duration | 	AccountReads         time.Duration | ||||||
| 	AccountHashes  time.Duration | 	AccountHashes        time.Duration | ||||||
| 	AccountUpdates time.Duration | 	AccountUpdates       time.Duration | ||||||
| 	AccountCommits time.Duration | 	AccountCommits       time.Duration | ||||||
| 	StorageReads   time.Duration | 	StorageReads         time.Duration | ||||||
| 	StorageHashes  time.Duration | 	StorageHashes        time.Duration | ||||||
| 	StorageUpdates time.Duration | 	StorageUpdates       time.Duration | ||||||
| 	StorageCommits time.Duration | 	StorageCommits       time.Duration | ||||||
|  | 	SnapshotAccountReads time.Duration | ||||||
|  | 	SnapshotStorageReads time.Duration | ||||||
|  | 	SnapshotCommits      time.Duration | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Create a new state from a given trie.
 | // Create a new state from a given trie.
 | ||||||
| func New(root common.Hash, db Database) (*StateDB, error) { | func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) { | ||||||
| 	tr, err := db.OpenTrie(root) | 	tr, err := db.OpenTrie(root) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, err | 		return nil, err | ||||||
| 	} | 	} | ||||||
| 	return &StateDB{ | 	sdb := &StateDB{ | ||||||
| 		db:                  db, | 		db:                  db, | ||||||
| 		trie:                tr, | 		trie:                tr, | ||||||
|  | 		snaps:               snaps, | ||||||
| 		stateObjects:        make(map[common.Address]*stateObject), | 		stateObjects:        make(map[common.Address]*stateObject), | ||||||
| 		stateObjectsPending: make(map[common.Address]struct{}), | 		stateObjectsPending: make(map[common.Address]struct{}), | ||||||
| 		stateObjectsDirty:   make(map[common.Address]struct{}), | 		stateObjectsDirty:   make(map[common.Address]struct{}), | ||||||
| 		logs:                make(map[common.Hash][]*types.Log), | 		logs:                make(map[common.Hash][]*types.Log), | ||||||
| 		preimages:           make(map[common.Hash][]byte), | 		preimages:           make(map[common.Hash][]byte), | ||||||
| 		journal:             newJournal(), | 		journal:             newJournal(), | ||||||
| 	}, nil | 	} | ||||||
|  | 	if sdb.snaps != nil { | ||||||
|  | 		if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil { | ||||||
|  | 			sdb.snapDestructs = make(map[common.Hash]struct{}) | ||||||
|  | 			sdb.snapAccounts = make(map[common.Hash][]byte) | ||||||
|  | 			sdb.snapStorage = make(map[common.Hash]map[common.Hash][]byte) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return sdb, nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // setError remembers the first non-nil error it is called with.
 | // setError remembers the first non-nil error it is called with.
 | ||||||
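With the widened signature, New takes an optional snapshot tree; nil reproduces the old trie-only behaviour, which is why every call-site update elsewhere in this PR simply appends nil. A hedged construction sketch from outside the package:

    // Both construction modes introduced by this change; `snaps` would be a
    // *snapshot.Tree owned by the blockchain. Hedged sketch.
    func openStates(root common.Hash, db state.Database, snaps *snapshot.Tree) {
    	plain, _ := state.New(root, db, nil)   // trie-only, pre-change behaviour
    	accel, _ := state.New(root, db, snaps) // snapshot-accelerated; if snaps has
    	                                       // no layer for root, reads fall back
    	                                       // to the trie transparently
    	_, _ = plain, accel
    }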
| @ -152,6 +171,15 @@ func (s *StateDB) Reset(root common.Hash) error { | |||||||
| 	s.logSize = 0 | 	s.logSize = 0 | ||||||
| 	s.preimages = make(map[common.Hash][]byte) | 	s.preimages = make(map[common.Hash][]byte) | ||||||
| 	s.clearJournalAndRefund() | 	s.clearJournalAndRefund() | ||||||
|  | 
 | ||||||
|  | 	if s.snaps != nil { | ||||||
|  | 		s.snapAccounts, s.snapDestructs, s.snapStorage = nil, nil, nil | ||||||
|  | 		if s.snap = s.snaps.Snapshot(root); s.snap != nil { | ||||||
|  | 			s.snapDestructs = make(map[common.Hash]struct{}) | ||||||
|  | 			s.snapAccounts = make(map[common.Hash][]byte) | ||||||
|  | 			s.snapStorage = make(map[common.Hash]map[common.Hash][]byte) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| @ -438,6 +466,14 @@ func (s *StateDB) updateStateObject(obj *stateObject) { | |||||||
| 		panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err)) | 		panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err)) | ||||||
| 	} | 	} | ||||||
| 	s.setError(s.trie.TryUpdate(addr[:], data)) | 	s.setError(s.trie.TryUpdate(addr[:], data)) | ||||||
|  | 
 | ||||||
|  | 	// If state snapshotting is active, cache the data until commit. Note, this
 | ||||||
|  | 	// update mechanism is not symmetric to the deletion, because whereas it is
 | ||||||
|  | 	// enough to track account updates at commit time, deletions need tracking
 | ||||||
|  | 	// at transaction boundary level to ensure we capture state clearing.
 | ||||||
|  | 	if s.snap != nil { | ||||||
|  | 		s.snapAccounts[obj.addrHash] = snapshot.AccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash) | ||||||
|  | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // deleteStateObject removes the given object from the state trie.
 | // deleteStateObject removes the given object from the state trie.
 | ||||||
| @ -470,20 +506,44 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { | |||||||
| 	if obj := s.stateObjects[addr]; obj != nil { | 	if obj := s.stateObjects[addr]; obj != nil { | ||||||
| 		return obj | 		return obj | ||||||
| 	} | 	} | ||||||
| 	// Track the amount of time wasted on loading the object from the database
 | 	// If no live objects are available, attempt to use snapshots
 | ||||||
| 	if metrics.EnabledExpensive { | 	var ( | ||||||
| 		defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now()) | 		data Account | ||||||
|  | 		err  error | ||||||
|  | 	) | ||||||
|  | 	if s.snap != nil { | ||||||
|  | 		if metrics.EnabledExpensive { | ||||||
|  | 			defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now()) | ||||||
|  | 		} | ||||||
|  | 		var acc *snapshot.Account | ||||||
|  | 		if acc, err = s.snap.Account(crypto.Keccak256Hash(addr[:])); err == nil { | ||||||
|  | 			if acc == nil { | ||||||
|  | 				return nil | ||||||
|  | 			} | ||||||
|  | 			data.Nonce, data.Balance, data.CodeHash = acc.Nonce, acc.Balance, acc.CodeHash | ||||||
|  | 			if len(data.CodeHash) == 0 { | ||||||
|  | 				data.CodeHash = emptyCodeHash | ||||||
|  | 			} | ||||||
|  | 			data.Root = common.BytesToHash(acc.Root) | ||||||
|  | 			if data.Root == (common.Hash{}) { | ||||||
|  | 				data.Root = emptyRoot | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
| 	} | 	} | ||||||
| 	// Load the object from the database
 | 	// If snapshot unavailable or reading from it failed, load from the database
 | ||||||
| 	enc, err := s.trie.TryGet(addr[:]) | 	if s.snap == nil || err != nil { | ||||||
| 	if len(enc) == 0 { | 		if metrics.EnabledExpensive { | ||||||
| 		s.setError(err) | 			defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now()) | ||||||
| 		return nil | 		} | ||||||
| 	} | 		enc, err := s.trie.TryGet(addr[:]) | ||||||
| 	var data Account | 		if len(enc) == 0 { | ||||||
| 	if err := rlp.DecodeBytes(enc, &data); err != nil { | 			s.setError(err) | ||||||
| 		log.Error("Failed to decode state object", "addr", addr, "err", err) | 			return nil | ||||||
| 		return nil | 		} | ||||||
|  | 		if err := rlp.DecodeBytes(enc, &data); err != nil { | ||||||
|  | 			log.Error("Failed to decode state object", "addr", addr, "err", err) | ||||||
|  | 			return nil | ||||||
|  | 		} | ||||||
| 	} | 	} | ||||||
| 	// Insert into the live set
 | 	// Insert into the live set
 | ||||||
| 	obj := newObject(s, addr, data) | 	obj := newObject(s, addr, data) | ||||||
| @ -509,12 +569,19 @@ func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject { | |||||||
| func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) { | func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) { | ||||||
| 	prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
 | 	prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
 | ||||||
| 
 | 
 | ||||||
|  | 	var prevdestruct bool | ||||||
|  | 	if s.snap != nil && prev != nil { | ||||||
|  | 		_, prevdestruct = s.snapDestructs[prev.addrHash] | ||||||
|  | 		if !prevdestruct { | ||||||
|  | 			s.snapDestructs[prev.addrHash] = struct{}{} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
| 	newobj = newObject(s, addr, Account{}) | 	newobj = newObject(s, addr, Account{}) | ||||||
| 	newobj.setNonce(0) // sets the object to dirty
 | 	newobj.setNonce(0) // sets the object to dirty
 | ||||||
| 	if prev == nil { | 	if prev == nil { | ||||||
| 		s.journal.append(createObjectChange{account: &addr}) | 		s.journal.append(createObjectChange{account: &addr}) | ||||||
| 	} else { | 	} else { | ||||||
| 		s.journal.append(resetObjectChange{prev: prev}) | 		s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct}) | ||||||
| 	} | 	} | ||||||
| 	s.setStateObject(newobj) | 	s.setStateObject(newobj) | ||||||
| 	return newobj, prev | 	return newobj, prev | ||||||
| @ -673,6 +740,16 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { | |||||||
| 		} | 		} | ||||||
| 		if obj.suicided || (deleteEmptyObjects && obj.empty()) { | 		if obj.suicided || (deleteEmptyObjects && obj.empty()) { | ||||||
| 			obj.deleted = true | 			obj.deleted = true | ||||||
|  | 
 | ||||||
|  | 			// If state snapshotting is active, also mark the destruction there.
 | ||||||
|  | 			// Note, we can't do this only at the end of a block because multiple
 | ||||||
|  | 			// transactions within the same block might self-destruct and then
 | ||||||
|  | 			// resurrect an account; but the snapshotter needs both events.
 | ||||||
|  | 			if s.snap != nil { | ||||||
|  | 				s.snapDestructs[obj.addrHash] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely)
 | ||||||
|  | 				delete(s.snapAccounts, obj.addrHash)       // Clear out any previously updated account data (may be recreated via a resurrect)
 | ||||||
|  | 				delete(s.snapStorage, obj.addrHash)        // Clear out any previously updated storage data (may be recreated via a resurrect)
 | ||||||
|  | 			} | ||||||
| 		} else { | 		} else { | ||||||
| 			obj.finalise() | 			obj.finalise() | ||||||
| 		} | 		} | ||||||
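Since that comment is the crux of the destruct bookkeeping, a concrete one-block timeline may help (a hedged walkthrough of the three maps, assuming two transactions touching the same account):

    // Tx1: the contract at addrHash self-destructs; Finalise records
    //   s.snapDestructs[addrHash] = struct{}{} // stays set for the whole block
    //   delete(s.snapAccounts, addrHash)       // stale account data dropped
    //   delete(s.snapStorage, addrHash)        // stale slots dropped
    // Tx2: the account is recreated and writes slot S; the commit path refills
    //   s.snapAccounts[addrHash] = <new account RLP> // via updateStateObject
    //   s.snapStorage[addrHash][S] = <new value>     // via updateTrie
    // Commit hands all three maps to snaps.Update, so the snapshot layer sees
    // both the clearing event and the resurrected values.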
| @ -748,13 +825,14 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { | |||||||
| 		s.stateObjectsDirty = make(map[common.Address]struct{}) | 		s.stateObjectsDirty = make(map[common.Address]struct{}) | ||||||
| 	} | 	} | ||||||
| 	// Write the account trie changes, measuring the amount of wasted time
 | 	// Write the account trie changes, measuring the amount of wasted time
 | ||||||
|  | 	var start time.Time | ||||||
| 	if metrics.EnabledExpensive { | 	if metrics.EnabledExpensive { | ||||||
| 		defer func(start time.Time) { s.AccountCommits += time.Since(start) }(time.Now()) | 		start = time.Now() | ||||||
| 	} | 	} | ||||||
| 	// The onleaf func is called _serially_, so we can reuse the same account
 | 	// The onleaf func is called _serially_, so we can reuse the same account
 | ||||||
| 	// for unmarshalling every time.
 | 	// for unmarshalling every time.
 | ||||||
| 	var account Account | 	var account Account | ||||||
| 	return s.trie.Commit(func(leaf []byte, parent common.Hash) error { | 	root, err := s.trie.Commit(func(leaf []byte, parent common.Hash) error { | ||||||
| 		if err := rlp.DecodeBytes(leaf, &account); err != nil { | 		if err := rlp.DecodeBytes(leaf, &account); err != nil { | ||||||
| 			return nil | 			return nil | ||||||
| 		} | 		} | ||||||
| @ -767,4 +845,24 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { | |||||||
| 		} | 		} | ||||||
| 		return nil | 		return nil | ||||||
| 	}) | 	}) | ||||||
|  | 	if metrics.EnabledExpensive { | ||||||
|  | 		s.AccountCommits += time.Since(start) | ||||||
|  | 	} | ||||||
|  | 	// If snapshotting is enabled, update the snapshot tree with this new version
 | ||||||
|  | 	if s.snap != nil { | ||||||
|  | 		if metrics.EnabledExpensive { | ||||||
|  | 			defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now()) | ||||||
|  | 		} | ||||||
|  | 		// Only update if there's a state transition (skip empty Clique blocks)
 | ||||||
|  | 		if parent := s.snap.Root(); parent != root { | ||||||
|  | 			if err := s.snaps.Update(root, parent, s.snapDestructs, s.snapAccounts, s.snapStorage); err != nil { | ||||||
|  | 				log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err) | ||||||
|  | 			} | ||||||
|  | 			if err := s.snaps.Cap(root, 127); err != nil { // Persistent layer is 128th, the last available trie
 | ||||||
|  | 				log.Warn("Failed to cap snapshot tree", "root", root, "layers", 127, "err", err) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil | ||||||
|  | 	} | ||||||
|  | 	return root, err | ||||||
| } | } | ||||||
|  | |||||||
| @ -39,7 +39,7 @@ import ( | |||||||
| func TestUpdateLeaks(t *testing.T) { | func TestUpdateLeaks(t *testing.T) { | ||||||
| 	// Create an empty state database
 | 	// Create an empty state database
 | ||||||
| 	db := rawdb.NewMemoryDatabase() | 	db := rawdb.NewMemoryDatabase() | ||||||
| 	state, _ := New(common.Hash{}, NewDatabase(db)) | 	state, _ := New(common.Hash{}, NewDatabase(db), nil) | ||||||
| 
 | 
 | ||||||
| 	// Update it with some accounts
 | 	// Update it with some accounts
 | ||||||
| 	for i := byte(0); i < 255; i++ { | 	for i := byte(0); i < 255; i++ { | ||||||
| @ -73,8 +73,8 @@ func TestIntermediateLeaks(t *testing.T) { | |||||||
| 	// Create two state databases, one transitioning to the final state, the other final from the beginning
 | 	// Create two state databases, one transitioning to the final state, the other final from the beginning
 | ||||||
| 	transDb := rawdb.NewMemoryDatabase() | 	transDb := rawdb.NewMemoryDatabase() | ||||||
| 	finalDb := rawdb.NewMemoryDatabase() | 	finalDb := rawdb.NewMemoryDatabase() | ||||||
| 	transState, _ := New(common.Hash{}, NewDatabase(transDb)) | 	transState, _ := New(common.Hash{}, NewDatabase(transDb), nil) | ||||||
| 	finalState, _ := New(common.Hash{}, NewDatabase(finalDb)) | 	finalState, _ := New(common.Hash{}, NewDatabase(finalDb), nil) | ||||||
| 
 | 
 | ||||||
| 	modify := func(state *StateDB, addr common.Address, i, tweak byte) { | 	modify := func(state *StateDB, addr common.Address, i, tweak byte) { | ||||||
| 		state.SetBalance(addr, big.NewInt(int64(11*i)+int64(tweak))) | 		state.SetBalance(addr, big.NewInt(int64(11*i)+int64(tweak))) | ||||||
| @ -149,7 +149,7 @@ func TestIntermediateLeaks(t *testing.T) { | |||||||
| // https://github.com/ethereum/go-ethereum/pull/15549.
 | // https://github.com/ethereum/go-ethereum/pull/15549.
 | ||||||
| func TestCopy(t *testing.T) { | func TestCopy(t *testing.T) { | ||||||
| 	// Create a random state test to copy and modify "independently"
 | 	// Create a random state test to copy and modify "independently"
 | ||||||
| 	orig, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase())) | 	orig, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 
 | 
 | ||||||
| 	for i := byte(0); i < 255; i++ { | 	for i := byte(0); i < 255; i++ { | ||||||
| 		obj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i})) | 		obj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i})) | ||||||
| @ -385,7 +385,7 @@ func (test *snapshotTest) String() string { | |||||||
| func (test *snapshotTest) run() bool { | func (test *snapshotTest) run() bool { | ||||||
| 	// Run all actions and create snapshots.
 | 	// Run all actions and create snapshots.
 | ||||||
| 	var ( | 	var ( | ||||||
| 		state, _     = New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase())) | 		state, _     = New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 		snapshotRevs = make([]int, len(test.snapshots)) | 		snapshotRevs = make([]int, len(test.snapshots)) | ||||||
| 		sindex       = 0 | 		sindex       = 0 | ||||||
| 	) | 	) | ||||||
| @ -399,7 +399,7 @@ func (test *snapshotTest) run() bool { | |||||||
| 	// Revert all snapshots in reverse order. Each revert must yield a state
 | 	// Revert all snapshots in reverse order. Each revert must yield a state
 | ||||||
| 	// that is equivalent to fresh state with all actions up the snapshot applied.
 | 	// that is equivalent to fresh state with all actions up the snapshot applied.
 | ||||||
| 	for sindex--; sindex >= 0; sindex-- { | 	for sindex--; sindex >= 0; sindex-- { | ||||||
| 		checkstate, _ := New(common.Hash{}, state.Database()) | 		checkstate, _ := New(common.Hash{}, state.Database(), nil) | ||||||
| 		for _, action := range test.actions[:test.snapshots[sindex]] { | 		for _, action := range test.actions[:test.snapshots[sindex]] { | ||||||
| 			action.fn(action, checkstate) | 			action.fn(action, checkstate) | ||||||
| 		} | 		} | ||||||
| @ -477,7 +477,7 @@ func TestTouchDelete(t *testing.T) { | |||||||
| // TestCopyOfCopy tests that modified objects are carried over to the copy, and the copy of the copy.
 | // TestCopyOfCopy tests that modified objects are carried over to the copy, and the copy of the copy.
 | ||||||
| // See https://github.com/ethereum/go-ethereum/pull/15225#issuecomment-380191512
 | // See https://github.com/ethereum/go-ethereum/pull/15225#issuecomment-380191512
 | ||||||
| func TestCopyOfCopy(t *testing.T) { | func TestCopyOfCopy(t *testing.T) { | ||||||
| 	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase())) | 	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	addr := common.HexToAddress("aaaa") | 	addr := common.HexToAddress("aaaa") | ||||||
| 	state.SetBalance(addr, big.NewInt(42)) | 	state.SetBalance(addr, big.NewInt(42)) | ||||||
| 
 | 
 | ||||||
| @ -494,7 +494,7 @@ func TestCopyOfCopy(t *testing.T) { | |||||||
| //
 | //
 | ||||||
| // See https://github.com/ethereum/go-ethereum/issues/20106.
 | // See https://github.com/ethereum/go-ethereum/issues/20106.
 | ||||||
| func TestCopyCommitCopy(t *testing.T) { | func TestCopyCommitCopy(t *testing.T) { | ||||||
| 	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase())) | 	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 
 | 
 | ||||||
| 	// Create an account and check if the retrieved balance is correct
 | 	// Create an account and check if the retrieved balance is correct
 | ||||||
| 	addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") | 	addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") | ||||||
| @ -566,7 +566,7 @@ func TestCopyCommitCopy(t *testing.T) { | |||||||
| //
 | //
 | ||||||
| // See https://github.com/ethereum/go-ethereum/issues/20106.
 | // See https://github.com/ethereum/go-ethereum/issues/20106.
 | ||||||
| func TestCopyCopyCommitCopy(t *testing.T) { | func TestCopyCopyCommitCopy(t *testing.T) { | ||||||
| 	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase())) | 	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 
 | 
 | ||||||
| 	// Create an account and check if the retrieved balance is correct
 | 	// Create an account and check if the retrieved balance is correct
 | ||||||
| 	addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") | 	addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") | ||||||
| @ -656,7 +656,7 @@ func TestCopyCopyCommitCopy(t *testing.T) { | |||||||
| // first, but the journal wiped the entire state object on create-revert.
 | // first, but the journal wiped the entire state object on create-revert.
 | ||||||
| func TestDeleteCreateRevert(t *testing.T) { | func TestDeleteCreateRevert(t *testing.T) { | ||||||
| 	// Create an initial state with a single contract
 | 	// Create an initial state with a single contract
 | ||||||
| 	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase())) | 	state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 
 | 
 | ||||||
| 	addr := toAddr([]byte("so")) | 	addr := toAddr([]byte("so")) | ||||||
| 	state.SetBalance(addr, big.NewInt(1)) | 	state.SetBalance(addr, big.NewInt(1)) | ||||||
|  | |||||||
| @ -41,7 +41,7 @@ type testAccount struct { | |||||||
| func makeTestState() (Database, common.Hash, []*testAccount) { | func makeTestState() (Database, common.Hash, []*testAccount) { | ||||||
| 	// Create an empty state
 | 	// Create an empty state
 | ||||||
| 	db := NewDatabase(rawdb.NewMemoryDatabase()) | 	db := NewDatabase(rawdb.NewMemoryDatabase()) | ||||||
| 	state, _ := New(common.Hash{}, db) | 	state, _ := New(common.Hash{}, db, nil) | ||||||
| 
 | 
 | ||||||
| 	// Fill it with some arbitrary data
 | 	// Fill it with some arbitrary data
 | ||||||
| 	accounts := []*testAccount{} | 	accounts := []*testAccount{} | ||||||
| @ -72,7 +72,7 @@ func makeTestState() (Database, common.Hash, []*testAccount) { | |||||||
| // account array.
 | // account array.
 | ||||||
| func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accounts []*testAccount) { | func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accounts []*testAccount) { | ||||||
| 	// Check root availability and state contents
 | 	// Check root availability and state contents
 | ||||||
| 	state, err := New(root, NewDatabase(db)) | 	state, err := New(root, NewDatabase(db), nil) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		t.Fatalf("failed to create state trie at %x: %v", root, err) | 		t.Fatalf("failed to create state trie at %x: %v", root, err) | ||||||
| 	} | 	} | ||||||
| @ -113,7 +113,7 @@ func checkStateConsistency(db ethdb.Database, root common.Hash) error { | |||||||
| 	if _, err := db.Get(root.Bytes()); err != nil { | 	if _, err := db.Get(root.Bytes()); err != nil { | ||||||
| 		return nil // Consider a non existent state consistent.
 | 		return nil // Consider a non existent state consistent.
 | ||||||
| 	} | 	} | ||||||
| 	state, err := New(root, NewDatabase(db)) | 	state, err := New(root, NewDatabase(db), nil) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
|  | |||||||
| @ -54,6 +54,7 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c | |||||||
| 		gaspool = new(GasPool).AddGas(block.GasLimit()) | 		gaspool = new(GasPool).AddGas(block.GasLimit()) | ||||||
| 	) | 	) | ||||||
| 	// Iterate over and process the individual transactions
 | 	// Iterate over and process the individual transactions
 | ||||||
|  | 	byzantium := p.config.IsByzantium(block.Number()) | ||||||
| 	for i, tx := range block.Transactions() { | 	for i, tx := range block.Transactions() { | ||||||
| 		// If block precaching was interrupted, abort
 | 		// If block precaching was interrupted, abort
 | ||||||
| 		if interrupt != nil && atomic.LoadUint32(interrupt) == 1 { | 		if interrupt != nil && atomic.LoadUint32(interrupt) == 1 { | ||||||
| @ -64,6 +65,14 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c | |||||||
| 		if err := precacheTransaction(p.config, p.bc, nil, gaspool, statedb, header, tx, cfg); err != nil { | 		if err := precacheTransaction(p.config, p.bc, nil, gaspool, statedb, header, tx, cfg); err != nil { | ||||||
| 			return // Ugh, something went horribly wrong, bail out | 			return // Ugh, something went horribly wrong, bail out | ||||||
| 		} | 		} | ||||||
|  | 		// If we're pre-byzantium, pre-load trie nodes for the intermediate root | ||||||
|  | 		if !byzantium { | ||||||
|  | 			statedb.IntermediateRoot(true) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// If we're post-byzantium, pre-load trie nodes for the final root hash | ||||||
|  | 	if byzantium { | ||||||
|  | 		statedb.IntermediateRoot(true) | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | |||||||
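The prefetcher hunk above encodes a fork-dependent subtlety: pre-Byzantium, every receipt embeds an intermediate state root, so hashing after each transaction warms exactly the trie nodes the real import will need; post-Byzantium only the final root is computed. Condensed from the hunk (setup and error paths elided):

byzantium := p.config.IsByzantium(block.Number())
for _, tx := range block.Transactions() {
	if err := precacheTransaction(p.config, p.bc, nil, gaspool, statedb, header, tx, cfg); err != nil {
		return
	}
	if !byzantium {
		statedb.IntermediateRoot(true) // per-transaction root: pre-Byzantium receipts embed it
	}
}
if byzantium {
	statedb.IntermediateRoot(true) // only the final root matters post-Byzantium
}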
| @ -86,7 +86,7 @@ func pricedDataTransaction(nonce uint64, gaslimit uint64, gasprice *big.Int, key | |||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func setupTxPool() (*TxPool, *ecdsa.PrivateKey) { | func setupTxPool() (*TxPool, *ecdsa.PrivateKey) { | ||||||
| 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	blockchain := &testBlockChain{statedb, 10000000, new(event.Feed)} | 	blockchain := &testBlockChain{statedb, 10000000, new(event.Feed)} | ||||||
| 
 | 
 | ||||||
| 	key, _ := crypto.GenerateKey() | 	key, _ := crypto.GenerateKey() | ||||||
| @ -171,7 +171,7 @@ func (c *testChain) State() (*state.StateDB, error) { | |||||||
| 	// a state change between those fetches. | 	// a state change between those fetches. | ||||||
| 	stdb := c.statedb | 	stdb := c.statedb | ||||||
| 	if *c.trigger { | 	if *c.trigger { | ||||||
| 		c.statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 		c.statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 		// simulate that the new head block included tx0 and tx1 | 		// simulate that the new head block included tx0 and tx1 | ||||||
| 		c.statedb.SetNonce(c.address, 2) | 		c.statedb.SetNonce(c.address, 2) | ||||||
| 		c.statedb.SetBalance(c.address, new(big.Int).SetUint64(params.Ether)) | 		c.statedb.SetBalance(c.address, new(big.Int).SetUint64(params.Ether)) | ||||||
| @ -189,7 +189,7 @@ func TestStateChangeDuringTransactionPoolReset(t *testing.T) { | |||||||
| 	var ( | 	var ( | ||||||
| 		key, _     = crypto.GenerateKey() | 		key, _     = crypto.GenerateKey() | ||||||
| 		address    = crypto.PubkeyToAddress(key.PublicKey) | 		address    = crypto.PubkeyToAddress(key.PublicKey) | ||||||
| 		statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 		statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 		trigger    = false | 		trigger    = false | ||||||
| 	) | 	) | ||||||
| 
 | 
 | ||||||
| @ -345,7 +345,7 @@ func TestTransactionChainFork(t *testing.T) { | |||||||
| 
 | 
 | ||||||
| 	addr := crypto.PubkeyToAddress(key.PublicKey) | 	addr := crypto.PubkeyToAddress(key.PublicKey) | ||||||
| 	resetState := func() { | 	resetState := func() { | ||||||
| 		statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 		statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 		statedb.AddBalance(addr, big.NewInt(100000000000000)) | 		statedb.AddBalance(addr, big.NewInt(100000000000000)) | ||||||
| 
 | 
 | ||||||
| 		pool.chain = &testBlockChain{statedb, 1000000, new(event.Feed)} | 		pool.chain = &testBlockChain{statedb, 1000000, new(event.Feed)} | ||||||
| @ -374,7 +374,7 @@ func TestTransactionDoubleNonce(t *testing.T) { | |||||||
| 
 | 
 | ||||||
| 	addr := crypto.PubkeyToAddress(key.PublicKey) | 	addr := crypto.PubkeyToAddress(key.PublicKey) | ||||||
| 	resetState := func() { | 	resetState := func() { | ||||||
| 		statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 		statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 		statedb.AddBalance(addr, big.NewInt(100000000000000)) | 		statedb.AddBalance(addr, big.NewInt(100000000000000)) | ||||||
| 
 | 
 | ||||||
| 		pool.chain = &testBlockChain{statedb, 1000000, new(event.Feed)} | 		pool.chain = &testBlockChain{statedb, 1000000, new(event.Feed)} | ||||||
| @ -565,7 +565,7 @@ func TestTransactionPostponing(t *testing.T) { | |||||||
| 	t.Parallel() | 	t.Parallel() | ||||||
| 
 | 
 | ||||||
| 	// Create the pool to test the postponing with | 	// Create the pool to test the postponing with | ||||||
| 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | ||||||
| 
 | 
 | ||||||
| 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) | 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) | ||||||
| @ -778,7 +778,7 @@ func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) { | |||||||
| 	t.Parallel() | 	t.Parallel() | ||||||
| 
 | 
 | ||||||
| 	// Create the pool to test the limit enforcement with | 	// Create the pool to test the limit enforcement with | ||||||
| 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | ||||||
| 
 | 
 | ||||||
| 	config := testTxPoolConfig | 	config := testTxPoolConfig | ||||||
| @ -866,7 +866,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) { | |||||||
| 	evictionInterval = time.Second | 	evictionInterval = time.Second | ||||||
| 
 | 
 | ||||||
| 	// Create the pool to test the non-expiration enforcement | 	// Create the pool to test the non-expiration enforcement | ||||||
| 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | ||||||
| 
 | 
 | ||||||
| 	config := testTxPoolConfig | 	config := testTxPoolConfig | ||||||
| @ -969,7 +969,7 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) { | |||||||
| 	t.Parallel() | 	t.Parallel() | ||||||
| 
 | 
 | ||||||
| 	// Create the pool to test the limit enforcement with | 	// Create the pool to test the limit enforcement with | ||||||
| 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | ||||||
| 
 | 
 | ||||||
| 	config := testTxPoolConfig | 	config := testTxPoolConfig | ||||||
| @ -1071,7 +1071,7 @@ func TestTransactionCapClearsFromAll(t *testing.T) { | |||||||
| 	t.Parallel() | 	t.Parallel() | ||||||
| 
 | 
 | ||||||
| 	// Create the pool to test the limit enforcement with | 	// Create the pool to test the limit enforcement with | ||||||
| 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | ||||||
| 
 | 
 | ||||||
| 	config := testTxPoolConfig | 	config := testTxPoolConfig | ||||||
| @ -1105,7 +1105,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) { | |||||||
| 	t.Parallel() | 	t.Parallel() | ||||||
| 
 | 
 | ||||||
| 	// Create the pool to test the limit enforcement with | 	// Create the pool to test the limit enforcement with | ||||||
| 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | ||||||
| 
 | 
 | ||||||
| 	config := testTxPoolConfig | 	config := testTxPoolConfig | ||||||
| @ -1153,7 +1153,7 @@ func TestTransactionPoolRepricing(t *testing.T) { | |||||||
| 	t.Parallel() | 	t.Parallel() | ||||||
| 
 | 
 | ||||||
| 	// Create the pool to test the pricing enforcement with | 	// Create the pool to test the pricing enforcement with | ||||||
| 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | ||||||
| 
 | 
 | ||||||
| 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) | 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) | ||||||
| @ -1274,7 +1274,7 @@ func TestTransactionPoolRepricingKeepsLocals(t *testing.T) { | |||||||
| 	t.Parallel() | 	t.Parallel() | ||||||
| 
 | 
 | ||||||
| 	// Create the pool to test the pricing enforcement with | 	// Create the pool to test the pricing enforcement with | ||||||
| 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | ||||||
| 
 | 
 | ||||||
| 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) | 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) | ||||||
| @ -1336,7 +1336,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) { | |||||||
| 	t.Parallel() | 	t.Parallel() | ||||||
| 
 | 
 | ||||||
| 	// Create the pool to test the pricing enforcement with | 	// Create the pool to test the pricing enforcement with | ||||||
| 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | ||||||
| 
 | 
 | ||||||
| 	config := testTxPoolConfig | 	config := testTxPoolConfig | ||||||
| @ -1442,7 +1442,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) { | |||||||
| 	t.Parallel() | 	t.Parallel() | ||||||
| 
 | 
 | ||||||
| 	// Create the pool to test the pricing enforcement with | 	// Create the pool to test the pricing enforcement with | ||||||
| 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | ||||||
| 
 | 
 | ||||||
| 	config := testTxPoolConfig | 	config := testTxPoolConfig | ||||||
| @ -1507,7 +1507,7 @@ func TestTransactionDeduplication(t *testing.T) { | |||||||
| 	t.Parallel() | 	t.Parallel() | ||||||
| 
 | 
 | ||||||
| 	// Create the pool to test the pricing enforcement with | 	// Create the pool to test the pricing enforcement with | ||||||
| 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | ||||||
| 
 | 
 | ||||||
| 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) | 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) | ||||||
| @ -1573,7 +1573,7 @@ func TestTransactionReplacement(t *testing.T) { | |||||||
| 	t.Parallel() | 	t.Parallel() | ||||||
| 
 | 
 | ||||||
| 	// Create the pool to test the pricing enforcement with | 	// Create the pool to test the pricing enforcement with | ||||||
| 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | ||||||
| 
 | 
 | ||||||
| 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) | 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) | ||||||
| @ -1668,7 +1668,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) { | |||||||
| 	os.Remove(journal) | 	os.Remove(journal) | ||||||
| 
 | 
 | ||||||
| 	// Create the original pool to inject transaction into the journal | 	// Create the original pool to inject transaction into the journal | ||||||
| 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | ||||||
| 
 | 
 | ||||||
| 	config := testTxPoolConfig | 	config := testTxPoolConfig | ||||||
| @ -1766,7 +1766,7 @@ func TestTransactionStatusCheck(t *testing.T) { | |||||||
| 	t.Parallel() | 	t.Parallel() | ||||||
| 
 | 
 | ||||||
| 	// Create the pool to test the status retrievals with | 	// Create the pool to test the status retrievals with | ||||||
| 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | 	blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)} | ||||||
| 
 | 
 | ||||||
| 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) | 	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) | ||||||
|  | |||||||
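Every pool test above now repeats the same two setup lines. A hypothetical shared helper, not part of this diff, shown only to make the new signature and the test scaffolding explicit (testBlockChain, state, rawdb and event are the packages the tests already use):

func newTestBlockChain(gasLimit uint64) *testBlockChain {
	// Fresh in-memory state with no snapshot tree, as in every test above.
	statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
	return &testBlockChain{statedb, gasLimit, new(event.Feed)}
}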
| @ -81,7 +81,7 @@ func TestEIP2200(t *testing.T) { | |||||||
| 	for i, tt := range eip2200Tests { | 	for i, tt := range eip2200Tests { | ||||||
| 		address := common.BytesToAddress([]byte("contract")) | 		address := common.BytesToAddress([]byte("contract")) | ||||||
| 
 | 
 | ||||||
| 		statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 		statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 		statedb.CreateAccount(address) | 		statedb.CreateAccount(address) | ||||||
| 		statedb.SetCode(address, hexutil.MustDecode(tt.input)) | 		statedb.SetCode(address, hexutil.MustDecode(tt.input)) | ||||||
| 		statedb.SetState(address, common.Hash{}, common.BytesToHash([]byte{tt.original})) | 		statedb.SetState(address, common.Hash{}, common.BytesToHash([]byte{tt.original})) | ||||||
|  | |||||||
| @ -70,7 +70,7 @@ const ( | |||||||
| 	SHR | 	SHR | ||||||
| 	SAR | 	SAR | ||||||
| 
 | 
 | ||||||
| 	SHA3 = 0x20 | 	SHA3 OpCode = 0x20 | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // 0x30 range - closure state. | // 0x30 range - closure state. | ||||||
| @ -101,8 +101,8 @@ const ( | |||||||
| 	NUMBER | 	NUMBER | ||||||
| 	DIFFICULTY | 	DIFFICULTY | ||||||
| 	GASLIMIT | 	GASLIMIT | ||||||
| 	CHAINID     = 0x46 | 	CHAINID     OpCode = 0x46 | ||||||
| 	SELFBALANCE = 0x47 | 	SELFBALANCE OpCode = 0x47 | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // 0x50 range - 'storage' and execution. | // 0x50 range - 'storage' and execution. | ||||||
| @ -213,10 +213,9 @@ const ( | |||||||
| 	RETURN | 	RETURN | ||||||
| 	DELEGATECALL | 	DELEGATECALL | ||||||
| 	CREATE2 | 	CREATE2 | ||||||
| 	STATICCALL = 0xfa | 	STATICCALL   OpCode = 0xfa | ||||||
|  | 	REVERT       OpCode = 0xfd | ||||||
| 	REVERT       = 0xfd | 	SELFDESTRUCT OpCode = 0xff | ||||||
| 	SELFDESTRUCT = 0xff |  | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // Since the opcodes aren't all in order we can't use a regular slice. | // Since the opcodes aren't all in order we can't use a regular slice. | ||||||
|  | |||||||
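The opcode hunks are a typing fix, not a behavioural one: inside a typed iota block, a constant declared with a bare `= 0xfa` style expression is an untyped integer rather than an OpCode, so the explicit OpCode annotations restore uniform typing across the gaps in the numbering. A small illustration of the Go rule, under the same OpCode definition:

type OpCode byte

const (
	STOP OpCode = iota // typed OpCode, value 0
	ADD                // inherits type and expression from the line above: OpCode, value 1
	Bare = 0x20        // bare expression: an untyped int constant, *not* an OpCode
	SHA3 OpCode = 0x20 // explicit type keeps the block uniform, as in the diff
)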
| @ -99,7 +99,7 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) { | |||||||
| 	setDefaults(cfg) | 	setDefaults(cfg) | ||||||
| 
 | 
 | ||||||
| 	if cfg.State == nil { | 	if cfg.State == nil { | ||||||
| 		cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 		cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	} | 	} | ||||||
| 	var ( | 	var ( | ||||||
| 		address = common.BytesToAddress([]byte("contract")) | 		address = common.BytesToAddress([]byte("contract")) | ||||||
| @ -129,7 +129,7 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) { | |||||||
| 	setDefaults(cfg) | 	setDefaults(cfg) | ||||||
| 
 | 
 | ||||||
| 	if cfg.State == nil { | 	if cfg.State == nil { | ||||||
| 		cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 		cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	} | 	} | ||||||
| 	var ( | 	var ( | ||||||
| 		vmenv  = NewEnv(cfg) | 		vmenv  = NewEnv(cfg) | ||||||
|  | |||||||
| @ -98,7 +98,7 @@ func TestExecute(t *testing.T) { | |||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func TestCall(t *testing.T) { | func TestCall(t *testing.T) { | ||||||
| 	state, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 	state, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 	address := common.HexToAddress("0x0a") | 	address := common.HexToAddress("0x0a") | ||||||
| 	state.SetCode(address, []byte{ | 	state.SetCode(address, []byte{ | ||||||
| 		byte(vm.PUSH1), 10, | 		byte(vm.PUSH1), 10, | ||||||
| @ -154,7 +154,7 @@ func BenchmarkCall(b *testing.B) { | |||||||
| } | } | ||||||
| func benchmarkEVM_Create(bench *testing.B, code string) { | func benchmarkEVM_Create(bench *testing.B, code string) { | ||||||
| 	var ( | 	var ( | ||||||
| 		statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 		statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 		sender     = common.BytesToAddress([]byte("sender")) | 		sender     = common.BytesToAddress([]byte("sender")) | ||||||
| 		receiver   = common.BytesToAddress([]byte("receiver")) | 		receiver   = common.BytesToAddress([]byte("receiver")) | ||||||
| 	) | 	) | ||||||
|  | |||||||
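runtime.Execute and runtime.Create still lazily build their own in-memory StateDB when cfg.State is nil, now wired with a nil snapshot tree. A minimal usage sketch; the bytecode is an arbitrary example, and it assumes runtime.Execute's existing tolerance of a nil Config (it allocates defaults):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/core/vm/runtime"
)

func main() {
	// MSTORE8 a single 0x01 byte at offset 0, then RETURN mem[0:1].
	code := []byte{
		byte(vm.PUSH1), 0x01, byte(vm.PUSH1), 0x00, byte(vm.MSTORE8),
		byte(vm.PUSH1), 0x01, byte(vm.PUSH1), 0x00, byte(vm.RETURN),
	}
	ret, _, err := runtime.Execute(code, nil, nil) // nil Config: defaults plus a fresh state
	fmt.Println(ret, err)                          // [1] <nil>
}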
| @ -64,7 +64,7 @@ func (h resultHash) Less(i, j int) bool { return bytes.Compare(h[i].Bytes(), h[j | |||||||
| func TestAccountRange(t *testing.T) { | func TestAccountRange(t *testing.T) { | ||||||
| 	var ( | 	var ( | ||||||
| 		statedb  = state.NewDatabase(rawdb.NewMemoryDatabase()) | 		statedb  = state.NewDatabase(rawdb.NewMemoryDatabase()) | ||||||
| 		state, _ = state.New(common.Hash{}, statedb) | 		state, _ = state.New(common.Hash{}, statedb, nil) | ||||||
| 		addrs    = [AccountRangeMaxResults * 2]common.Address{} | 		addrs    = [AccountRangeMaxResults * 2]common.Address{} | ||||||
| 		m        = map[common.Address]bool{} | 		m        = map[common.Address]bool{} | ||||||
| 	) | 	) | ||||||
| @ -162,7 +162,7 @@ func TestAccountRange(t *testing.T) { | |||||||
| func TestEmptyAccountRange(t *testing.T) { | func TestEmptyAccountRange(t *testing.T) { | ||||||
| 	var ( | 	var ( | ||||||
| 		statedb  = state.NewDatabase(rawdb.NewMemoryDatabase()) | 		statedb  = state.NewDatabase(rawdb.NewMemoryDatabase()) | ||||||
| 		state, _ = state.New(common.Hash{}, statedb) | 		state, _ = state.New(common.Hash{}, statedb, nil) | ||||||
| 	) | 	) | ||||||
| 
 | 
 | ||||||
| 	state.Commit(true) | 	state.Commit(true) | ||||||
| @ -188,7 +188,7 @@ func TestEmptyAccountRange(t *testing.T) { | |||||||
| func TestStorageRangeAt(t *testing.T) { | func TestStorageRangeAt(t *testing.T) { | ||||||
| 	// Create a state where account 0x010000... has a few storage entries. | 	// Create a state where account 0x010000... has a few storage entries. | ||||||
| 	var ( | 	var ( | ||||||
| 		state, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase())) | 		state, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) | ||||||
| 		addr     = common.Address{0x01} | 		addr     = common.Address{0x01} | ||||||
| 		keys     = []common.Hash{ // hashes of Keys of storage | 		keys     = []common.Hash{ // hashes of Keys of storage | ||||||
| 			common.HexToHash("340dd630ad21bf010b4e676dbfa9ba9a02175262d1fa356232cfde6cb5b47ef2"), | 			common.HexToHash("340dd630ad21bf010b4e676dbfa9ba9a02175262d1fa356232cfde6cb5b47ef2"), | ||||||
|  | |||||||
| @ -155,7 +155,7 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl | |||||||
| 			return nil, fmt.Errorf("parent block #%d not found", number-1) | 			return nil, fmt.Errorf("parent block #%d not found", number-1) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	statedb, err := state.New(start.Root(), database) | 	statedb, err := state.New(start.Root(), database, nil) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		// If the starting state is missing, allow some number of blocks to be reexecuted | 		// If the starting state is missing, allow some number of blocks to be reexecuted | ||||||
| 		reexec := defaultTraceReexec | 		reexec := defaultTraceReexec | ||||||
| @ -168,7 +168,7 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl | |||||||
| 			if start == nil { | 			if start == nil { | ||||||
| 				break | 				break | ||||||
| 			} | 			} | ||||||
| 			if statedb, err = state.New(start.Root(), database); err == nil { | 			if statedb, err = state.New(start.Root(), database, nil); err == nil { | ||||||
| 				break | 				break | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
| @ -648,7 +648,7 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (* | |||||||
| 		if block == nil { | 		if block == nil { | ||||||
| 			break | 			break | ||||||
| 		} | 		} | ||||||
| 		if statedb, err = state.New(block.Root(), database); err == nil { | 		if statedb, err = state.New(block.Root(), database, nil); err == nil { | ||||||
| 			break | 			break | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  | |||||||
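Both tracing paths above share the same recovery idiom: when the state for a root has been pruned, walk back through up to reexec parent blocks until state.New resolves, then re-execute forward. Condensed from the hunks; the block-fetching call is paraphrased from the surrounding file and is an assumption, not shown in this diff:

// Walk backwards until a root whose state still resolves is found.
for i := uint64(0); i < reexec; i++ {
	block = api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) // assumed fetch
	if block == nil {
		break
	}
	if statedb, err = state.New(block.Root(), database, nil); err == nil {
		break
	}
}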
| @ -127,7 +127,8 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { | |||||||
| 		config.Miner.GasPrice = new(big.Int).Set(DefaultConfig.Miner.GasPrice) | 		config.Miner.GasPrice = new(big.Int).Set(DefaultConfig.Miner.GasPrice) | ||||||
| 	} | 	} | ||||||
| 	if config.NoPruning && config.TrieDirtyCache > 0 { | 	if config.NoPruning && config.TrieDirtyCache > 0 { | ||||||
| 		config.TrieCleanCache += config.TrieDirtyCache | 		config.TrieCleanCache += config.TrieDirtyCache * 3 / 5 | ||||||
|  | 		config.SnapshotCache += config.TrieDirtyCache * 3 / 5 | ||||||
| 		config.TrieDirtyCache = 0 | 		config.TrieDirtyCache = 0 | ||||||
| 	} | 	} | ||||||
| 	log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024) | 	log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024) | ||||||
| @ -184,6 +185,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { | |||||||
| 			TrieDirtyLimit:      config.TrieDirtyCache, | 			TrieDirtyLimit:      config.TrieDirtyCache, | ||||||
| 			TrieDirtyDisabled:   config.NoPruning, | 			TrieDirtyDisabled:   config.NoPruning, | ||||||
| 			TrieTimeLimit:       config.TrieTimeout, | 			TrieTimeLimit:       config.TrieTimeout, | ||||||
|  | 			SnapshotLimit:       config.SnapshotCache, | ||||||
| 		} | 		} | ||||||
| 	) | 	) | ||||||
| 	eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, eth.shouldPreserve) | 	eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, eth.shouldPreserve) | ||||||
| @ -204,7 +206,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { | |||||||
| 	eth.txPool = core.NewTxPool(config.TxPool, chainConfig, eth.blockchain) | 	eth.txPool = core.NewTxPool(config.TxPool, chainConfig, eth.blockchain) | ||||||
| 
 | 
 | ||||||
| 	// Permit the downloader to use the trie cache allowance during fast sync | 	// Permit the downloader to use the trie cache allowance during fast sync | ||||||
| 	cacheLimit := cacheConfig.TrieCleanLimit + cacheConfig.TrieDirtyLimit | 	cacheLimit := cacheConfig.TrieCleanLimit + cacheConfig.TrieDirtyLimit + cacheConfig.SnapshotLimit | ||||||
| 	checkpoint := config.Checkpoint | 	checkpoint := config.Checkpoint | ||||||
| 	if checkpoint == nil { | 	if checkpoint == nil { | ||||||
| 		checkpoint = params.TrustedCheckpoints[genesisHash] | 		checkpoint = params.TrustedCheckpoints[genesisHash] | ||||||
|  | |||||||
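The backend hunk changes how a disabled dirty cache is recycled: instead of handing all of it to the clean cache, three fifths go to the clean cache and three fifths to the new snapshot cache, so the two shares sum to six fifths of the freed budget. Worked through with the 256 MB defaults (a sketch of the arithmetic, not code from the PR):

dirty := 256                 // DefaultConfig.TrieDirtyCache, in MB
clean := 256 + dirty*3/5     // 256 + 153 = 409 MB
snapcache := 256 + dirty*3/5 // 256 + 153 = 409 MB
dirty = 0
_, _, _ = dirty, clean, snapcache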
| @ -50,6 +50,7 @@ var DefaultConfig = Config{ | |||||||
| 	TrieCleanCache:     256, | 	TrieCleanCache:     256, | ||||||
| 	TrieDirtyCache:     256, | 	TrieDirtyCache:     256, | ||||||
| 	TrieTimeout:        60 * time.Minute, | 	TrieTimeout:        60 * time.Minute, | ||||||
|  | 	SnapshotCache:      256, | ||||||
| 	Miner: miner.Config{ | 	Miner: miner.Config{ | ||||||
| 		GasFloor: 8000000, | 		GasFloor: 8000000, | ||||||
| 		GasCeil:  8000000, | 		GasCeil:  8000000, | ||||||
| @ -125,6 +126,7 @@ type Config struct { | |||||||
| 	TrieCleanCache int | 	TrieCleanCache int | ||||||
| 	TrieDirtyCache int | 	TrieDirtyCache int | ||||||
| 	TrieTimeout    time.Duration | 	TrieTimeout    time.Duration | ||||||
|  | 	SnapshotCache  int | ||||||
| 
 | 
 | ||||||
| 	// Mining options | 	// Mining options | ||||||
| 	Miner miner.Config | 	Miner miner.Config | ||||||
|  | |||||||
| @ -349,7 +349,7 @@ func testGetNodeData(t *testing.T, protocol int) { | |||||||
| 	} | 	} | ||||||
| 	accounts := []common.Address{testBank, acc1Addr, acc2Addr} | 	accounts := []common.Address{testBank, acc1Addr, acc2Addr} | ||||||
| 	for i := uint64(0); i <= pm.blockchain.CurrentBlock().NumberU64(); i++ { | 	for i := uint64(0); i <= pm.blockchain.CurrentBlock().NumberU64(); i++ { | ||||||
| 		trie, _ := state.New(pm.blockchain.GetBlockByNumber(i).Root(), state.NewDatabase(statedb)) | 		trie, _ := state.New(pm.blockchain.GetBlockByNumber(i).Root(), state.NewDatabase(statedb), nil) | ||||||
| 
 | 
 | ||||||
| 		for j, acc := range accounts { | 		for j, acc := range accounts { | ||||||
| 			state, _ := pm.blockchain.State() | 			state, _ := pm.blockchain.State() | ||||||
|  | |||||||
| @ -168,7 +168,7 @@ func TestPrestateTracerCreate2(t *testing.T) { | |||||||
| 		Code:    []byte{}, | 		Code:    []byte{}, | ||||||
| 		Balance: big.NewInt(500000000000000), | 		Balance: big.NewInt(500000000000000), | ||||||
| 	} | 	} | ||||||
| 	statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc) | 	statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false) | ||||||
| 
 | 
 | ||||||
| 	// Create the tracer, the EVM environment and run it | 	// Create the tracer, the EVM environment and run it | ||||||
| 	tracer, err := New("prestateTracer") | 	tracer, err := New("prestateTracer") | ||||||
| @ -242,7 +242,7 @@ func TestCallTracer(t *testing.T) { | |||||||
| 				GasLimit:    uint64(test.Context.GasLimit), | 				GasLimit:    uint64(test.Context.GasLimit), | ||||||
| 				GasPrice:    tx.GasPrice(), | 				GasPrice:    tx.GasPrice(), | ||||||
| 			} | 			} | ||||||
| 			statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc) | 			statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) | ||||||
| 
 | 
 | ||||||
| 			// Create the tracer, the EVM environment and run it | 			// Create the tracer, the EVM environment and run it | ||||||
| 			tracer, err := New("callTracer") | 			tracer, err := New("callTracer") | ||||||
|  | |||||||
| @ -91,7 +91,7 @@ func odrAccounts(ctx context.Context, db ethdb.Database, config *params.ChainCon | |||||||
| 	for _, addr := range acc { | 	for _, addr := range acc { | ||||||
| 		if bc != nil { | 		if bc != nil { | ||||||
| 			header := bc.GetHeaderByHash(bhash) | 			header := bc.GetHeaderByHash(bhash) | ||||||
| 			st, err = state.New(header.Root, state.NewDatabase(db)) | 			st, err = state.New(header.Root, state.NewDatabase(db), nil) | ||||||
| 		} else { | 		} else { | ||||||
| 			header := lc.GetHeaderByHash(bhash) | 			header := lc.GetHeaderByHash(bhash) | ||||||
| 			st = light.NewState(ctx, header, lc.Odr()) | 			st = light.NewState(ctx, header, lc.Odr()) | ||||||
| @ -122,7 +122,7 @@ func odrContractCall(ctx context.Context, db ethdb.Database, config *params.Chai | |||||||
| 		data[35] = byte(i) | 		data[35] = byte(i) | ||||||
| 		if bc != nil { | 		if bc != nil { | ||||||
| 			header := bc.GetHeaderByHash(bhash) | 			header := bc.GetHeaderByHash(bhash) | ||||||
| 			statedb, err := state.New(header.Root, state.NewDatabase(db)) | 			statedb, err := state.New(header.Root, state.NewDatabase(db), nil) | ||||||
| 
 | 
 | ||||||
| 			if err == nil { | 			if err == nil { | ||||||
| 				from := statedb.GetOrNewStateObject(bankAddr) | 				from := statedb.GetOrNewStateObject(bankAddr) | ||||||
|  | |||||||
| @ -149,7 +149,7 @@ func odrAccounts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc | |||||||
| 		st = NewState(ctx, header, lc.Odr()) | 		st = NewState(ctx, header, lc.Odr()) | ||||||
| 	} else { | 	} else { | ||||||
| 		header := bc.GetHeaderByHash(bhash) | 		header := bc.GetHeaderByHash(bhash) | ||||||
| 		st, _ = state.New(header.Root, state.NewDatabase(db)) | 		st, _ = state.New(header.Root, state.NewDatabase(db), nil) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	var res []byte | 	var res []byte | ||||||
| @ -189,7 +189,7 @@ func odrContractCall(ctx context.Context, db ethdb.Database, bc *core.BlockChain | |||||||
| 		} else { | 		} else { | ||||||
| 			chain = bc | 			chain = bc | ||||||
| 			header = bc.GetHeaderByHash(bhash) | 			header = bc.GetHeaderByHash(bhash) | ||||||
| 			st, _ = state.New(header.Root, state.NewDatabase(db)) | 			st, _ = state.New(header.Root, state.NewDatabase(db), nil) | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		// Perform read-only call. | 		// Perform read-only call. | ||||||
|  | |||||||
| @ -30,7 +30,7 @@ import ( | |||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| func NewState(ctx context.Context, head *types.Header, odr OdrBackend) *state.StateDB { | func NewState(ctx context.Context, head *types.Header, odr OdrBackend) *state.StateDB { | ||||||
| 	state, _ := state.New(head.Root, NewStateDatabase(ctx, head, odr)) | 	state, _ := state.New(head.Root, NewStateDatabase(ctx, head, odr), nil) | ||||||
| 	return state | 	return state | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | |||||||
| @ -45,11 +45,13 @@ func TestBlockchain(t *testing.T) { | |||||||
| 	bt.skipLoad(`.*randomStatetest94.json.*`) | 	bt.skipLoad(`.*randomStatetest94.json.*`) | ||||||
| 
 | 
 | ||||||
| 	bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) { | 	bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) { | ||||||
| 		if err := bt.checkFailure(t, name, test.Run()); err != nil { | 		if err := bt.checkFailure(t, name+"/trie", test.Run(false)); err != nil { | ||||||
| 			t.Error(err) | 			t.Errorf("test without snapshotter failed: %v", err) | ||||||
|  | 		} | ||||||
|  | 		if err := bt.checkFailure(t, name+"/snap", test.Run(true)); err != nil { | ||||||
|  | 			t.Errorf("test with snapshotter failed: %v", err) | ||||||
| 		} | 		} | ||||||
| 	}) | 	}) | ||||||
| 
 |  | ||||||
| 	// There is also a LegacyTests folder, containing blockchain tests generated | 	// There is also a LegacyTests folder, containing blockchain tests generated | ||||||
| 	// prior to Istanbul. However, they are all derived from GeneralStateTests, | 	// prior to Istanbul. However, they are all derived from GeneralStateTests, | ||||||
| 	// which run natively, so there's no reason to run them here. | 	// which run natively, so there's no reason to run them here. | ||||||
|  | |||||||
| @ -94,7 +94,7 @@ type btHeaderMarshaling struct { | |||||||
| 	Timestamp  math.HexOrDecimal64 | 	Timestamp  math.HexOrDecimal64 | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (t *BlockTest) Run() error { | func (t *BlockTest) Run(snapshotter bool) error { | ||||||
| 	config, ok := Forks[t.json.Network] | 	config, ok := Forks[t.json.Network] | ||||||
| 	if !ok { | 	if !ok { | ||||||
| 		return UnsupportedForkError{t.json.Network} | 		return UnsupportedForkError{t.json.Network} | ||||||
| @ -118,7 +118,12 @@ func (t *BlockTest) Run() error { | |||||||
| 	} else { | 	} else { | ||||||
| 		engine = ethash.NewShared() | 		engine = ethash.NewShared() | ||||||
| 	} | 	} | ||||||
| 	chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieCleanLimit: 0}, config, engine, vm.Config{}, nil) | 	cache := &core.CacheConfig{TrieCleanLimit: 0} | ||||||
|  | 	if snapshotter { | ||||||
|  | 		cache.SnapshotLimit = 1 | ||||||
|  | 		cache.SnapshotWait = true | ||||||
|  | 	} | ||||||
|  | 	chain, err := core.NewBlockChain(db, cache, config, engine, vm.Config{}, nil) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
|  | |||||||
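The two new CacheConfig fields used here are the generic switch for the snapshotter, not a test-only facility. A hedged sketch of enabling it when constructing a chain; the values mirror the hunk, while the comments state assumptions about semantics the diff itself does not spell out:

cache := &core.CacheConfig{
	TrieCleanLimit: 0,    // as in the block tests above
	SnapshotLimit:  1,    // non-zero enables the snapshotter (assumed: MB of snapshot cache)
	SnapshotWait:   true, // assumed: block until the base snapshot is generated
}
chain, err := core.NewBlockChain(db, cache, config, engine, vm.Config{}, nil)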
| @ -63,10 +63,17 @@ func TestState(t *testing.T) { | |||||||
| 				subtest := subtest | 				subtest := subtest | ||||||
| 				key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index) | 				key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index) | ||||||
| 				name := name + "/" + key | 				name := name + "/" + key | ||||||
| 				t.Run(key, func(t *testing.T) { |  | ||||||
|  | 				t.Run(key+"/trie", func(t *testing.T) { | ||||||
| 					withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error { | 					withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error { | ||||||
| 						_, err := test.Run(subtest, vmconfig) | 						_, err := test.Run(subtest, vmconfig, false) | ||||||
| 						return st.checkFailure(t, name, err) | 						return st.checkFailure(t, name+"/trie", err) | ||||||
|  | 					}) | ||||||
|  | 				}) | ||||||
|  | 				t.Run(key+"/snap", func(t *testing.T) { | ||||||
|  | 					withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error { | ||||||
|  | 						_, err := test.Run(subtest, vmconfig, true) | ||||||
|  | 						return st.checkFailure(t, name+"/snap", err) | ||||||
| 					}) | 					}) | ||||||
| 				}) | 				}) | ||||||
| 			} | 			} | ||||||
|  | |||||||
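Duplicating the t.Run block is the diff's way of running every subtest against both backends. A hypothetical table-driven equivalent, not in the diff, that would produce the same /trie and /snap subtest names:

for _, mode := range []struct {
	name  string
	snaps bool
}{{"trie", false}, {"snap", true}} {
	mode := mode // capture the loop variable for the closure
	t.Run(key+"/"+mode.name, func(t *testing.T) {
		withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
			_, err := test.Run(subtest, vmconfig, mode.snaps)
			return st.checkFailure(t, name+"/"+mode.name, err)
		})
	})
}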
| @ -24,6 +24,8 @@ import ( | |||||||
| 	"strconv" | 	"strconv" | ||||||
| 	"strings" | 	"strings" | ||||||
| 
 | 
 | ||||||
|  | 	"github.com/ethereum/go-ethereum/core/state/snapshot" | ||||||
|  | 
 | ||||||
| 	"github.com/ethereum/go-ethereum/common" | 	"github.com/ethereum/go-ethereum/common" | ||||||
| 	"github.com/ethereum/go-ethereum/common/hexutil" | 	"github.com/ethereum/go-ethereum/common/hexutil" | ||||||
| 	"github.com/ethereum/go-ethereum/common/math" | 	"github.com/ethereum/go-ethereum/common/math" | ||||||
| @ -145,8 +147,8 @@ func (t *StateTest) Subtests() []StateSubtest { | |||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Run executes a specific subtest and verifies the post-state and logs | // Run executes a specific subtest and verifies the post-state and logs | ||||||
| func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config) (*state.StateDB, error) { | func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*state.StateDB, error) { | ||||||
| 	statedb, root, err := t.RunNoVerify(subtest, vmconfig) | 	statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return statedb, err | 		return statedb, err | ||||||
| 	} | 	} | ||||||
| @ -163,14 +165,14 @@ func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config) (*state.StateD | |||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // RunNoVerify runs a specific subtest and returns the statedb and post-state root | // RunNoVerify runs a specific subtest and returns the statedb and post-state root | ||||||
| func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config) (*state.StateDB, common.Hash, error) { | func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*state.StateDB, common.Hash, error) { | ||||||
| 	config, eips, err := getVMConfig(subtest.Fork) | 	config, eips, err := getVMConfig(subtest.Fork) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, common.Hash{}, UnsupportedForkError{subtest.Fork} | 		return nil, common.Hash{}, UnsupportedForkError{subtest.Fork} | ||||||
| 	} | 	} | ||||||
| 	vmconfig.ExtraEips = eips | 	vmconfig.ExtraEips = eips | ||||||
| 	block := t.genesis(config).ToBlock(nil) | 	block := t.genesis(config).ToBlock(nil) | ||||||
| 	statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre) | 	statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter) | ||||||
| 
 | 
 | ||||||
| 	post := t.json.Post[subtest.Fork][subtest.Index] | 	post := t.json.Post[subtest.Fork][subtest.Index] | ||||||
| 	msg, err := t.json.Tx.toMessage(post) | 	msg, err := t.json.Tx.toMessage(post) | ||||||
| @ -204,9 +206,9 @@ func (t *StateTest) gasLimit(subtest StateSubtest) uint64 { | |||||||
| 	return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas] | 	return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas] | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB { | func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) *state.StateDB { | ||||||
| 	sdb := state.NewDatabase(db) | 	sdb := state.NewDatabase(db) | ||||||
| 	statedb, _ := state.New(common.Hash{}, sdb) | 	statedb, _ := state.New(common.Hash{}, sdb, nil) | ||||||
| 	for addr, a := range accounts { | 	for addr, a := range accounts { | ||||||
| 		statedb.SetCode(addr, a.Code) | 		statedb.SetCode(addr, a.Code) | ||||||
| 		statedb.SetNonce(addr, a.Nonce) | 		statedb.SetNonce(addr, a.Nonce) | ||||||
| @ -217,7 +219,12 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB | |||||||
| 	} | 	} | ||||||
| 	// Commit and re-open to start with a clean state. | 	// Commit and re-open to start with a clean state. | ||||||
| 	root, _ := statedb.Commit(false) | 	root, _ := statedb.Commit(false) | ||||||
| 	statedb, _ = state.New(root, sdb) | 
 | ||||||
|  | 	var snaps *snapshot.Tree | ||||||
|  | 	if snapshotter { | ||||||
|  | 		snaps = snapshot.New(db, sdb.TrieDB(), 1, root, false) | ||||||
|  | 	} | ||||||
|  | 	statedb, _ = state.New(root, sdb, snaps) | ||||||
| 	return statedb | 	return statedb | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | |||||||
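MakePreState above is the clearest template for the new wiring: commit a root, optionally build a snapshot.Tree over it, then re-open the StateDB against that tree. For callers the change is a single opt-in flag; a small usage sketch, with a placeholder allocation:

alloc := core.GenesisAlloc{
	common.HexToAddress("0x01"): {Balance: big.NewInt(1)},
}
// false: plain trie-backed pre-state, exactly as before
plain := MakePreState(rawdb.NewMemoryDatabase(), alloc, false)
// true: the committed pre-state is additionally mirrored into a snapshot.Tree
snapped := MakePreState(rawdb.NewMemoryDatabase(), alloc, true)
_, _ = plain, snapped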
| @ -45,7 +45,6 @@ type ttFork struct { | |||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (tt *TransactionTest) Run(config *params.ChainConfig) error { | func (tt *TransactionTest) Run(config *params.ChainConfig) error { | ||||||
| 
 |  | ||||||
| 	validateTx := func(rlpData hexutil.Bytes, signer types.Signer, isHomestead bool, isIstanbul bool) (*common.Address, *common.Hash, error) { | 	validateTx := func(rlpData hexutil.Bytes, signer types.Signer, isHomestead bool, isIstanbul bool) (*common.Address, *common.Hash, error) { | ||||||
| 		tx := new(types.Transaction) | 		tx := new(types.Transaction) | ||||||
| 		if err := rlp.DecodeBytes(rlpData, tx); err != nil { | 		if err := rlp.DecodeBytes(rlpData, tx); err != nil { | ||||||
|  | |||||||
| @ -30,7 +30,10 @@ func TestVM(t *testing.T) { | |||||||
| 
 | 
 | ||||||
| 	vmt.walk(t, vmTestDir, func(t *testing.T, name string, test *VMTest) { | 	vmt.walk(t, vmTestDir, func(t *testing.T, name string, test *VMTest) { | ||||||
| 		withTrace(t, test.json.Exec.GasLimit, func(vmconfig vm.Config) error { | 		withTrace(t, test.json.Exec.GasLimit, func(vmconfig vm.Config) error { | ||||||
| 			return vmt.checkFailure(t, name, test.Run(vmconfig)) | 			return vmt.checkFailure(t, name+"/trie", test.Run(vmconfig, false)) | ||||||
|  | 		}) | ||||||
|  | 		withTrace(t, test.json.Exec.GasLimit, func(vmconfig vm.Config) error { | ||||||
|  | 			return vmt.checkFailure(t, name+"/snap", test.Run(vmconfig, true)) | ||||||
| 		}) | 		}) | ||||||
| 	}) | 	}) | ||||||
| } | } | ||||||
|  | |||||||
| @ -78,8 +78,8 @@ type vmExecMarshaling struct { | |||||||
| 	GasPrice *math.HexOrDecimal256 | 	GasPrice *math.HexOrDecimal256 | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (t *VMTest) Run(vmconfig vm.Config) error { | func (t *VMTest) Run(vmconfig vm.Config, snapshotter bool) error { | ||||||
| 	statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre) | 	statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter) | ||||||
| 	ret, gasRemaining, err := t.exec(statedb, vmconfig) | 	ret, gasRemaining, err := t.exec(statedb, vmconfig) | ||||||
| 
 | 
 | ||||||
| 	if t.json.GasRemaining == nil { | 	if t.json.GasRemaining == nil { | ||||||
|  | |||||||