core/state/snapshot: unlink snapshots from blocks, quad->linear cleanup
parent cdf3f016df
commit d754091a87
@@ -140,10 +140,10 @@ type BlockChain struct {
 	chainConfig *params.ChainConfig // Chain & network configuration
 	cacheConfig *CacheConfig        // Cache configuration for pruning
 
-	db     ethdb.Database         // Low level persistent database to store final content in
-	snaps  *snapshot.SnapshotTree // Snapshot tree for fast trie leaf access
-	triegc *prque.Prque           // Priority queue mapping block numbers to tries to gc
-	gcproc time.Duration          // Accumulates canonical block processing for trie dumping
+	db     ethdb.Database // Low level persistent database to store final content in
+	snaps  *snapshot.Tree // Snapshot tree for fast trie leaf access
+	triegc *prque.Prque   // Priority queue mapping block numbers to tries to gc
+	gcproc time.Duration  // Accumulates canonical block processing for trie dumping
 
 	hc            *HeaderChain
 	rmLogsFeed    event.Feed
@@ -301,7 +301,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 	}
 	// Load any existing snapshot, regenerating it if loading failed
 	head := bc.CurrentBlock()
-	if bc.snaps, err = snapshot.New(bc.db, "snapshot.rlp", head.NumberU64(), head.Root()); err != nil {
+	if bc.snaps, err = snapshot.New(bc.db, "snapshot.rlp", head.Root()); err != nil {
 		return nil, err
 	}
 	// Take ownership of this particular state

@@ -17,38 +17,36 @@
 package rawdb
 
 import (
-	"encoding/binary"
-
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
 )
 
-// ReadSnapshotBlock retrieves the number and root of the block whose state is
-// contained in the persisted snapshot.
-func ReadSnapshotBlock(db ethdb.KeyValueReader) (uint64, common.Hash) {
-	data, _ := db.Get(snapshotBlockKey)
-	if len(data) != 8+common.HashLength {
-		return 0, common.Hash{}
+// ReadSnapshotRoot retrieves the root of the block whose state is contained in
+// the persisted snapshot.
+func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash {
+	data, _ := db.Get(snapshotRootKey)
+	if len(data) != common.HashLength {
+		return common.Hash{}
 	}
-	return binary.BigEndian.Uint64(data[:8]), common.BytesToHash(data[8:])
+	return common.BytesToHash(data)
 }
 
-// WriteSnapshotBlock stores the number and root of the block whose state is
-// contained in the persisted snapshot.
-func WriteSnapshotBlock(db ethdb.KeyValueWriter, number uint64, root common.Hash) {
-	if err := db.Put(snapshotBlockKey, append(encodeBlockNumber(number), root.Bytes()...)); err != nil {
-		log.Crit("Failed to store snapsnot block's number and root", "err", err)
+// WriteSnapshotRoot stores the root of the block whose state is contained in
+// the persisted snapshot.
+func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) {
+	if err := db.Put(snapshotRootKey, root[:]); err != nil {
+		log.Crit("Failed to store snapshot root", "err", err)
 	}
 }
 
-// DeleteSnapshotBlock deletes the number and hash of the block whose state is
-// contained in the persisted snapshot. Since snapshots are not immutable, this
-// method can be used during updates, so a crash or failure will mark the entire
-// snapshot invalid.
-func DeleteSnapshotBlock(db ethdb.KeyValueWriter) {
-	if err := db.Delete(snapshotBlockKey); err != nil {
-		log.Crit("Failed to remove snapsnot block's number and hash", "err", err)
+// DeleteSnapshotRoot deletes the hash of the block whose state is contained in
+// the persisted snapshot. Since snapshots are not immutable, this method can
+// be used during updates, so a crash or failure will mark the entire snapshot
+// invalid.
+func DeleteSnapshotRoot(db ethdb.KeyValueWriter) {
+	if err := db.Delete(snapshotRootKey); err != nil {
+		log.Crit("Failed to remove snapshot root", "err", err)
 	}
 }

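As a quick illustration of the new marker semantics, a minimal sketch (the in-memory database is only a throwaway stand-in here; any ethdb backend works the same way):

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/rawdb"
    )

    func main() {
        db := rawdb.NewMemoryDatabase()

        // The marker is now a single 32-byte value: write it, read it back.
        root := common.HexToHash("0x01")
        rawdb.WriteSnapshotRoot(db, root)
        fmt.Println(rawdb.ReadSnapshotRoot(db) == root) // true

        // Deleting the marker makes reads return the zero hash, which
        // callers treat as "no valid snapshot present".
        rawdb.DeleteSnapshotRoot(db)
        fmt.Println(rawdb.ReadSnapshotRoot(db) == (common.Hash{})) // true
    }
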
@@ -41,8 +41,8 @@ var (
 	// fastTrieProgressKey tracks the number of trie entries imported during fast sync.
 	fastTrieProgressKey = []byte("TrieSync")
 
-	// snapshotBlockKey tracks the number and hash of the last snapshot.
-	snapshotBlockKey = []byte("SnapshotBlock")
+	// snapshotRootKey tracks the hash of the last snapshot.
+	snapshotRootKey = []byte("SnapshotRoot")
 
 	// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
 	headerPrefix       = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header

@@ -36,9 +36,8 @@ type diffLayer struct {
 	parent snapshot // Parent snapshot modified by this one, never nil
 	memory uint64   // Approximate guess as to how much memory we use
 
-	number uint64      // Block number to which this snapshot diff belongs to
-	root   common.Hash // Root hash to which this snapshot diff belongs to
-	stale  bool        // Signals that the layer became stale (state progressed)
+	root  common.Hash // Root hash to which this snapshot diff belongs
+	stale bool        // Signals that the layer became stale (state progressed)
 
 	accountList []common.Hash                          // List of accounts for iteration. If it exists, it's sorted, otherwise it's nil
 	accountData map[common.Hash][]byte                 // Keyed accounts for direct retrieval (nil means deleted)
@@ -50,11 +49,10 @@ type diffLayer struct {
 
 // newDiffLayer creates a new diff on top of an existing snapshot, whether that's a low
 // level persistent database or a hierarchical diff already.
-func newDiffLayer(parent snapshot, number uint64, root common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
+func newDiffLayer(parent snapshot, root common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
 	// Create the new layer with some pre-allocated data segments
 	dl := &diffLayer{
 		parent:      parent,
-		number:      number,
 		root:        root,
 		accountData: accounts,
 		storageData: storage,
@@ -63,7 +61,6 @@ func newDiffLayer(parent snapshot, number uint64, root common.Hash, accounts map
 	for _, data := range accounts {
 		dl.memory += uint64(len(data))
 	}
-
 	// Fill the storage hashes and sort them for the iterator
 	dl.storageList = make(map[common.Hash][]common.Hash)
 
@@ -93,9 +90,18 @@ func newDiffLayer(parent snapshot, number uint64, root common.Hash, accounts map
 	return dl
 }
 
-// Info returns the block number and root hash for which this snapshot was made.
-func (dl *diffLayer) Info() (uint64, common.Hash) {
-	return dl.number, dl.root
+// Root returns the root hash for which this snapshot was made.
+func (dl *diffLayer) Root() common.Hash {
+	return dl.root
 }
 
+// Stale returns whether this layer has become stale (was flattened across) or
+// if it's still live.
+func (dl *diffLayer) Stale() bool {
+	dl.lock.RLock()
+	defer dl.lock.RUnlock()
+
+	return dl.stale
+}
+
 // Account directly retrieves the account associated with a particular hash in
@@ -164,7 +170,7 @@ func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
 // Update creates a new layer on top of the existing snapshot diff tree with
 // the specified data items.
 func (dl *diffLayer) Update(blockRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
-	return newDiffLayer(dl, dl.number+1, blockRoot, accounts, storage)
+	return newDiffLayer(dl, blockRoot, accounts, storage)
 }
 
 // flatten pushes all data from this point downwards, flattening everything into
@@ -213,7 +219,6 @@ func (dl *diffLayer) flatten() snapshot {
 	// Return the combo parent
 	return &diffLayer{
 		parent:      parent.parent,
-		number:      dl.number,
 		root:        dl.root,
 		storageList: parent.storageList,
 		storageData: parent.storageData,

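With block numbers gone, a layer's identity is purely its state root, and stacking layers just chains roots. A package-internal sketch in the style of the package's own tests (empty maps and made-up roots, for brevity):

    func TestRootLinkage(t *testing.T) {
        accounts := map[common.Hash][]byte{}
        storage := map[common.Hash]map[common.Hash][]byte{}

        base := &diskLayer{root: common.HexToHash("0x01")}
        one := newDiffLayer(base, common.HexToHash("0x02"), accounts, storage)
        two := one.Update(common.HexToHash("0x03"), accounts, storage)

        // A layer is addressed by its own root; linkage is by parent root,
        // no numbers are threaded through anywhere.
        if two.Root() != common.HexToHash("0x03") {
            t.Error("layer should be addressed by its own root")
        }
        if two.parent.Root() != one.Root() {
            t.Error("parent linkage should be by root")
        }
    }
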
@@ -43,18 +43,12 @@ type journalStorage struct {
 // diff and verifying that it can be linked to the requested parent.
 func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
 	// Read the next diff journal entry
-	var (
-		number uint64
-		root   common.Hash
-	)
-	if err := r.Decode(&number); err != nil {
+	var root common.Hash
+	if err := r.Decode(&root); err != nil {
 		// The first read may fail with EOF, marking the end of the journal
 		if err == io.EOF {
 			return parent, nil
 		}
-		return nil, fmt.Errorf("load diff number: %v", err)
-	}
-	if err := r.Decode(&root); err != nil {
 		return nil, fmt.Errorf("load diff root: %v", err)
 	}
 	var accounts []journalAccount
@@ -77,13 +71,7 @@ func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
 		}
 		storageData[entry.Hash] = slots
 	}
-	// Validate the block number to avoid state corruption
-	if parent, ok := parent.(*diffLayer); ok {
-		if number != parent.number+1 {
-			return nil, fmt.Errorf("snapshot chain broken: block #%d after #%d", number, parent.number)
-		}
-	}
-	return loadDiffLayer(newDiffLayer(parent, number, root, accountData, storageData), r)
+	return loadDiffLayer(newDiffLayer(parent, root, accountData, storageData), r)
 }
 
 // journal is the internal version of Journal that also returns the journal file
@@ -113,13 +101,8 @@ func (dl *diffLayer) journal() (io.WriteCloser, error) {
 		writer.Close()
 		return nil, ErrSnapshotStale
 	}
-	buf := bufio.NewWriter(writer)
 	// Everything below was journalled, persist this layer too
-	if err := rlp.Encode(buf, dl.number); err != nil {
-		buf.Flush()
-		writer.Close()
-		return nil, err
-	}
+	buf := bufio.NewWriter(writer)
 	if err := rlp.Encode(buf, dl.root); err != nil {
 		buf.Flush()
 		writer.Close()

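Each journal entry now starts directly with the layer's root instead of a number/root pair. A minimal sketch of the framing (payloads elided; only the root round-trip is shown):

    package main

    import (
        "bytes"
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/rlp"
    )

    func main() {
        // Encode a layer's root the way journal() now frames an entry.
        var buf bytes.Buffer
        if err := rlp.Encode(&buf, common.HexToHash("0x02")); err != nil {
            panic(err)
        }
        // Decoding mirrors loadDiffLayer: the first Decode either yields the
        // next layer's root, or fails with io.EOF at the end of the journal.
        var root common.Hash
        if err := rlp.NewStream(&buf, 0).Decode(&root); err != nil {
            panic(err)
        }
        fmt.Println(root == common.HexToHash("0x02")) // true
    }
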
@@ -61,11 +61,11 @@ func TestMergeBasics(t *testing.T) {
 		}
 	}
 	// Add some (identical) layers on top
-	parent := newDiffLayer(emptyLayer{}, 1, common.Hash{}, accounts, storage)
-	child := newDiffLayer(parent, 1, common.Hash{}, accounts, storage)
-	child = newDiffLayer(child, 1, common.Hash{}, accounts, storage)
-	child = newDiffLayer(child, 1, common.Hash{}, accounts, storage)
-	child = newDiffLayer(child, 1, common.Hash{}, accounts, storage)
+	parent := newDiffLayer(emptyLayer{}, common.Hash{}, accounts, storage)
+	child := newDiffLayer(parent, common.Hash{}, accounts, storage)
+	child = newDiffLayer(child, common.Hash{}, accounts, storage)
+	child = newDiffLayer(child, common.Hash{}, accounts, storage)
+	child = newDiffLayer(child, common.Hash{}, accounts, storage)
 	// And flatten
 	merged := (child.flatten()).(*diffLayer)
 
@@ -122,7 +122,7 @@ func TestMergeDelete(t *testing.T) {
 	}
 
 	// Add some flip-flopping layers on top
-	parent := newDiffLayer(emptyLayer{}, 1, common.Hash{}, flip(), storage)
+	parent := newDiffLayer(emptyLayer{}, common.Hash{}, flip(), storage)
 	child := parent.Update(common.Hash{}, flop(), storage)
 	child = child.Update(common.Hash{}, flip(), storage)
 	child = child.Update(common.Hash{}, flop(), storage)
@@ -139,10 +139,6 @@ func TestMergeDelete(t *testing.T) {
 	// And flatten
 	merged := (child.flatten()).(*diffLayer)
 
-	// check number
-	if got, exp := merged.number, child.number; got != exp {
-		t.Errorf("merged layer: wrong number - exp %d got %d", exp, got)
-	}
 	if data, _ := merged.Account(h1); data == nil {
 		t.Errorf("merged layer: expected %x to be non-nil", h1)
 	}
@@ -169,7 +165,7 @@ func TestInsertAndMerge(t *testing.T) {
 	{
 		var accounts = make(map[common.Hash][]byte)
 		var storage = make(map[common.Hash]map[common.Hash][]byte)
-		parent = newDiffLayer(emptyLayer{}, 1, common.Hash{}, accounts, storage)
+		parent = newDiffLayer(emptyLayer{}, common.Hash{}, accounts, storage)
 	}
 	{
 		var accounts = make(map[common.Hash][]byte)
@@ -178,7 +174,7 @@ func TestInsertAndMerge(t *testing.T) {
 		accstorage := make(map[common.Hash][]byte)
 		storage[acc] = accstorage
 		storage[acc][slot] = []byte{0x01}
-		child = newDiffLayer(parent, 2, common.Hash{}, accounts, storage)
+		child = newDiffLayer(parent, common.Hash{}, accounts, storage)
 	}
 	// And flatten
 	merged := (child.flatten()).(*diffLayer)
@@ -200,11 +196,12 @@ func (emptyLayer) Journal() error {
 	panic("implement me")
 }
 
-func (emptyLayer) Info() (uint64, common.Hash) {
-	return 0, common.Hash{}
+func (emptyLayer) Stale() bool {
+	panic("implement me")
 }
-func (emptyLayer) Number() uint64 {
-	return 0
+
+func (emptyLayer) Root() common.Hash {
+	return common.Hash{}
 }
 
 func (emptyLayer) Account(hash common.Hash) (*Account, error) {
@@ -227,8 +224,6 @@ func (emptyLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error)
 // BenchmarkSearch-6   	  500000	      3723 ns/op (10k per layer, only top-level RLock()
 func BenchmarkSearch(b *testing.B) {
 	// First, we set up 128 diff layers, with 1K items each
-
-	blocknum := uint64(0)
 	fill := func(parent snapshot) *diffLayer {
 		accounts := make(map[common.Hash][]byte)
 		storage := make(map[common.Hash]map[common.Hash][]byte)
@@ -236,10 +231,8 @@ func BenchmarkSearch(b *testing.B) {
 		for i := 0; i < 10000; i++ {
 			accounts[randomHash()] = randomAccount()
 		}
-		blocknum++
-		return newDiffLayer(parent, blocknum, common.Hash{}, accounts, storage)
+		return newDiffLayer(parent, common.Hash{}, accounts, storage)
 	}
-
 	var layer snapshot
 	layer = emptyLayer{}
 	for i := 0; i < 128; i++ {
@@ -261,8 +254,6 @@ func BenchmarkSearch(b *testing.B) {
 // BenchmarkSearchSlot-6   	  100000	     14551 ns/op (when checking parent number using atomic)
 func BenchmarkSearchSlot(b *testing.B) {
 	// First, we set up 128 diff layers, with 1K items each
-
-	blocknum := uint64(0)
 	accountKey := common.Hash{}
 	storageKey := common.HexToHash("0x1337")
 	accountRLP := randomAccount()
@@ -278,16 +269,13 @@ func BenchmarkSearchSlot(b *testing.B) {
 			accStorage[randomHash()] = value
 			storage[accountKey] = accStorage
 		}
-		blocknum++
-		return newDiffLayer(parent, blocknum, common.Hash{}, accounts, storage)
+		return newDiffLayer(parent, common.Hash{}, accounts, storage)
 	}
-
 	var layer snapshot
 	layer = emptyLayer{}
 	for i := 0; i < 128; i++ {
 		layer = fill(layer)
 	}
-
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
 		layer.Storage(accountKey, storageKey)
@@ -300,7 +288,7 @@ func BenchmarkSearchSlot(b *testing.B) {
 // Without sorting and tracking accountlist
 // BenchmarkFlatten-6   	     300	   5511511 ns/op
 func BenchmarkFlatten(b *testing.B) {
-	fill := func(parent snapshot, blocknum int) *diffLayer {
+	fill := func(parent snapshot) *diffLayer {
 		accounts := make(map[common.Hash][]byte)
 		storage := make(map[common.Hash]map[common.Hash][]byte)
 
@@ -317,7 +305,7 @@ func BenchmarkFlatten(b *testing.B) {
 			}
 			storage[accountKey] = accStorage
 		}
-		return newDiffLayer(parent, uint64(blocknum), common.Hash{}, accounts, storage)
+		return newDiffLayer(parent, common.Hash{}, accounts, storage)
 	}
 
 	b.ResetTimer()
@@ -327,7 +315,7 @@ func BenchmarkFlatten(b *testing.B) {
 		var layer snapshot
 		layer = emptyLayer{}
 		for i := 1; i < 128; i++ {
-			layer = fill(layer, i)
+			layer = fill(layer)
 		}
 		b.StartTimer()
 
@@ -336,7 +324,6 @@ func BenchmarkFlatten(b *testing.B) {
 			if !ok {
 				break
 			}
-
 			layer = dl.flatten()
 		}
 		b.StopTimer()
@@ -351,7 +338,7 @@
 // BenchmarkJournal-6   	       1	1471373923 ns/ops
 // BenchmarkJournal-6   	       1	1208083335 ns/op // bufio writer
 func BenchmarkJournal(b *testing.B) {
-	fill := func(parent snapshot, blocknum int) *diffLayer {
+	fill := func(parent snapshot) *diffLayer {
 		accounts := make(map[common.Hash][]byte)
 		storage := make(map[common.Hash]map[common.Hash][]byte)
 
@@ -368,15 +355,14 @@ func BenchmarkJournal(b *testing.B) {
 			}
 			storage[accountKey] = accStorage
 		}
-		return newDiffLayer(parent, uint64(blocknum), common.Hash{}, accounts, storage)
+		return newDiffLayer(parent, common.Hash{}, accounts, storage)
 	}
 
 	var layer snapshot
 	layer = &diskLayer{
 		journal: path.Join(os.TempDir(), "difflayer_journal.tmp"),
 	}
 	for i := 1; i < 128; i++ {
-		layer = fill(layer, i)
+		layer = fill(layer)
 	}
 	b.ResetTimer()

@@ -32,16 +32,24 @@ type diskLayer struct {
 	db      ethdb.KeyValueStore // Key-value store containing the base snapshot
 	cache   *bigcache.BigCache  // Cache to avoid hitting the disk for direct access
 
-	number uint64      // Block number of the base snapshot
-	root   common.Hash // Root hash of the base snapshot
-	stale  bool        // Signals that the layer became stale (state progressed)
+	root  common.Hash // Root hash of the base snapshot
+	stale bool        // Signals that the layer became stale (state progressed)
 
 	lock sync.RWMutex
 }
 
-// Info returns the block number and root hash for which this snapshot was made.
-func (dl *diskLayer) Info() (uint64, common.Hash) {
-	return dl.number, dl.root
+// Root returns the root hash for which this snapshot was made.
+func (dl *diskLayer) Root() common.Hash {
+	return dl.root
 }
 
+// Stale returns whether this layer has become stale (was flattened across) or
+// if it's still live.
+func (dl *diskLayer) Stale() bool {
+	dl.lock.RLock()
+	defer dl.lock.RUnlock()
+
+	return dl.stale
+}
+
 // Account directly retrieves the account associated with a particular hash in
@@ -123,7 +131,7 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
 // the specified data items. Note, the maps are retained by the method to avoid
 // copying everything.
 func (dl *diskLayer) Update(blockHash common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
-	return newDiffLayer(dl, dl.number+1, blockHash, accounts, storage)
+	return newDiffLayer(dl, blockHash, accounts, storage)
 }
 
 // Journal commits an entire diff hierarchy to disk into a single journal file.

@@ -85,7 +85,7 @@ func wipeSnapshot(db ethdb.KeyValueStore) error {
 	}
 	it.Release()
 
-	rawdb.DeleteSnapshotBlock(batch)
+	rawdb.DeleteSnapshotRoot(batch)
 	if err := batch.Write(); err != nil {
 		return err
 	}
@@ -107,7 +107,7 @@ func wipeSnapshot(db ethdb.KeyValueStore) error {
 }
 
 // generateSnapshot regenerates a brand new snapshot based on an existing state database and head block.
-func generateSnapshot(db ethdb.KeyValueStore, journal string, headNumber uint64, headRoot common.Hash) (snapshot, error) {
+func generateSnapshot(db ethdb.KeyValueStore, journal string, root common.Hash) (snapshot, error) {
 	// Wipe any previously existing snapshot from the database
 	if err := wipeSnapshot(db); err != nil {
 		return nil, err
@@ -124,7 +124,7 @@ func generateSnapshot(db ethdb.KeyValueStore, journal string, headNumber uint64,
 	batch := db.NewBatch()
 	triedb := trie.NewDatabase(db)
 
-	accTrie, err := trie.NewSecure(headRoot, triedb)
+	accTrie, err := trie.NewSecure(root, triedb)
 	if err != nil {
 		return nil, err
 	}
@@ -186,7 +186,7 @@ func generateSnapshot(db ethdb.KeyValueStore, journal string, headNumber uint64,
 	fmt.Printf("Totals: %9s (%d accs, %d nodes) + %9s (%d slots, %d nodes)\n", accountSize.TerminalString(), accountCount, accIt.Nodes, storageSize.TerminalString(), storageCount, storageNodes)
 
 	// Update the snapshot block marker and write any remainder data
-	rawdb.WriteSnapshotBlock(batch, headNumber, headRoot)
+	rawdb.WriteSnapshotRoot(batch, root)
 	batch.Write()
 	batch.Reset()
@@ -207,7 +207,6 @@ func generateSnapshot(db ethdb.KeyValueStore, journal string, headNumber uint64,
 		journal: journal,
 		db:      db,
 		cache:   cache,
-		number:  headNumber,
-		root:    headRoot,
+		root:    root,
 	}, nil
 }

@@ -47,7 +47,7 @@ func TestWipe(t *testing.T) {
 			rawdb.WriteStorageSnapshot(db, account, randomHash(), randomHash().Bytes())
 		}
 	}
-	rawdb.WriteSnapshotBlock(db, 123, randomHash())
+	rawdb.WriteSnapshotRoot(db, randomHash())
 
 	// Add some random non-snapshot data too to make wiping harder
 	for i := 0; i < 65536; i++ {
@@ -76,8 +76,8 @@ func TestWipe(t *testing.T) {
 	if items != 128+128*1024 {
 		t.Fatalf("snapshot size mismatch: have %d, want %d", items, 128+128*1024)
 	}
-	if number, hash := rawdb.ReadSnapshotBlock(db); number != 123 || hash == (common.Hash{}) {
-		t.Errorf("snapshot block marker mismatch: have #%d [%#x], want #%d [<not-nil>]", number, hash, 123)
+	if hash := rawdb.ReadSnapshotRoot(db); hash == (common.Hash{}) {
+		t.Errorf("snapshot block marker mismatch: have %#x, want <not-nil>", hash)
 	}
 	// Wipe all snapshot entries from the database
 	if err := wipeSnapshot(db); err != nil {
@@ -93,8 +93,8 @@ func TestWipe(t *testing.T) {
 			t.Errorf("snapshot entry remained after wipe: %x", key)
 		}
 	}
-	if number, hash := rawdb.ReadSnapshotBlock(db); number != 0 || hash != (common.Hash{}) {
-		t.Errorf("snapshot block marker remained after wipe: #%d [%#x]", number, hash)
+	if hash := rawdb.ReadSnapshotRoot(db); hash != (common.Hash{}) {
+		t.Errorf("snapshot block marker remained after wipe: %#x", hash)
 	}
 	// Iterate over the database and ensure miscellaneous items are present
 	items = 0

@@ -43,12 +43,16 @@ var (
 	// layer had been invalidated due to the chain progressing forward far enough
 	// to not maintain the layer's original state.
 	ErrSnapshotStale = errors.New("snapshot stale")
+
+	// errSnapshotCycle is returned if a snapshot is attempted to be inserted
+	// that forms a cycle in the snapshot tree.
+	errSnapshotCycle = errors.New("snapshot cycle")
 )
 
 // Snapshot represents the functionality supported by a snapshot storage layer.
 type Snapshot interface {
-	// Info returns the block number and root hash for which this snapshot was made.
-	Info() (uint64, common.Hash)
+	// Root returns the root hash for which this snapshot was made.
+	Root() common.Hash
 
 	// Account directly retrieves the account associated with a particular hash in
 	// the snapshot slim data format.
@@ -77,6 +81,10 @@ type snapshot interface {
 	// This is meant to be used during shutdown to persist the snapshot without
 	// flattening everything down (bad for reorgs).
 	Journal() error
+
+	// Stale returns whether this layer has become stale (was flattened across)
+	// or if it's still live.
+	Stale() bool
 }
 
 // SnapshotTree is an Ethereum state snapshot tree. It consists of one persistent
@@ -88,7 +96,7 @@ type snapshot interface {
 // The goal of a state snapshot is twofold: to allow direct access to account and
 // storage data to avoid expensive multi-level trie lookups; and to allow sorted,
 // cheap iteration of the account/storage tries for sync aid.
-type SnapshotTree struct {
+type Tree struct {
 	layers map[common.Hash]snapshot // Collection of all known layers // TODO(karalabe): split Clique overlaps
 	lock   sync.RWMutex
 }
@@ -99,22 +107,21 @@ type SnapshotTree struct {
 //
 // If the snapshot is missing or inconsistent, the entirety is deleted and will
 // be reconstructed from scratch based on the tries in the key-value store.
-func New(db ethdb.KeyValueStore, journal string, headNumber uint64, headRoot common.Hash) (*SnapshotTree, error) {
+func New(db ethdb.KeyValueStore, journal string, root common.Hash) (*Tree, error) {
 	// Attempt to load a previously persisted snapshot
-	head, err := loadSnapshot(db, journal, headNumber, headRoot)
+	head, err := loadSnapshot(db, journal, root)
 	if err != nil {
 		log.Warn("Failed to load snapshot, regenerating", "err", err)
-		if head, err = generateSnapshot(db, journal, headNumber, headRoot); err != nil {
+		if head, err = generateSnapshot(db, journal, root); err != nil {
 			return nil, err
 		}
 	}
 	// Existing snapshot loaded or one regenerated, seed all the layers
-	snap := &SnapshotTree{
+	snap := &Tree{
 		layers: make(map[common.Hash]snapshot),
 	}
 	for head != nil {
-		_, root := head.Info()
-		snap.layers[root] = head
+		snap.layers[head.Root()] = head
 
 		switch self := head.(type) {
 		case *diffLayer:
@@ -130,54 +137,57 @@ func New(db ethdb.KeyValueStore, journal string, headNumber uint64, headRoot com
 
 // Snapshot retrieves a snapshot belonging to the given block root, or nil if no
 // snapshot is maintained for that block.
-func (st *SnapshotTree) Snapshot(blockRoot common.Hash) Snapshot {
-	st.lock.RLock()
-	defer st.lock.RUnlock()
+func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
 
-	return st.layers[blockRoot]
+	return t.layers[blockRoot]
 }
 
 // Update adds a new snapshot into the tree, if that can be linked to an existing
 // old parent. It is disallowed to insert a disk layer (the origin of all).
-func (st *SnapshotTree) Update(blockRoot common.Hash, parentRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
+func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
+	// Reject noop updates to avoid self-loops in the snapshot tree. This is a
+	// special case that can only happen for Clique networks where empty blocks
+	// don't modify the state (0 block subsidy).
+	//
+	// Although we could silently ignore this internally, it should be the caller's
+	// responsibility to avoid even attempting to insert such a snapshot.
+	if blockRoot == parentRoot {
+		return errSnapshotCycle
+	}
 	// Generate a new snapshot on top of the parent
-	parent := st.Snapshot(parentRoot).(snapshot)
+	parent := t.Snapshot(parentRoot).(snapshot)
 	if parent == nil {
 		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
 	}
 	snap := parent.Update(blockRoot, accounts, storage)
 
 	// Save the new snapshot for later
-	st.lock.Lock()
-	defer st.lock.Unlock()
+	t.lock.Lock()
+	defer t.lock.Unlock()
 
-	st.layers[snap.root] = snap
+	t.layers[snap.root] = snap
 	return nil
 }
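The caller-visible effect of the new guard, as a package-internal sketch in the style of the existing tests (made-up root):

    func TestUpdateRejectsCycle(t *testing.T) {
        base := &diskLayer{root: common.HexToHash("0x01")}
        snaps := &Tree{layers: map[common.Hash]snapshot{base.root: base}}

        // A child whose root equals its parent's root would self-loop the
        // root-keyed tree, so Update refuses it before doing any work.
        if err := snaps.Update(base.root, base.root, nil, nil); err != errSnapshotCycle {
            t.Errorf("have %v, want %v", err, errSnapshotCycle)
        }
    }
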
 
 // Cap traverses downwards the snapshot tree from a head block hash until the
 // number of allowed layers are crossed. All layers beyond the permitted number
 // are flattened downwards.
-func (st *SnapshotTree) Cap(blockRoot common.Hash, layers int, memory uint64) error {
+func (t *Tree) Cap(root common.Hash, layers int, memory uint64) error {
 	// Retrieve the head snapshot to cap from
-	var snap snapshot
-	if s := st.Snapshot(blockRoot); s == nil {
-		return fmt.Errorf("snapshot [%#x] missing", blockRoot)
-	} else {
-		snap = s.(snapshot)
+	snap := t.Snapshot(root)
+	if snap == nil {
+		return fmt.Errorf("snapshot [%#x] missing", root)
 	}
 	diff, ok := snap.(*diffLayer)
 	if !ok {
-		return fmt.Errorf("snapshot [%#x] is disk layer", blockRoot)
+		return fmt.Errorf("snapshot [%#x] is disk layer", root)
 	}
 	// Run the internal capping and discard all stale layers
-	st.lock.Lock()
-	defer st.lock.Unlock()
+	t.lock.Lock()
+	defer t.lock.Unlock()
 
-	var (
-		diskNumber uint64
-		diffNumber uint64
-	)
 	// Flattening the bottom-most diff layer requires special casing since there's
 	// no child to rewire to the grandparent. In that case we can fake a temporary
 	// child for the capping and then remove it.
@@ -188,8 +198,9 @@ func (st *SnapshotTree) Cap(blockRoot common.Hash, layers int, memory uint64) er
 		base := diffToDisk(diff.flatten().(*diffLayer))
 		diff.lock.RUnlock()
 
-		st.layers[base.root] = base
-		diskNumber, diffNumber = base.number, base.number
+		// Replace the entire snapshot tree with the flat base
+		t.layers = map[common.Hash]snapshot{base.root: base}
+		return nil
 
 	case 1:
 		// If full flattening was requested, flatten the diffs but only merge if the
@@ -205,59 +216,74 @@
 		}
 		diff.lock.RUnlock()
 
+		// If all diff layers were removed, replace the entire snapshot tree
 		if base != nil {
-			st.layers[base.root] = base
-			diskNumber, diffNumber = base.number, base.number
-		} else {
-			st.layers[bottom.root] = bottom
-			diskNumber, diffNumber = bottom.parent.(*diskLayer).number, bottom.number
+			t.layers = map[common.Hash]snapshot{base.root: base}
+			return nil
 		}
+		// Merge the new aggregated layer into the snapshot tree, clean stales below
+		t.layers[bottom.root] = bottom
 
 	default:
-		diskNumber, diffNumber = st.cap(diff, layers, memory)
+		// Many layers requested to be retained, cap normally
+		t.cap(diff, layers, memory)
 	}
-	for root, snap := range st.layers {
-		if number, _ := snap.Info(); number != diskNumber && number < diffNumber {
-			delete(st.layers, root)
+	// Remove any layer that is stale or links into a stale layer
+	children := make(map[common.Hash][]common.Hash)
+	for root, snap := range t.layers {
+		if diff, ok := snap.(*diffLayer); ok {
+			parent := diff.parent.Root()
+			children[parent] = append(children[parent], root)
+		}
+	}
+	var remove func(root common.Hash)
+	remove = func(root common.Hash) {
+		delete(t.layers, root)
+		for _, child := range children[root] {
+			remove(child)
+		}
+		delete(children, root)
+	}
+	for root, snap := range t.layers {
+		if snap.Stale() {
+			remove(root)
 		}
 	}
 	return nil
 }
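This sweep is the "quad->linear cleanup" of the commit title: instead of comparing every layer's block number against GC thresholds, a reverse parent-to-children index is built once and stale subtrees are dropped recursively, touching each layer a constant number of times. A standalone sketch of the same pattern (string keys standing in for root hashes):

    package main

    import "fmt"

    // sweep indexes parent -> children once, then drops every subtree
    // rooted at a stale node; linear in the number of nodes.
    func sweep(parents map[string]string, stale map[string]bool) map[string]bool {
        children := make(map[string][]string)
        for node, parent := range parents {
            children[parent] = append(children[parent], node)
        }
        removed := make(map[string]bool)
        var remove func(node string)
        remove = func(node string) {
            removed[node] = true
            for _, child := range children[node] {
                remove(child)
            }
            delete(children, node)
        }
        for node := range parents {
            if stale[node] {
                remove(node)
            }
        }
        return removed
    }

    func main() {
        // b is stale, so b and everything stacked on it (c, d) must go.
        parents := map[string]string{"b": "a", "c": "b", "d": "b"}
        fmt.Println(sweep(parents, map[string]bool{"b": true}))
        // map[b:true c:true d:true]
    }
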
 
 // cap traverses downwards the diff tree until the number of allowed layers are
-// crossed. All diffs beyond the permitted number are flattened downwards. If
-// the layer limit is reached, memory cap is also enforced (but not before). The
-// block numbers for the disk layer and first diff layer are returned for GC.
-func (st *SnapshotTree) cap(diff *diffLayer, layers int, memory uint64) (uint64, uint64) {
+// crossed. All diffs beyond the permitted number are flattened downwards. If the
+// layer limit is reached, memory cap is also enforced (but not before).
+func (t *Tree) cap(diff *diffLayer, layers int, memory uint64) {
 	// Dive until we run out of layers or reach the persistent database
 	for ; layers > 2; layers-- {
 		// If we still have diff layers below, continue down
 		if parent, ok := diff.parent.(*diffLayer); ok {
 			diff = parent
 		} else {
-			// Diff stack too shallow, return block numbers without modifications
-			return diff.parent.(*diskLayer).number, diff.number
+			// Diff stack too shallow, return without modifications
+			return
 		}
 	}
 	// We're out of layers, flatten anything below, stopping if it's the disk or if
 	// the memory limit is not yet exceeded.
 	switch parent := diff.parent.(type) {
 	case *diskLayer:
-		return parent.number, diff.number
+		return
 
 	case *diffLayer:
 		// Flatten the parent into the grandparent. The flattening internally obtains a
 		// write lock on grandparent.
 		flattened := parent.flatten().(*diffLayer)
-		st.layers[flattened.root] = flattened
+		t.layers[flattened.root] = flattened
 
 		diff.lock.Lock()
 		defer diff.lock.Unlock()
 
 		diff.parent = flattened
 		if flattened.memory < memory {
-			diskNumber, _ := flattened.parent.Info()
-			return diskNumber, flattened.number
+			return
 		}
 	default:
 		panic(fmt.Sprintf("unknown data layer: %T", parent))
@@ -269,10 +295,8 @@ func (st *SnapshotTree) cap(diff *diffLayer, layers int, memory uint64) (uint64,
 	base := diffToDisk(bottom)
 	bottom.lock.RUnlock()
 
-	st.layers[base.root] = base
+	t.layers[base.root] = base
 	diff.parent = base
-
-	return base.number, diff.number
 }
 
 // diffToDisk merges a bottom-most diff into the persistent disk layer underneath
@@ -284,7 +308,7 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 	)
 	// Start by temporarily deleting the current snapshot block marker. This
 	// ensures that in the case of a crash, the entire snapshot is invalidated.
-	rawdb.DeleteSnapshotBlock(batch)
+	rawdb.DeleteSnapshotRoot(batch)
 
 	// Mark the original base as stale as we're going to create a new wrapper
 	base.lock.Lock()
@@ -341,13 +365,12 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 		}
 	}
 	// Update the snapshot block marker and write any remainder data
-	rawdb.WriteSnapshotBlock(batch, bottom.number, bottom.root)
+	rawdb.WriteSnapshotRoot(batch, bottom.root)
 	if err := batch.Write(); err != nil {
 		log.Crit("Failed to write leftover snapshot", "err", err)
 	}
 	return &diskLayer{
 		root:    bottom.root,
-		number:  bottom.number,
 		cache:   base.cache,
 		db:      base.db,
 		journal: base.journal,
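The marker calls at the top and bottom of diffToDisk are the crash-safety story in miniature. Schematically (names hypothetical, flow condensed; the real code flushes the batch in pieces as it grows):

    batch := db.NewBatch()
    rawdb.DeleteSnapshotRoot(batch) // 1. no "valid" marker can survive a crash mid-update
    // 2. ... stage the flattened account/storage data into the batch ...
    rawdb.WriteSnapshotRoot(batch, newRoot) // 3. re-arm the marker last
    if err := batch.Write(); err != nil {
        log.Crit("Failed to write snapshot", "err", err)
    }
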
@@ -357,27 +380,25 @@
 // Journal commits an entire diff hierarchy to disk into a single journal file.
 // This is meant to be used during shutdown to persist the snapshot without
 // flattening everything down (bad for reorgs).
-func (st *SnapshotTree) Journal(blockRoot common.Hash) error {
+func (t *Tree) Journal(blockRoot common.Hash) error {
 	// Retrieve the head snapshot to journal from
-	var snap snapshot
-	if s := st.Snapshot(blockRoot); s == nil {
+	snap := t.Snapshot(blockRoot)
+	if snap == nil {
 		return fmt.Errorf("snapshot [%#x] missing", blockRoot)
-	} else {
-		snap = s.(snapshot)
 	}
 	// Run the journaling
-	st.lock.Lock()
-	defer st.lock.Unlock()
+	t.lock.Lock()
+	defer t.lock.Unlock()
 
-	return snap.Journal()
+	return snap.(snapshot).Journal()
 }
 
 // loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
-func loadSnapshot(db ethdb.KeyValueStore, journal string, headNumber uint64, headRoot common.Hash) (snapshot, error) {
+func loadSnapshot(db ethdb.KeyValueStore, journal string, root common.Hash) (snapshot, error) {
 	// Retrieve the block number and hash of the snapshot, failing if no snapshot
 	// is present in the database (or crashed mid-update).
-	number, root := rawdb.ReadSnapshotBlock(db)
-	if root == (common.Hash{}) {
+	baseRoot := rawdb.ReadSnapshotRoot(db)
+	if baseRoot == (common.Hash{}) {
 		return nil, errors.New("missing or corrupted snapshot")
 	}
 	cache, _ := bigcache.NewBigCache(bigcache.Config{ // TODO(karalabe): dedup
@@ -391,16 +412,14 @@ func loadSnapshot(db ethdb.KeyValueStore, journal string, headNumber uint64, hea
 		journal: journal,
 		db:      db,
 		cache:   cache,
-		number:  number,
-		root:    root,
+		root:    baseRoot,
 	}
 	// Load all the snapshot diffs from the journal, failing if their chain is broken
 	// or does not lead from the disk snapshot to the specified head.
 	if _, err := os.Stat(journal); os.IsNotExist(err) {
 		// Journal doesn't exist, don't worry if it's not supposed to
-		if number != headNumber || root != headRoot {
-			return nil, fmt.Errorf("snapshot journal missing, head doesn't match snapshot: #%d [%#x] vs. #%d [%#x]",
-				headNumber, headRoot, number, root)
+		if baseRoot != root {
+			return nil, fmt.Errorf("snapshot journal missing, head doesn't match snapshot: have %#x, want %#x", baseRoot, root)
 		}
 		return base, nil
 	}
@@ -414,10 +433,8 @@
 	}
 	// Entire snapshot journal loaded, sanity check the head and return
 	// Journal doesn't exist, don't worry if it's not supposed to
-	number, root = snapshot.Info()
-	if number != headNumber || root != headRoot {
-		return nil, fmt.Errorf("head doesn't match snapshot: #%d [%#x] vs. #%d [%#x]",
-			headNumber, headRoot, number, root)
+	if head := snapshot.Root(); head != root {
+		return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
 	}
 	return snapshot, nil
 }

@@ -37,7 +37,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) {
 		root:  common.HexToHash("0x01"),
 		cache: cache,
 	}
-	snaps := &SnapshotTree{
+	snaps := &Tree{
 		layers: map[common.Hash]snapshot{
 			base.root: base,
 		},
@@ -83,7 +83,7 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) {
 		root:  common.HexToHash("0x01"),
 		cache: cache,
 	}
-	snaps := &SnapshotTree{
+	snaps := &Tree{
 		layers: map[common.Hash]snapshot{
 			base.root: base,
 		},
@@ -132,7 +132,7 @@ func TestDiffLayerExternalInvalidationFullFlatten(t *testing.T) {
 		root:  common.HexToHash("0x01"),
 		cache: cache,
 	}
-	snaps := &SnapshotTree{
+	snaps := &Tree{
 		layers: map[common.Hash]snapshot{
 			base.root: base,
 		},
@@ -181,7 +181,7 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
 		root:  common.HexToHash("0x01"),
 		cache: cache,
 	}
-	snaps := &SnapshotTree{
+	snaps := &Tree{
 		layers: map[common.Hash]snapshot{
 			base.root: base,
 		},
@@ -213,7 +213,6 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) {
 	if got := len(snaps.layers); got != exp {
 		t.Errorf("layers modified, got %d exp %d", got, exp)
 	}
-
 	// Flatten the diff layer into the bottom accumulator
 	if err := snaps.Cap(common.HexToHash("0x04"), 2, 1024*1024); err != nil {
 		t.Fatalf("failed to flatten diff layer into accumulator: %v", err)
@@ -247,7 +246,7 @@ func TestPostCapBasicDataAccess(t *testing.T) {
 		root:  common.HexToHash("0x01"),
 		cache: cache,
 	}
-	snaps := &SnapshotTree{
+	snaps := &Tree{
 		layers: map[common.Hash]snapshot{
 			base.root: base,
 		},

@@ -68,7 +68,7 @@ type StateDB struct {
 	db   Database
 	trie Trie
 
-	snaps        *snapshot.SnapshotTree
+	snaps        *snapshot.Tree
 	snap         snapshot.Snapshot
 	snapAccounts map[common.Hash][]byte
 	snapStorage  map[common.Hash]map[common.Hash][]byte
@@ -117,7 +117,7 @@ type StateDB struct {
 }
 
 // Create a new state from a given trie.
-func New(root common.Hash, db Database, snaps *snapshot.SnapshotTree) (*StateDB, error) {
+func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) {
 	tr, err := db.OpenTrie(root)
 	if err != nil {
 		return nil, err
@@ -840,12 +840,14 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 		if metrics.EnabledExpensive {
 			defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now())
 		}
-		_, parentRoot := s.snap.Info()
-		if err := s.snaps.Update(root, parentRoot, s.snapAccounts, s.snapStorage); err != nil {
-			log.Warn("Failed to update snapshot tree", "from", parentRoot, "to", root, "err", err)
-		}
-		if err := s.snaps.Cap(root, 16, 4*1024*1024); err != nil {
-			log.Warn("Failed to cap snapshot tree", "root", root, "layers", 16, "memory", 4*1024*1024, "err", err)
+		// Only update if there's a state transition (skip empty Clique blocks)
+		if parent := s.snap.Root(); parent != root {
+			if err := s.snaps.Update(root, parent, s.snapAccounts, s.snapStorage); err != nil {
+				log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err)
+			}
+			if err := s.snaps.Cap(root, 16, 4*1024*1024); err != nil {
+				log.Warn("Failed to cap snapshot tree", "root", root, "layers", 16, "memory", 4*1024*1024, "err", err)
+			}
 		}
 		s.snap, s.snapAccounts, s.snapStorage = nil, nil, nil
 	}

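Taken together, the snapshot API is now addressed purely by state roots. A caller's-eye sketch of the lifecycle (roots and db hypothetical, error handling elided):

    // Load or regenerate the tree at the current head state root.
    snaps, _ := snapshot.New(db, "snapshot.rlp", headRoot)

    // Stack a diff layer per state transition, linked by parent root.
    _ = snaps.Update(childRoot, headRoot, accounts, storage)

    // Periodically cap the diff stack to bound layer count and memory.
    _ = snaps.Cap(childRoot, 16, 4*1024*1024)

    // On shutdown, journal the diffs instead of flattening them.
    _ = snaps.Journal(childRoot)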