core, trie: rework trie committer (#25320)

* all: rework trie and trie committer

* all: get rid of internal cache in trie

* all: fixes

* trie: polish

* core, trie: address comments

* trie: fix imports

* core/state: address comments

* core/state/snapshot: polish

* trie: remove unused code

* trie: update tests

* trie: don't set db as nil

* trie: address comments

* trie: unskip test
Authored by rjl493456442 on 2022-08-04 16:03:20 +08:00, committed by GitHub
parent 733d76a88d
commit 8b53b92eb4
24 changed files with 587 additions and 432 deletions


@@ -1244,7 +1244,7 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
 // writeBlockWithState writes block, metadata and corresponding state data to the
 // database.
-func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB) error {
+func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error {
 	// Calculate the total difficulty of the block
 	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
 	if ptd == nil {
@@ -1339,7 +1339,7 @@ func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types
 // writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead.
 // This function expects the chain mutex to be held.
 func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
-	if err := bc.writeBlockWithState(block, receipts, logs, state); err != nil {
+	if err := bc.writeBlockWithState(block, receipts, state); err != nil {
 		return NonStatTy, err
 	}
 	currentBlock := bc.CurrentBlock()
@@ -1703,7 +1703,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
 		var status WriteStatus
 		if !setHead {
 			// Don't set the head, only insert the block
-			err = bc.writeBlockWithState(block, receipts, logs, statedb)
+			err = bc.writeBlockWithState(block, receipts, statedb)
 		} else {
 			status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false)
 		}


@@ -88,9 +88,13 @@ type Trie interface {
 	// can be used even if the trie doesn't have one.
 	Hash() common.Hash

-	// Commit writes all nodes to the trie's memory database, tracking the internal
-	// and external (for account tries) references.
-	Commit(onleaf trie.LeafCallback) (common.Hash, int, error)
+	// Commit collects all dirty nodes in the trie and replace them with the
+	// corresponding node hash. All collected nodes(including dirty leaves if
+	// collectLeaf is true) will be encapsulated into a nodeset for return.
+	// The returned nodeset can be nil if the trie is clean(nothing to commit).
+	// Once the trie is committed, it's not usable anymore. A new trie must
+	// be created with new root and updated trie database for following usage
+	Commit(collectLeaf bool) (common.Hash, *trie.NodeSet, error)

 	// NodeIterator returns an iterator that returns nodes of the trie. Iteration
 	// starts at the key after the given start key.
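
For orientation, here is a minimal sketch of the commit flow this interface change implies, using only calls that appear elsewhere in this diff (trie.NewEmpty, Trie.Commit, trie.NewWithNodeSet, Database.Update, trie.New); the key/value pair is made up for illustration:

package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	// A trie backed by an in-memory node database.
	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
	tr := trie.NewEmpty(db)
	tr.Update([]byte("key"), []byte("value"))

	// Commit returns the new root plus the set of dirty nodes; the set is
	// nil when the trie is clean.
	root, nodes, err := tr.Commit(false)
	if err != nil {
		panic(err)
	}
	if nodes != nil {
		db.Update(trie.NewWithNodeSet(nodes))
	}
	// The committed trie is no longer usable; reopen it from the new root.
	tr, err = trie.New(common.Hash{}, root, db)
	if err != nil {
		panic(err)
	}
	_ = tr
}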


@@ -19,10 +19,10 @@ package state

 import "github.com/ethereum/go-ethereum/metrics"

 var (
 	accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil)
 	storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil)
 	accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil)
 	storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil)
-	accountCommittedMeter = metrics.NewRegisteredMeter("state/commit/account", nil)
-	storageCommittedMeter = metrics.NewRegisteredMeter("state/commit/storage", nil)
+	accountTrieCommittedMeter = metrics.NewRegisteredMeter("state/commit/accountnodes", nil)
+	storageTriesCommittedMeter = metrics.NewRegisteredMeter("state/commit/storagenodes", nil)
 )


@@ -367,7 +367,10 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, owner common.Hash, roo
 		for i, key := range result.keys {
 			snapTrie.Update(key, result.vals[i])
 		}
-		root, _, _ := snapTrie.Commit(nil)
+		root, nodes, _ := snapTrie.Commit(false)
+		if nodes != nil {
+			snapTrieDb.Update(trie.NewWithNodeSet(nodes))
+		}
 		snapTrieDb.Commit(root, false, nil)
 	}
 	// Construct the trie for state iteration, reuse the trie


@@ -143,6 +143,7 @@ type testHelper struct {
 	diskdb  ethdb.Database
 	triedb  *trie.Database
 	accTrie *trie.SecureTrie
+	nodes   *trie.MergedNodeSet
 }

 func newHelper() *testHelper {
@@ -153,6 +154,7 @@ func newHelper() *testHelper {
 		diskdb:  diskdb,
 		triedb:  triedb,
 		accTrie: accTrie,
+		nodes:   trie.NewMergedNodeSet(),
 	}
 }
@@ -184,17 +186,22 @@ func (t *testHelper) makeStorageTrie(stateRoot, owner common.Hash, keys []string
 	for i, k := range keys {
 		stTrie.Update([]byte(k), []byte(vals[i]))
 	}
-	var root common.Hash
 	if !commit {
-		root = stTrie.Hash()
-	} else {
-		root, _, _ = stTrie.Commit(nil)
+		return stTrie.Hash().Bytes()
+	}
+	root, nodes, _ := stTrie.Commit(false)
+	if nodes != nil {
+		t.nodes.Merge(nodes)
 	}
 	return root.Bytes()
 }

 func (t *testHelper) Commit() common.Hash {
-	root, _, _ := t.accTrie.Commit(nil)
+	root, nodes, _ := t.accTrie.Commit(true)
+	if nodes != nil {
+		t.nodes.Merge(nodes)
+	}
+	t.triedb.Update(t.nodes)
 	t.triedb.Commit(root, false, nil)
 	return root
 }
@@ -378,7 +385,7 @@ func TestGenerateCorruptAccountTrie(t *testing.T) {
 	helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
 	helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4

-	root, _, _ := helper.accTrie.Commit(nil) // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978
+	root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978

 	// Delete an account trie leaf and ensure the generator chokes
 	helper.triedb.Commit(root, false, nil)
@@ -413,18 +420,8 @@ func TestGenerateMissingStorageTrie(t *testing.T) {
 	helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
 	stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 	helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
-	root, _, _ := helper.accTrie.Commit(nil)
-	// We can only corrupt the disk database, so flush the tries out
-	helper.triedb.Reference(
-		common.BytesToHash(stRoot),
-		common.HexToHash("0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e"),
-	)
-	helper.triedb.Reference(
-		common.BytesToHash(stRoot),
-		common.HexToHash("0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2"),
-	)
-	helper.triedb.Commit(root, false, nil)
+	root := helper.Commit()

 	// Delete a storage trie root and ensure the generator chokes
 	helper.diskdb.Delete(stRoot)
@@ -458,18 +455,7 @@ func TestGenerateCorruptStorageTrie(t *testing.T) {
 	stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 	helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
-	root, _, _ := helper.accTrie.Commit(nil)
-	// We can only corrupt the disk database, so flush the tries out
-	helper.triedb.Reference(
-		common.BytesToHash(stRoot),
-		common.HexToHash("0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e"),
-	)
-	helper.triedb.Reference(
-		common.BytesToHash(stRoot),
-		common.HexToHash("0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2"),
-	)
-	helper.triedb.Commit(root, false, nil)
+	root := helper.Commit()

 	// Delete a storage trie leaf and ensure the generator chokes
 	helper.diskdb.Delete(common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371").Bytes())
@@ -825,10 +811,12 @@ func populateDangling(disk ethdb.KeyValueStore) {
 // This test will populate some dangling storages to see if they can be cleaned up.
 func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) {
 	var helper = newHelper()
-	stRoot := helper.makeStorageTrie(common.Hash{}, common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 	helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
 	helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()})
+	helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 	helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})

 	helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
@@ -858,10 +846,12 @@ func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) {
 // This test will populate some dangling storages to see if they can be cleaned up.
 func TestGenerateBrokenSnapshotWithDanglingStorage(t *testing.T) {
 	var helper = newHelper()
-	stRoot := helper.makeStorageTrie(common.Hash{}, common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+	stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 	helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
 	helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()})
+	helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
 	helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()})

 	populateDangling(helper.diskdb)


@@ -28,6 +28,7 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/trie"
 )

 var emptyCodeHash = crypto.Keccak256(nil)
@@ -375,23 +376,23 @@ func (s *stateObject) updateRoot(db Database) {
 // CommitTrie the storage trie of the object to db.
 // This updates the trie root.
-func (s *stateObject) CommitTrie(db Database) (int, error) {
+func (s *stateObject) CommitTrie(db Database) (*trie.NodeSet, error) {
 	// If nothing changed, don't bother with hashing anything
 	if s.updateTrie(db) == nil {
-		return 0, nil
+		return nil, nil
 	}
 	if s.dbErr != nil {
-		return 0, s.dbErr
+		return nil, s.dbErr
 	}
 	// Track the amount of time wasted on committing the storage trie
 	if metrics.EnabledExpensive {
 		defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now())
 	}
-	root, committed, err := s.trie.Commit(nil)
+	root, nodes, err := s.trie.Commit(false)
 	if err == nil {
 		s.data.Root = root
 	}
-	return committed, err
+	return nodes, err
 }

 // AddBalance adds amount to s's balance.


@@ -774,7 +774,7 @@ func (s *StateDB) GetRefund() uint64 {
 	return s.refund
 }

-// Finalise finalises the state by removing the s destructed objects and clears
+// Finalise finalises the state by removing the destructed objects and clears
 // the journal as well as the refunds. Finalise, however, will not push any updates
 // into the tries just yet. Only IntermediateRoot or Commit will do that.
 func (s *StateDB) Finalise(deleteEmptyObjects bool) {
@@ -844,7 +844,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
 	// Although naively it makes sense to retrieve the account trie and then do
 	// the contract storage and account updates sequentially, that short circuits
 	// the account prefetcher. Instead, let's process all the storage updates
-	// first, giving the account prefeches just a few more milliseconds of time
+	// first, giving the account prefetches just a few more milliseconds of time
 	// to pull useful data from disk.
 	for addr := range s.stateObjectsPending {
 		if obj := s.stateObjects[addr]; !obj.deleted {
@@ -907,7 +907,11 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 	s.IntermediateRoot(deleteEmptyObjects)

 	// Commit objects to the trie, measuring the elapsed time
-	var storageCommitted int
+	var (
+		accountTrieNodes int
+		storageTrieNodes int
+		nodes            = trie.NewMergedNodeSet()
+	)
 	codeWriter := s.db.TrieDB().DiskDB().NewBatch()
 	for addr := range s.stateObjectsDirty {
 		if obj := s.stateObjects[addr]; !obj.deleted {
@@ -917,11 +921,17 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 				obj.dirtyCode = false
 			}
 			// Write any storage changes in the state object to its storage trie
-			committed, err := obj.CommitTrie(s.db)
+			set, err := obj.CommitTrie(s.db)
 			if err != nil {
 				return common.Hash{}, err
 			}
-			storageCommitted += committed
+			// Merge the dirty nodes of storage trie into global set
+			if set != nil {
+				if err := nodes.Merge(set); err != nil {
+					return common.Hash{}, err
+				}
+				storageTrieNodes += set.Len()
+			}
 		}
 	}
 	if len(s.stateObjectsDirty) > 0 {
@@ -937,21 +947,17 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 	if metrics.EnabledExpensive {
 		start = time.Now()
 	}
-	// The onleaf func is called _serially_, so we can reuse the same account
-	// for unmarshalling every time.
-	var account types.StateAccount
-	root, accountCommitted, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash, _ []byte) error {
-		if err := rlp.DecodeBytes(leaf, &account); err != nil {
-			return nil
-		}
-		if account.Root != emptyRoot {
-			s.db.TrieDB().Reference(account.Root, parent)
-		}
-		return nil
-	})
+	root, set, err := s.trie.Commit(true)
 	if err != nil {
 		return common.Hash{}, err
 	}
+	// Merge the dirty nodes of account trie into global set
+	if set != nil {
+		if err := nodes.Merge(set); err != nil {
+			return common.Hash{}, err
+		}
+		accountTrieNodes = set.Len()
+	}

 	if metrics.EnabledExpensive {
 		s.AccountCommits += time.Since(start)
@@ -959,8 +965,8 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 		storageUpdatedMeter.Mark(int64(s.StorageUpdated))
 		accountDeletedMeter.Mark(int64(s.AccountDeleted))
 		storageDeletedMeter.Mark(int64(s.StorageDeleted))
-		accountCommittedMeter.Mark(int64(accountCommitted))
-		storageCommittedMeter.Mark(int64(storageCommitted))
+		accountTrieCommittedMeter.Mark(int64(accountTrieNodes))
+		storageTriesCommittedMeter.Mark(int64(storageTrieNodes))
 		s.AccountUpdated, s.AccountDeleted = 0, 0
 		s.StorageUpdated, s.StorageDeleted = 0, 0
 	}
@@ -984,6 +990,9 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
 		}
 		s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil
 	}
+	if err := s.db.TrieDB().Update(nodes); err != nil {
+		return common.Hash{}, err
+	}
 	s.originalRoot = root
 	return root, err
 }
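
The change above is the heart of the PR: StateDB.Commit no longer relies on an onleaf callback to wire storage tries to the account trie; instead every trie hands back a NodeSet, and the state database merges them and performs a single Update. Below is a hedged sketch of the same pattern at the trie package level; the helper name commitState and its two trie arguments are illustrative only, and the snippet assumes the common and trie packages are imported:

// commitState mirrors the StateDB.Commit flow above for one account trie and
// one storage trie. Illustrative only; error handling is kept minimal.
func commitState(db *trie.Database, accountTrie, storageTrie *trie.Trie) (common.Hash, error) {
	merged := trie.NewMergedNodeSet()

	// Storage tries are committed without leaf collection.
	_, storageNodes, err := storageTrie.Commit(false)
	if err != nil {
		return common.Hash{}, err
	}
	if storageNodes != nil {
		if err := merged.Merge(storageNodes); err != nil {
			return common.Hash{}, err
		}
	}
	// The account trie is committed with collectLeaf=true so that Database.Update
	// can later link each account leaf to its storage root.
	root, accountNodes, err := accountTrie.Commit(true)
	if err != nil {
		return common.Hash{}, err
	}
	if accountNodes != nil {
		if err := merged.Merge(accountNodes); err != nil {
			return common.Hash{}, err
		}
	}
	// A single Update call moves the whole group of dirty nodes into the trie database.
	return root, db.Update(merged)
}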


@@ -1348,9 +1348,11 @@ func getCodeByHash(hash common.Hash) []byte {
 // makeAccountTrieNoStorage spits out a trie, along with the leafs
 func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
-	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
-	accTrie := trie.NewEmpty(db)
-	var entries entrySlice
+	var (
+		db      = trie.NewDatabase(rawdb.NewMemoryDatabase())
+		accTrie = trie.NewEmpty(db)
+		entries entrySlice
+	)
 	for i := uint64(1); i <= uint64(n); i++ {
 		value, _ := rlp.EncodeToBytes(&types.StateAccount{
 			Nonce: i,
@@ -1364,7 +1366,13 @@ func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
 		entries = append(entries, elem)
 	}
 	sort.Sort(entries)
-	accTrie.Commit(nil)
+
+	// Commit the state changes into db and re-create the trie
+	// for accessing later.
+	root, nodes, _ := accTrie.Commit(false)
+	db.Update(trie.NewWithNodeSet(nodes))
+	accTrie, _ = trie.New(common.Hash{}, root, db)
+
 	return accTrie, entries
 }
@@ -1376,8 +1384,8 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
 		entries    entrySlice
 		boundaries []common.Hash
 		db         = trie.NewDatabase(rawdb.NewMemoryDatabase())
-		trie       = trie.NewEmpty(db)
+		accTrie    = trie.NewEmpty(db)
 	)
 	// Initialize boundaries
 	var next common.Hash
@@ -1404,7 +1412,7 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
 			CodeHash: getCodeHash(uint64(i)),
 		})
 		elem := &kv{boundaries[i].Bytes(), value}
-		trie.Update(elem.k, elem.v)
+		accTrie.Update(elem.k, elem.v)
 		entries = append(entries, elem)
 	}
 	// Fill other accounts if required
@@ -1416,12 +1424,18 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
 			CodeHash: getCodeHash(i),
 		})
 		elem := &kv{key32(i), value}
-		trie.Update(elem.k, elem.v)
+		accTrie.Update(elem.k, elem.v)
 		entries = append(entries, elem)
 	}
 	sort.Sort(entries)
-	trie.Commit(nil)
-	return trie, entries
+
+	// Commit the state changes into db and re-create the trie
+	// for accessing later.
+	root, nodes, _ := accTrie.Commit(false)
+	db.Update(trie.NewWithNodeSet(nodes))
+	accTrie, _ = trie.New(common.Hash{}, root, db)
+
+	return accTrie, entries
 }
 // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each accounts
@@ -1431,8 +1445,10 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
 		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
 		accTrie        = trie.NewEmpty(db)
 		entries        entrySlice
+		storageRoots   = make(map[common.Hash]common.Hash)
 		storageTries   = make(map[common.Hash]*trie.Trie)
 		storageEntries = make(map[common.Hash]entrySlice)
+		nodes          = trie.NewMergedNodeSet()
 	)
 	// Create n accounts in the trie
 	for i := uint64(1); i <= uint64(accounts); i++ {
@@ -1442,9 +1458,9 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
 			codehash = getCodeHash(i)
 		}
 		// Create a storage trie
-		stTrie, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db)
-		stRoot := stTrie.Hash()
-		stTrie.Commit(nil)
+		stRoot, stNodes, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db)
+		nodes.Merge(stNodes)
+
 		value, _ := rlp.EncodeToBytes(&types.StateAccount{
 			Nonce:   i,
 			Balance: big.NewInt(int64(i)),
@@ -1455,12 +1471,25 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
 		accTrie.Update(elem.k, elem.v)
 		entries = append(entries, elem)

-		storageTries[common.BytesToHash(key)] = stTrie
+		storageRoots[common.BytesToHash(key)] = stRoot
 		storageEntries[common.BytesToHash(key)] = stEntries
 	}
 	sort.Sort(entries)
-	accTrie.Commit(nil)
+
+	// Commit account trie
+	root, set, _ := accTrie.Commit(true)
+	nodes.Merge(set)
+
+	// Commit gathered dirty nodes into database
+	db.Update(nodes)
+
+	// Re-create tries with new root
+	accTrie, _ = trie.New(common.Hash{}, root, db)
+	for i := uint64(1); i <= uint64(accounts); i++ {
+		key := key32(i)
+		trie, _ := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
+		storageTries[common.BytesToHash(key)] = trie
+	}
 	return accTrie, entries, storageTries, storageEntries
 }
@@ -1470,8 +1499,10 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie
 		db             = trie.NewDatabase(rawdb.NewMemoryDatabase())
 		accTrie        = trie.NewEmpty(db)
 		entries        entrySlice
+		storageRoots   = make(map[common.Hash]common.Hash)
 		storageTries   = make(map[common.Hash]*trie.Trie)
 		storageEntries = make(map[common.Hash]entrySlice)
+		nodes          = trie.NewMergedNodeSet()
 	)
 	// Create n accounts in the trie
 	for i := uint64(1); i <= uint64(accounts); i++ {
@@ -1482,16 +1513,16 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie
 		}
 		// Make a storage trie
 		var (
-			stTrie    *trie.Trie
+			stRoot    common.Hash
+			stNodes   *trie.NodeSet
 			stEntries entrySlice
 		)
 		if boundary {
-			stTrie, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
+			stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
 		} else {
-			stTrie, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
+			stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
 		}
-		stRoot := stTrie.Hash()
-		stTrie.Commit(nil)
+		nodes.Merge(stNodes)

 		value, _ := rlp.EncodeToBytes(&types.StateAccount{
 			Nonce: i,
@@ -1502,19 +1533,40 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie
 		elem := &kv{key, value}
 		accTrie.Update(elem.k, elem.v)
 		entries = append(entries, elem)
 		// we reuse the same one for all accounts
-		storageTries[common.BytesToHash(key)] = stTrie
+		storageRoots[common.BytesToHash(key)] = stRoot
 		storageEntries[common.BytesToHash(key)] = stEntries
 	}
 	sort.Sort(entries)
-	accTrie.Commit(nil)
+
+	// Commit account trie
+	root, set, _ := accTrie.Commit(true)
+	nodes.Merge(set)
+
+	// Commit gathered dirty nodes into database
+	db.Update(nodes)
+
+	// Re-create tries with new root
+	accTrie, err := trie.New(common.Hash{}, root, db)
+	if err != nil {
+		panic(err)
+	}
+	for i := uint64(1); i <= uint64(accounts); i++ {
+		key := key32(i)
+		trie, err := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
+		if err != nil {
+			panic(err)
+		}
+		storageTries[common.BytesToHash(key)] = trie
+	}
 	return accTrie, entries, storageTries, storageEntries
 }

 // makeStorageTrieWithSeed fills a storage trie with n items, returning the
 // not-yet-committed trie and the sorted entries. The seeds can be used to ensure
 // that tries are unique.
-func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) {
+func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
 	trie, _ := trie.New(owner, common.Hash{}, db)
 	var entries entrySlice
 	for i := uint64(1); i <= n; i++ {
@@ -1530,14 +1582,14 @@ func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Databas
 		entries = append(entries, elem)
 	}
 	sort.Sort(entries)
-	trie.Commit(nil)
-	return trie, entries
+	root, nodes, _ := trie.Commit(false)
+	return root, nodes, entries
 }

 // makeBoundaryStorageTrie constructs a storage trie. Instead of filling
 // storage slots normally, this function will fill a few slots which have
 // boundary hash.
-func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (*trie.Trie, entrySlice) {
+func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
 	var (
 		entries    entrySlice
 		boundaries []common.Hash
@@ -1581,8 +1633,8 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (*trie
 		entries = append(entries, elem)
 	}
 	sort.Sort(entries)
-	trie.Commit(nil)
-	return trie, entries
+	root, nodes, _ := trie.Commit(false)
+	return root, nodes, entries
 }

 func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {


@@ -217,7 +217,18 @@ func (c *ChtIndexerBackend) Process(ctx context.Context, header *types.Header) e
 // Commit implements core.ChainIndexerBackend
 func (c *ChtIndexerBackend) Commit() error {
-	root, _, err := c.trie.Commit(nil)
+	root, nodes, err := c.trie.Commit(false)
+	if err != nil {
+		return err
+	}
+	// Commit trie changes into trie database in case it's not nil.
+	if nodes != nil {
+		if err := c.triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
+			return err
+		}
+	}
+	// Re-create trie with newly generated root and updated database.
+	c.trie, err = trie.New(common.Hash{}, root, c.triedb)
 	if err != nil {
 		return err
 	}
@@ -453,7 +464,18 @@ func (b *BloomTrieIndexerBackend) Commit() error {
 			b.trie.Delete(encKey[:])
 		}
 	}
-	root, _, err := b.trie.Commit(nil)
+	root, nodes, err := b.trie.Commit(false)
+	if err != nil {
+		return err
+	}
+	// Commit trie changes into trie database in case it's not nil.
+	if nodes != nil {
+		if err := b.triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
+			return err
+		}
+	}
+	// Re-create trie with newly generated root and updated database.
+	b.trie, err = trie.New(common.Hash{}, root, b.triedb)
 	if err != nil {
 		return err
 	}


@@ -137,11 +137,11 @@ func (t *odrTrie) TryDelete(key []byte) error {
 	})
 }

-func (t *odrTrie) Commit(onleaf trie.LeafCallback) (common.Hash, int, error) {
+func (t *odrTrie) Commit(collectLeaf bool) (common.Hash, *trie.NodeSet, error) {
 	if t.trie == nil {
-		return t.id.Root, 0, nil
+		return t.id.Root, nil, nil
 	}
-	return t.trie.Commit(onleaf)
+	return t.trie.Commit(collectLeaf)
 }

 func (t *odrTrie) Hash() common.Hash {


@@ -173,10 +173,13 @@ func (f *fuzzer) fuzz() int {
 		return 0
 	}
 	// Flush trie -> database
-	rootA, _, err := trieA.Commit(nil)
+	rootA, nodes, err := trieA.Commit(false)
 	if err != nil {
 		panic(err)
 	}
+	if nodes != nil {
+		dbA.Update(trie.NewWithNodeSet(nodes))
+	}
 	// Flush memdb -> disk (sponge)
 	dbA.Commit(rootA, false, nil)


@@ -51,9 +51,8 @@ const (
 	opUpdate = iota
 	opDelete
 	opGet
-	opCommit
 	opHash
-	opReset
+	opCommit
 	opItercheckhash
 	opProve
 	opMax // boundary value, not an actual op
@@ -157,15 +156,18 @@ func runRandTest(rt randTest) error {
 			if string(v) != want {
 				rt[i].err = fmt.Errorf("mismatch for key %#x, got %#x want %#x", step.key, v, want)
 			}
-		case opCommit:
-			_, _, rt[i].err = tr.Commit(nil)
 		case opHash:
 			tr.Hash()
-		case opReset:
-			hash, _, err := tr.Commit(nil)
+		case opCommit:
+			hash, nodes, err := tr.Commit(false)
 			if err != nil {
 				return err
 			}
+			if nodes != nil {
+				if err := triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
+					return err
+				}
+			}
 			newtr, err := trie.New(common.Hash{}, hash, triedb)
 			if err != nil {
 				return err


@@ -17,72 +17,48 @@
 package trie

 import (
-	"errors"
 	"fmt"
-	"sync"

 	"github.com/ethereum/go-ethereum/common"
 )

-// leafChanSize is the size of the leafCh. It's a pretty arbitrary number, to allow
-// some parallelism but not incur too much memory overhead.
-const leafChanSize = 200
-
-// leaf represents a trie leaf value
+// leaf represents a trie leaf node
 type leaf struct {
-	size int         // size of the rlp data (estimate)
-	hash common.Hash // hash of rlp data
-	node node        // the node to commit
-	path []byte      // the path from the root node
+	blob   []byte      // raw blob of leaf
+	parent common.Hash // the hash of parent node
 }

-// committer is a type used for the trie Commit operation. A committer has some
-// internal preallocated temp space, and also a callback that is invoked when
-// leaves are committed. The leafs are passed through the `leafCh`, to allow
-// some level of parallelism.
-// By 'some level' of parallelism, it's still the case that all leaves will be
-// processed sequentially - onleaf will never be called in parallel or out of order.
+// committer is the tool used for the trie Commit operation. The committer will
+// capture all dirty nodes during the commit process and keep them cached in
+// insertion order.
 type committer struct {
-	onleaf LeafCallback
-	leafCh chan *leaf
-}
-
-// committers live in a global sync.Pool
-var committerPool = sync.Pool{
-	New: func() interface{} {
-		return &committer{}
-	},
+	nodes       *NodeSet
+	collectLeaf bool
 }

 // newCommitter creates a new committer or picks one from the pool.
-func newCommitter() *committer {
-	return committerPool.Get().(*committer)
-}
-
-func returnCommitterToPool(h *committer) {
-	h.onleaf = nil
-	h.leafCh = nil
-	committerPool.Put(h)
+func newCommitter(owner common.Hash, collectLeaf bool) *committer {
+	return &committer{
+		nodes:       NewNodeSet(owner),
+		collectLeaf: collectLeaf,
+	}
 }

 // Commit collapses a node down into a hash node and inserts it into the database
-func (c *committer) Commit(n node, db *Database) (hashNode, int, error) {
-	if db == nil {
-		return nil, 0, errors.New("no db provided")
-	}
-	h, committed, err := c.commit(nil, n, db)
+func (c *committer) Commit(n node) (hashNode, *NodeSet, error) {
+	h, err := c.commit(nil, n)
 	if err != nil {
-		return nil, 0, err
+		return nil, nil, err
 	}
-	return h.(hashNode), committed, nil
+	return h.(hashNode), c.nodes, nil
 }

 // commit collapses a node down into a hash node and inserts it into the database
-func (c *committer) commit(path []byte, n node, db *Database) (node, int, error) {
+func (c *committer) commit(path []byte, n node) (node, error) {
 	// if this path is clean, use available cached data
 	hash, dirty := n.cache()
 	if hash != nil && !dirty {
-		return hash, 0, nil
+		return hash, nil
 	}
 	// Commit children, then parent, and remove the dirty flag.
 	switch cn := n.(type) {
@@ -92,36 +68,35 @@ func (c *committer) commit(path []byte, n node, db *Database) (node, int, error)
 		// If the child is fullNode, recursively commit,
 		// otherwise it can only be hashNode or valueNode.
-		var childCommitted int
 		if _, ok := cn.Val.(*fullNode); ok {
-			childV, committed, err := c.commit(append(path, cn.Key...), cn.Val, db)
+			childV, err := c.commit(append(path, cn.Key...), cn.Val)
 			if err != nil {
-				return nil, 0, err
+				return nil, err
 			}
-			collapsed.Val, childCommitted = childV, committed
+			collapsed.Val = childV
 		}
 		// The key needs to be copied, since we're delivering it to database
 		collapsed.Key = hexToCompact(cn.Key)
-		hashedNode := c.store(path, collapsed, db)
+		hashedNode := c.store(path, collapsed)
 		if hn, ok := hashedNode.(hashNode); ok {
-			return hn, childCommitted + 1, nil
+			return hn, nil
 		}
-		return collapsed, childCommitted, nil
+		return collapsed, nil
 	case *fullNode:
-		hashedKids, childCommitted, err := c.commitChildren(path, cn, db)
+		hashedKids, err := c.commitChildren(path, cn)
 		if err != nil {
-			return nil, 0, err
+			return nil, err
 		}
 		collapsed := cn.copy()
 		collapsed.Children = hashedKids
-		hashedNode := c.store(path, collapsed, db)
+		hashedNode := c.store(path, collapsed)
 		if hn, ok := hashedNode.(hashNode); ok {
-			return hn, childCommitted + 1, nil
+			return hn, nil
 		}
-		return collapsed, childCommitted, nil
+		return collapsed, nil
 	case hashNode:
-		return cn, 0, nil
+		return cn, nil
 	default:
 		// nil, valuenode shouldn't be committed
 		panic(fmt.Sprintf("%T: invalid node: %v", n, n))
@@ -129,11 +104,8 @@ func (c *committer) commit(path []byte, n node, db *Database) (node, int, error)
 }

 // commitChildren commits the children of the given fullnode
-func (c *committer) commitChildren(path []byte, n *fullNode, db *Database) ([17]node, int, error) {
-	var (
-		committed int
-		children  [17]node
-	)
+func (c *committer) commitChildren(path []byte, n *fullNode) ([17]node, error) {
+	var children [17]node
 	for i := 0; i < 16; i++ {
 		child := n.Children[i]
 		if child == nil {
@@ -149,88 +121,63 @@ func (c *committer) commitChildren(path []byte, n *fullNode, db *Database) ([17]
 		// Commit the child recursively and store the "hashed" value.
 		// Note the returned node can be some embedded nodes, so it's
 		// possible the type is not hashNode.
-		hashed, childCommitted, err := c.commit(append(path, byte(i)), child, db)
+		hashed, err := c.commit(append(path, byte(i)), child)
 		if err != nil {
-			return children, 0, err
+			return children, err
 		}
 		children[i] = hashed
-		committed += childCommitted
 	}
 	// For the 17th child, it's possible the type is valuenode.
 	if n.Children[16] != nil {
 		children[16] = n.Children[16]
 	}
-	return children, committed, nil
+	return children, nil
 }

 // store hashes the node n and if we have a storage layer specified, it writes
 // the key/value pair to it and tracks any node->child references as well as any
 // node->external trie references.
-func (c *committer) store(path []byte, n node, db *Database) node {
+func (c *committer) store(path []byte, n node) node {
 	// Larger nodes are replaced by their hash and stored in the database.
-	var (
-		hash, _ = n.cache()
-		size    int
-	)
+	var hash, _ = n.cache()
+
+	// This was not generated - must be a small node stored in the parent.
+	// In theory, we should check if the node is leaf here (embedded node
+	// usually is leaf node). But small value(less than 32bytes) is not
+	// our target(leaves in account trie only).
 	if hash == nil {
-		// This was not generated - must be a small node stored in the parent.
-		// In theory, we should apply the leafCall here if it's not nil(embedded
-		// node usually contains value). But small value(less than 32bytes) is
-		// not our target.
 		return n
-	} else {
-		// We have the hash already, estimate the RLP encoding-size of the node.
-		// The size is used for mem tracking, does not need to be exact
-		size = estimateSize(n)
 	}
-	// If we're using channel-based leaf-reporting, send to channel.
-	// The leaf channel will be active only when there an active leaf-callback
-	if c.leafCh != nil {
-		c.leafCh <- &leaf{
-			size: size,
-			hash: common.BytesToHash(hash),
-			node: n,
-			path: path,
+	// We have the hash already, estimate the RLP encoding-size of the node.
+	// The size is used for mem tracking, does not need to be exact
+	var (
+		size  = estimateSize(n)
+		nhash = common.BytesToHash(hash)
+		mnode = &memoryNode{
+			hash: nhash,
+			node: simplifyNode(n),
+			size: uint16(size),
+		}
+	)
+	// Collect the dirty node to nodeset for return.
+	c.nodes.add(string(path), mnode)
+
+	// Collect the corresponding leaf node if it's required. We don't check
+	// full node since it's impossible to store value in fullNode. The key
+	// length of leaves should be exactly same.
+	if c.collectLeaf {
+		if sn, ok := n.(*shortNode); ok {
+			if val, ok := sn.Val.(valueNode); ok {
+				c.nodes.addLeaf(&leaf{blob: val, parent: nhash})
+			}
 		}
-	} else if db != nil {
-		// No leaf-callback used, but there's still a database. Do serial
-		// insertion
-		db.insert(common.BytesToHash(hash), size, n)
 	}
 	return hash
 }

-// commitLoop does the actual insert + leaf callback for nodes.
-func (c *committer) commitLoop(db *Database) {
-	for item := range c.leafCh {
-		var (
-			hash = item.hash
-			size = item.size
-			n    = item.node
-		)
-		// We are pooling the trie nodes into an intermediate memory cache
-		db.insert(hash, size, n)
-
-		if c.onleaf != nil {
-			switch n := n.(type) {
-			case *shortNode:
-				if child, ok := n.Val.(valueNode); ok {
-					c.onleaf(nil, nil, child, hash, nil)
-				}
-			case *fullNode:
-				// For children in range [0, 15], it's impossible
-				// to contain valueNode. Only check the 17th child.
-				if n.Children[16] != nil {
-					c.onleaf(nil, nil, n.Children[16].(valueNode), hash, nil)
-				}
-			}
-		}
-	}
-}
-
 // estimateSize estimates the size of an rlp-encoded node, without actually
 // rlp-encoding it (zero allocs). This method has been experimentally tried, and with a trie
-// with 1000 leafs, the only errors above 1% are on small shortnodes, where this
+// with 1000 leaves, the only errors above 1% are on small shortnodes, where this
 // method overestimates by 2 or 3 bytes (e.g. 37 instead of 35)
 func estimateSize(n node) int {
 	switch n := n.(type) {
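
One practical consequence of the rework: callers that previously consumed the integer returned by Commit (a count of committed nodes) now derive an equivalent figure from the returned NodeSet, much as the new metrics in core/state do via set.Len(). A hypothetical helper, assuming the common and trie packages are imported; it is not part of this PR:

// commitAndCount shows how code that used the old integer return value can
// recover a comparable count of dirty trie nodes from the NodeSet.
func commitAndCount(tr *trie.Trie) (common.Hash, int, error) {
	root, nodes, err := tr.Commit(false)
	if err != nil {
		return common.Hash{}, 0, err
	}
	if nodes == nil {
		// Clean trie: nothing was committed.
		return root, 0, nil
	}
	return root, nodes.Len(), nil
}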


@@ -28,6 +28,7 @@ import (
 	"github.com/VictoriaMetrics/fastcache"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
@@ -305,14 +306,10 @@ func (db *Database) DiskDB() ethdb.KeyValueStore {
 	return db.diskdb
 }

-// insert inserts a collapsed trie node into the memory database.
-// The blob size must be specified to allow proper size tracking.
+// insert inserts a simplified trie node into the memory database.
 // All nodes inserted by this function will be reference tracked
 // and in theory should only used for **trie nodes** insertion.
 func (db *Database) insert(hash common.Hash, size int, node node) {
-	db.lock.Lock()
-	defer db.lock.Unlock()
 	// If the node's already cached, skip
 	if _, ok := db.dirties[hash]; ok {
 		return
@@ -321,7 +318,7 @@ func (db *Database) insert(hash common.Hash, size int, node node) {
 	// Create the cached entry for this node
 	entry := &cachedNode{
-		node:      simplifyNode(node),
+		node:      node,
 		size:      uint16(size),
 		flushPrev: db.newest,
 	}
@@ -763,6 +760,41 @@ func (c *cleaner) Delete(key []byte) error {
 	panic("not implemented")
 }

+// Update inserts the dirty nodes in provided nodeset into database and
+// link the account trie with multiple storage tries if necessary.
+func (db *Database) Update(nodes *MergedNodeSet) error {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
+	// Insert dirty nodes into the database. In the same tree, it must be
+	// ensured that children are inserted first, then parent so that children
+	// can be linked with their parent correctly. The order of writing between
+	// different tries(account trie, storage tries) is not required.
+	for owner, subset := range nodes.sets {
+		for _, path := range subset.paths {
+			n, ok := subset.nodes[path]
+			if !ok {
+				return fmt.Errorf("missing node %x %v", owner, path)
+			}
+			db.insert(n.hash, int(n.size), n.node)
+		}
+	}
+	// Link up the account trie and storage trie if the node points
+	// to an account trie leaf.
+	if set, present := nodes.sets[common.Hash{}]; present {
+		for _, n := range set.leaves {
+			var account types.StateAccount
+			if err := rlp.DecodeBytes(n.blob, &account); err != nil {
+				return err
+			}
+			if account.Root != emptyRoot {
+				db.reference(account.Root, n.parent)
+			}
+		}
+	}
+	return nil
+}
+
 // Size returns the current storage size of the memory cache in front of the
 // persistent database layer.
 func (db *Database) Size() (common.StorageSize, common.StorageSize) {
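
Note that Update only stages the collected nodes in the trie database's in-memory dirty cache (and resolves the account-to-storage references from the gathered leaves); persisting to disk is still a separate Database.Commit call, as the snapshot generator and the tests in this diff do. A sketch of the full flush path for an account trie, under the same assumptions as the examples above (the helper name is illustrative):

// flushToDisk commits an account trie and persists everything reachable from
// the new root. Illustrative only.
func flushToDisk(db *trie.Database, accountTrie *trie.Trie) (common.Hash, error) {
	// collectLeaf=true so Update can link account leaves to their storage roots.
	root, nodes, err := accountTrie.Commit(true)
	if err != nil {
		return common.Hash{}, err
	}
	if nodes != nil {
		// Step 1: move the dirty nodes into the in-memory dirty cache.
		if err := db.Update(trie.NewWithNodeSet(nodes)); err != nil {
			return common.Hash{}, err
		}
	}
	// Step 2: write the trie beneath root out to the backing key-value store.
	return root, db.Commit(root, false, nil)
}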


@@ -375,8 +375,7 @@ func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) {
 			}
 		}
 	}
-	resolved, err := it.trie.resolveHash(hash, path)
-	return resolved, err
+	return it.trie.resolveHash(hash, path)
 }

 func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error) {


@@ -31,7 +31,7 @@ import (
 )

 func TestEmptyIterator(t *testing.T) {
-	trie := newEmpty()
+	trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
 	iter := trie.NodeIterator(nil)

 	seen := make(map[string]struct{})
@@ -44,7 +44,8 @@ func TestEmptyIterator(t *testing.T) {
 }

 func TestIterator(t *testing.T) {
-	trie := newEmpty()
+	db := NewDatabase(rawdb.NewMemoryDatabase())
+	trie := NewEmpty(db)
 	vals := []struct{ k, v string }{
 		{"do", "verb"},
 		{"ether", "wookiedoo"},
@@ -59,8 +60,13 @@ func TestIterator(t *testing.T) {
 		all[val.k] = val.v
 		trie.Update([]byte(val.k), []byte(val.v))
 	}
-	trie.Commit(nil)
+	root, nodes, err := trie.Commit(false)
+	if err != nil {
+		t.Fatalf("Failed to commit trie %v", err)
+	}
+	db.Update(NewWithNodeSet(nodes))
+	trie, _ = New(common.Hash{}, root, db)

 	found := make(map[string]string)
 	it := NewIterator(trie.NodeIterator(nil))
 	for it.Next() {
@@ -80,7 +86,7 @@ type kv struct {
 }

 func TestIteratorLargeData(t *testing.T) {
-	trie := newEmpty()
+	trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
 	vals := make(map[string]*kv)

 	for i := byte(0); i < 255; i++ {
@@ -173,7 +179,7 @@ var testdata2 = []kvs{
 }

 func TestIteratorSeek(t *testing.T) {
-	trie := newEmpty()
+	trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
 	for _, val := range testdata1 {
 		trie.Update([]byte(val.k), []byte(val.v))
 	}
@@ -214,17 +220,23 @@ func checkIteratorOrder(want []kvs, it *Iterator) error {
 }

 func TestDifferenceIterator(t *testing.T) {
-	triea := newEmpty()
+	dba := NewDatabase(rawdb.NewMemoryDatabase())
+	triea := NewEmpty(dba)
 	for _, val := range testdata1 {
 		triea.Update([]byte(val.k), []byte(val.v))
 	}
-	triea.Commit(nil)
+	rootA, nodesA, _ := triea.Commit(false)
+	dba.Update(NewWithNodeSet(nodesA))
+	triea, _ = New(common.Hash{}, rootA, dba)

-	trieb := newEmpty()
+	dbb := NewDatabase(rawdb.NewMemoryDatabase())
+	trieb := NewEmpty(dbb)
 	for _, val := range testdata2 {
 		trieb.Update([]byte(val.k), []byte(val.v))
 	}
-	trieb.Commit(nil)
+	rootB, nodesB, _ := trieb.Commit(false)
+	dbb.Update(NewWithNodeSet(nodesB))
+	trieb, _ = New(common.Hash{}, rootB, dbb)

 	found := make(map[string]string)
 	di, _ := NewDifferenceIterator(triea.NodeIterator(nil), trieb.NodeIterator(nil))
@@ -250,17 +262,23 @@ func TestDifferenceIterator(t *testing.T) {
 }

 func TestUnionIterator(t *testing.T) {
-	triea := newEmpty()
+	dba := NewDatabase(rawdb.NewMemoryDatabase())
+	triea := NewEmpty(dba)
 	for _, val := range testdata1 {
 		triea.Update([]byte(val.k), []byte(val.v))
 	}
-	triea.Commit(nil)
+	rootA, nodesA, _ := triea.Commit(false)
+	dba.Update(NewWithNodeSet(nodesA))
+	triea, _ = New(common.Hash{}, rootA, dba)

-	trieb := newEmpty()
+	dbb := NewDatabase(rawdb.NewMemoryDatabase())
+	trieb := NewEmpty(dbb)
 	for _, val := range testdata2 {
 		trieb.Update([]byte(val.k), []byte(val.v))
 	}
-	trieb.Commit(nil)
+	rootB, nodesB, _ := trieb.Commit(false)
+	dbb.Update(NewWithNodeSet(nodesB))
+	trieb, _ = New(common.Hash{}, rootB, dbb)

 	di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)})
 	it := NewIterator(di)
@@ -316,7 +334,8 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool) {
 	for _, val := range testdata1 {
 		tr.Update([]byte(val.k), []byte(val.v))
 	}
-	tr.Commit(nil)
+	_, nodes, _ := tr.Commit(false)
+	triedb.Update(NewWithNodeSet(nodes))
 	if !memonly {
 		triedb.Commit(tr.Hash(), true, nil)
 	}
@@ -407,7 +426,8 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
 	for _, val := range testdata1 {
 		ctr.Update([]byte(val.k), []byte(val.v))
 	}
-	root, _, _ := ctr.Commit(nil)
+	root, nodes, _ := ctr.Commit(false)
+	triedb.Update(NewWithNodeSet(nodes))
 	if !memonly {
 		triedb.Commit(root, true, nil)
 	}
@@ -525,7 +545,8 @@ func makeLargeTestTrie() (*Database, *SecureTrie, *loggingDb) {
 		val = crypto.Keccak256(val)
 		trie.Update(key, val)
 	}
-	trie.Commit(nil)
+	_, nodes, _ := trie.Commit(false)
+	triedb.Update(NewWithNodeSet(nodes))
 	// Return the generated trie
 	return triedb, trie, logDb
 }
@@ -564,7 +585,8 @@ func TestIteratorNodeBlob(t *testing.T) {
 		all[val.k] = val.v
 		trie.Update([]byte(val.k), []byte(val.v))
 	}
-	trie.Commit(nil)
+	_, nodes, _ := trie.Commit(false)
+	triedb.Update(NewWithNodeSet(nodes))
 	triedb.Cap(0)

 	found := make(map[common.Hash][]byte)

trie/nodeset.go (new file, 94 lines)

@@ -0,0 +1,94 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trie
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
)
// memoryNode is all the information we know about a single cached trie node
// in memory.
type memoryNode struct {
hash common.Hash // Node hash, computed by hashing rlp value
size uint16 // Byte size of the useful cached data
node node // Cached collapsed trie node, or raw rlp data
}
// NodeSet contains all dirty nodes collected during the commit operation.
// Each node is keyed by path. It's not thread-safe to use.
type NodeSet struct {
owner common.Hash // the identifier of the trie
paths []string // the paths of dirty nodes, sorted by insertion order
nodes map[string]*memoryNode // the map of dirty nodes, keyed by node path
leaves []*leaf // the list of dirty leaves
}
// NewNodeSet initializes an empty node set to be used for tracking dirty nodes
// from a specific account or storage trie. The owner is zero for the account
// trie and the owning account address hash for storage tries.
func NewNodeSet(owner common.Hash) *NodeSet {
return &NodeSet{
owner: owner,
nodes: make(map[string]*memoryNode),
}
}
// add caches the node with the provided path and node object.
func (set *NodeSet) add(path string, node *memoryNode) {
set.paths = append(set.paths, path)
set.nodes[path] = node
}
// addLeaf caches the provided leaf node.
func (set *NodeSet) addLeaf(node *leaf) {
set.leaves = append(set.leaves, node)
}
// Len returns the number of dirty nodes contained in the set.
func (set *NodeSet) Len() int {
return len(set.nodes)
}
// MergedNodeSet represents a merged dirty node set for a group of tries.
type MergedNodeSet struct {
sets map[common.Hash]*NodeSet
}
// NewMergedNodeSet initializes an empty merged set.
func NewMergedNodeSet() *MergedNodeSet {
return &MergedNodeSet{sets: make(map[common.Hash]*NodeSet)}
}
// NewWithNodeSet constructs a merged nodeset with the provided single set.
func NewWithNodeSet(set *NodeSet) *MergedNodeSet {
merged := NewMergedNodeSet()
merged.Merge(set)
return merged
}
// Merge merges the provided dirty nodes of a trie into the set. It is assumed
// that the node set belonging to a given trie is merged at most once.
func (set *MergedNodeSet) Merge(other *NodeSet) error {
_, present := set.sets[other.owner]
if present {
return fmt.Errorf("duplicate trie for owner %#x", other.owner)
}
set.sets[other.owner] = other
return nil
}
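
For illustration only (not part of this change): a minimal sketch of how the new types are meant to be used together, committing an account trie and a storage trie and flushing both node sets in a single database update. The helper name and the storage owner hash are made up for the example; Commit, NewMergedNodeSet, Merge and Database.Update are the APIs introduced or touched by this commit.

package trie

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

// mergeAndFlush commits two tries with different owners and writes their
// dirty nodes to the trie database in one update.
func mergeAndFlush() error {
	db := NewDatabase(rawdb.NewMemoryDatabase())

	// Account trie: the owner is the zero hash.
	accountTrie := NewEmpty(db)
	accountTrie.Update([]byte("acct"), []byte("account-data")) // arbitrary payload for the sketch

	// Storage trie: owned by a made-up account hash.
	owner := common.HexToHash("0xdeadbeef")
	storageTrie, err := New(owner, common.Hash{}, db)
	if err != nil {
		return err
	}
	storageTrie.Update([]byte("slot"), []byte("value"))

	// Commit both tries; each returns its own NodeSet keyed by owner.
	_, accountNodes, err := accountTrie.Commit(false)
	if err != nil {
		return err
	}
	_, storageNodes, err := storageTrie.Commit(false)
	if err != nil {
		return err
	}
	// Merge rejects a second set with the same owner, so each trie
	// contributes at most one set to the merged collection.
	merged := NewMergedNodeSet()
	if accountNodes != nil {
		if err := merged.Merge(accountNodes); err != nil {
			return err
		}
	}
	if storageNodes != nil {
		if err := merged.Merge(storageNodes); err != nil {
			return err
		}
	}
	return db.Update(merged)
}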


@ -22,6 +22,7 @@ import (
"fmt" "fmt"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
@ -35,9 +36,12 @@ import (
// with the node that proves the absence of the key. // with the node that proves the absence of the key.
func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error { func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
// Collect all nodes on the path to key. // Collect all nodes on the path to key.
var (
prefix []byte
nodes []node
tn = t.root
)
key = keybytesToHex(key) key = keybytesToHex(key)
var nodes []node
tn := t.root
for len(key) > 0 && tn != nil { for len(key) > 0 && tn != nil {
switch n := tn.(type) { switch n := tn.(type) {
case *shortNode: case *shortNode:
@ -46,16 +50,18 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e
tn = nil tn = nil
} else { } else {
tn = n.Val tn = n.Val
prefix = append(prefix, n.Key...)
key = key[len(n.Key):] key = key[len(n.Key):]
} }
nodes = append(nodes, n) nodes = append(nodes, n)
case *fullNode: case *fullNode:
tn = n.Children[key[0]] tn = n.Children[key[0]]
prefix = append(prefix, key[0])
key = key[1:] key = key[1:]
nodes = append(nodes, n) nodes = append(nodes, n)
case hashNode: case hashNode:
var err error var err error
tn, err = t.resolveHash(n, nil) tn, err = t.resolveHash(n, prefix)
if err != nil { if err != nil {
log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
return err return err
@ -553,7 +559,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key
} }
// Rebuild the trie with the leaf stream, the shape of trie // Rebuild the trie with the leaf stream, the shape of trie
// should be same with the original one. // should be same with the original one.
tr := newWithRootNode(root) tr := &Trie{root: root, db: NewDatabase(rawdb.NewMemoryDatabase())}
if empty { if empty {
tr.root = nil tr.root = nil
} }
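
A hedged usage sketch for the proof code touched above (not part of the diff): Prove collects the nodes on the path to a key into a key-value writer, and the package's VerifyProof helper replays them against the root. The VerifyProof signature is assumed from the existing package API; it is not shown in this change.

package trie

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// proveAndVerify builds a Merkle proof for a single key and checks it
// against the trie root.
func proveAndVerify() error {
	tr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
	tr.Update([]byte("doe"), []byte("reindeer"))
	tr.Update([]byte("dog"), []byte("puppy"))
	root := tr.Hash()

	// Prove writes every node on the path to the key into proofDb.
	proofDb := memorydb.New()
	if err := tr.Prove([]byte("dog"), 0, proofDb); err != nil {
		return err
	}
	// VerifyProof walks the proof from the root and returns the stored value.
	val, err := VerifyProof(root, []byte("dog"), proofDb)
	if err != nil {
		return err
	}
	if !bytes.Equal(val, []byte("puppy")) {
		return fmt.Errorf("unexpected value %x", val)
	}
	return nil
}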


@ -160,12 +160,14 @@ func (t *SecureTrie) GetKey(shaKey []byte) []byte {
return t.preimages.preimage(common.BytesToHash(shaKey)) return t.preimages.preimage(common.BytesToHash(shaKey))
} }
// Commit writes all nodes and the secure hash pre-images to the trie's database. // Commit collects all dirty nodes in the trie and replaces them with the
// Nodes are stored with their sha3 hash as the key. // corresponding node hash. All collected nodes (including dirty leaves if
// // collectLeaf is true) will be encapsulated into a nodeset for return.
// Committing flushes nodes from memory. Subsequent Get calls will load nodes // The returned nodeset can be nil if the trie is clean (nothing to commit).
// from the database. // All cached preimages will also be flushed if preimages recording is enabled.
func (t *SecureTrie) Commit(onleaf LeafCallback) (common.Hash, int, error) { // Once the trie is committed, it's not usable anymore. A new trie must
// be created with the new root and the updated trie database for further usage.
func (t *SecureTrie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) {
// Write all the pre-images to the actual disk database // Write all the pre-images to the actual disk database
if len(t.getSecKeyCache()) > 0 { if len(t.getSecKeyCache()) > 0 {
if t.preimages != nil { if t.preimages != nil {
@ -178,7 +180,7 @@ func (t *SecureTrie) Commit(onleaf LeafCallback) (common.Hash, int, error) {
t.secKeyCache = make(map[string][]byte) t.secKeyCache = make(map[string][]byte)
} }
// Commit the trie to its intermediate node database // Commit the trie to its intermediate node database
return t.trie.Commit(onleaf) return t.trie.Commit(collectLeaf)
} }
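
A rough sketch of the reworked SecureTrie.Commit flow, added here for illustration and assuming the existing NewDatabaseWithConfig constructor and Config.Preimages option: keys are hashed before storage, preimages are flushed on commit when recording is enabled, and the committed trie must be re-created just like a plain Trie.

package trie

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/crypto"
)

// secureCommit updates a SecureTrie, commits it and re-opens it on the
// returned root.
func secureCommit() (*SecureTrie, error) {
	// Enable preimage recording so GetKey can map hashed keys back.
	triedb := NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &Config{Preimages: true})
	tr, err := NewSecure(common.Hash{}, common.Hash{}, triedb)
	if err != nil {
		return nil, err
	}
	tr.Update([]byte("foo"), []byte("bar")) // stored under keccak256("foo")

	// collectLeaf=true additionally puts the dirty leaves into the node set.
	root, nodes, err := tr.Commit(true)
	if err != nil {
		return nil, err
	}
	if nodes != nil {
		if err := triedb.Update(NewWithNodeSet(nodes)); err != nil {
			return nil, err
		}
	}
	// The committed instance must not be reused; open a fresh one.
	reopened, err := NewSecure(common.Hash{}, root, triedb)
	if err != nil {
		return nil, err
	}
	// With preimages enabled, the original key can be recovered.
	_ = reopened.GetKey(crypto.Keccak256([]byte("foo")))
	return reopened, nil
}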
// Hash returns the root hash of SecureTrie. It does not write to the // Hash returns the root hash of SecureTrie. It does not write to the


@ -18,6 +18,7 @@ package trie
import ( import (
"bytes" "bytes"
"fmt"
"runtime" "runtime"
"sync" "sync"
"testing" "testing"
@ -57,9 +58,15 @@ func makeTestSecureTrie() (*Database, *SecureTrie, map[string][]byte) {
trie.Update(key, val) trie.Update(key, val)
} }
} }
trie.Commit(nil) root, nodes, err := trie.Commit(false)
if err != nil {
// Return the generated trie panic(fmt.Errorf("failed to commit trie %v", err))
}
if err := triedb.Update(NewWithNodeSet(nodes)); err != nil {
panic(fmt.Errorf("failed to commit db %v", err))
}
// Re-create the trie based on the new state
trie, _ = NewSecure(common.Hash{}, root, triedb)
return triedb, trie, content return triedb, trie, content
} }
@ -135,7 +142,7 @@ func TestSecureTrieConcurrency(t *testing.T) {
tries[index].Update(key, val) tries[index].Update(key, val)
} }
} }
tries[index].Commit(nil) tries[index].Commit(false)
}(i) }(i)
} }
// Wait for all threads to finish // Wait for all threads to finish


@ -18,6 +18,7 @@ package trie
import ( import (
"bytes" "bytes"
"fmt"
"testing" "testing"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -50,9 +51,15 @@ func makeTestTrie() (*Database, *SecureTrie, map[string][]byte) {
trie.Update(key, val) trie.Update(key, val)
} }
} }
trie.Commit(nil) root, nodes, err := trie.Commit(false)
if err != nil {
// Return the generated trie panic(fmt.Errorf("failed to commit trie %v", err))
}
if err := triedb.Update(NewWithNodeSet(nodes)); err != nil {
panic(fmt.Errorf("failed to commit db %v", err))
}
// Re-create the trie based on the new state
trie, _ = NewSecure(common.Hash{}, root, triedb)
return triedb, trie, content return triedb, trie, content
} }


@ -21,10 +21,8 @@ import (
"bytes" "bytes"
"errors" "errors"
"fmt" "fmt"
"sync"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
@ -55,23 +53,28 @@ var (
// for extracting the raw states(leaf nodes) with corresponding paths. // for extracting the raw states(leaf nodes) with corresponding paths.
type LeafCallback func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error type LeafCallback func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error
// Trie is a Merkle Patricia Trie. // Trie is a Merkle Patricia Trie. Use New to create a trie that sits on
// The zero value is an empty trie with no database. // top of a database. Whenever the trie performs a commit operation, the generated
// Use New to create a trie that sits on top of a database. // nodes will be gathered and returned in a set. Once the trie is committed,
// it's not usable anymore. Callers have to re-create the trie with the new root
// based on the updated trie database.
// //
// Trie is not safe for concurrent use. // Trie is not safe for concurrent use.
type Trie struct { type Trie struct {
db *Database
root node root node
owner common.Hash owner common.Hash
// Keep track of the number leaves which have been inserted since the last // Keep track of the number of leaves which have been inserted since the last
// hashing operation. This number will not directly map to the number of // hashing operation. This number will not directly map to the number of
// actually unhashed nodes // actually unhashed nodes.
unhashed int unhashed int
// tracer is the state diff tracer can be used to track newly added/deleted // db is the handler the trie can retrieve nodes from. It's
// trie node. It will be reset after each commit operation. // only for reading purposes and not available for writing.
db *Database
// tracer is the tool to track the trie changes.
// It will be reset after each commit operation.
tracer *tracer tracer *tracer
} }
@ -83,10 +86,10 @@ func (t *Trie) newFlag() nodeFlag {
// Copy returns a copy of Trie. // Copy returns a copy of Trie.
func (t *Trie) Copy() *Trie { func (t *Trie) Copy() *Trie {
return &Trie{ return &Trie{
db: t.db,
root: t.root, root: t.root,
owner: t.owner, owner: t.owner,
unhashed: t.unhashed, unhashed: t.unhashed,
db: t.db,
tracer: t.tracer.copy(), tracer: t.tracer.copy(),
} }
} }
@ -99,33 +102,9 @@ func (t *Trie) Copy() *Trie {
// New will panic if db is nil and returns a MissingNodeError if root does // New will panic if db is nil and returns a MissingNodeError if root does
// not exist in the database. Accessing the trie loads nodes from db on demand. // not exist in the database. Accessing the trie loads nodes from db on demand.
func New(owner common.Hash, root common.Hash, db *Database) (*Trie, error) { func New(owner common.Hash, root common.Hash, db *Database) (*Trie, error) {
return newTrie(owner, root, db)
}
// NewEmpty is a shortcut to create empty tree. It's mostly used in tests.
func NewEmpty(db *Database) *Trie {
tr, _ := newTrie(common.Hash{}, common.Hash{}, db)
return tr
}
// newWithRootNode initializes the trie with the given root node.
// It's only used by range prover.
func newWithRootNode(root node) *Trie {
return &Trie{
root: root,
//tracer: newTracer(),
db: NewDatabase(rawdb.NewMemoryDatabase()),
}
}
// newTrie is the internal function used to construct the trie with given parameters.
func newTrie(owner common.Hash, root common.Hash, db *Database) (*Trie, error) {
if db == nil {
panic("trie.New called without a database")
}
trie := &Trie{ trie := &Trie{
db: db,
owner: owner, owner: owner,
db: db,
//tracer: newTracer(), //tracer: newTracer(),
} }
if root != (common.Hash{}) && root != emptyRoot { if root != (common.Hash{}) && root != emptyRoot {
@ -138,6 +117,12 @@ func newTrie(owner common.Hash, root common.Hash, db *Database) (*Trie, error) {
return trie, nil return trie, nil
} }
// NewEmpty is a shortcut to create an empty trie. It's mostly used in tests.
func NewEmpty(db *Database) *Trie {
tr, _ := New(common.Hash{}, common.Hash{}, db)
return tr
}
// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at // NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at
// the key after the given start key. // the key after the given start key.
func (t *Trie) NodeIterator(start []byte) NodeIterator { func (t *Trie) NodeIterator(start []byte) NodeIterator {
@ -512,7 +497,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
// shortNode{..., shortNode{...}}. Since the entry // shortNode{..., shortNode{...}}. Since the entry
// might not be loaded yet, resolve it just for this // might not be loaded yet, resolve it just for this
// check. // check.
cnode, err := t.resolve(n.Children[pos], prefix) cnode, err := t.resolve(n.Children[pos], append(prefix, byte(pos)))
if err != nil { if err != nil {
return false, nil, err return false, nil, err
} }
@ -572,6 +557,8 @@ func (t *Trie) resolve(n node, prefix []byte) (node, error) {
return n, nil return n, nil
} }
// resolveHash loads the node from the underlying database with the provided
// node hash and path prefix.
func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) { func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) {
hash := common.BytesToHash(n) hash := common.BytesToHash(n)
if node := t.db.node(hash); node != nil { if node := t.db.node(hash); node != nil {
@ -580,6 +567,8 @@ func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) {
return nil, &MissingNodeError{Owner: t.owner, NodeHash: hash, Path: prefix} return nil, &MissingNodeError{Owner: t.owner, NodeHash: hash, Path: prefix}
} }
// resolveBlob loads the rlp-encoded node blob from the underlying database
// with the provided node hash and path prefix.
func (t *Trie) resolveBlob(n hashNode, prefix []byte) ([]byte, error) { func (t *Trie) resolveBlob(n hashNode, prefix []byte) ([]byte, error) {
hash := common.BytesToHash(n) hash := common.BytesToHash(n)
blob, _ := t.db.Node(hash) blob, _ := t.db.Node(hash)
@ -597,56 +586,37 @@ func (t *Trie) Hash() common.Hash {
return common.BytesToHash(hash.(hashNode)) return common.BytesToHash(hash.(hashNode))
} }
// Commit writes all nodes to the trie's memory database, tracking the internal // Commit collects all dirty nodes in the trie and replaces them with the
// and external (for account tries) references. // corresponding node hash. All collected nodes (including dirty leaves if
func (t *Trie) Commit(onleaf LeafCallback) (common.Hash, int, error) { // collectLeaf is true) will be encapsulated into a nodeset for return.
if t.db == nil { // The returned nodeset can be nil if the trie is clean (nothing to commit).
panic("commit called on trie with nil database") // Once the trie is committed, it's not usable anymore. A new trie must
} // be created with the new root and the updated trie database for further usage.
func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) {
defer t.tracer.reset() defer t.tracer.reset()
if t.root == nil { if t.root == nil {
return emptyRoot, 0, nil return emptyRoot, nil, nil
} }
// Derive the hash for all dirty nodes first. We hold the assumption // Derive the hash for all dirty nodes first. We hold the assumption
// in the following procedure that all nodes are hashed. // in the following procedure that all nodes are hashed.
rootHash := t.Hash() rootHash := t.Hash()
h := newCommitter()
defer returnCommitterToPool(h)
// Do a quick check if we really need to commit, before we spin // Do a quick check if we really need to commit. The trie might be clean, e.g.
// up goroutines. This can happen e.g. if we load a trie for reading storage // if we load it for reading storage values but don't write to it.
// values, but don't write to it.
if hashedNode, dirty := t.root.cache(); !dirty { if hashedNode, dirty := t.root.cache(); !dirty {
// Replace the root node with the origin hash in order to // Replace the root node with the origin hash in order to
// ensure all resolved nodes are dropped after the commit. // ensure all resolved nodes are dropped after the commit.
t.root = hashedNode t.root = hashedNode
return rootHash, 0, nil return rootHash, nil, nil
}
var wg sync.WaitGroup
if onleaf != nil {
h.onleaf = onleaf
h.leafCh = make(chan *leaf, leafChanSize)
wg.Add(1)
go func() {
defer wg.Done()
h.commitLoop(t.db)
}()
}
newRoot, committed, err := h.Commit(t.root, t.db)
if onleaf != nil {
// The leafch is created in newCommitter if there was an onleaf callback
// provided. The commitLoop only _reads_ from it, and the commit
// operation was the sole writer. Therefore, it's safe to close this
// channel here.
close(h.leafCh)
wg.Wait()
} }
h := newCommitter(t.owner, collectLeaf)
newRoot, nodes, err := h.Commit(t.root)
if err != nil { if err != nil {
return common.Hash{}, 0, err return common.Hash{}, nil, err
} }
t.root = newRoot t.root = newRoot
return rootHash, committed, nil return rootHash, nodes, nil
} }
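
To make the new contract concrete, a minimal end-to-end sketch (illustrative, not from the commit) of the lifecycle described above: commit the trie, hand the node set to the trie database, optionally flush it to disk, and re-create the trie on the returned root.

package trie

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

// commitLifecycle shows the commit -> Update -> re-create flow.
func commitLifecycle() (*Trie, error) {
	triedb := NewDatabase(rawdb.NewMemoryDatabase())
	tr := NewEmpty(triedb)
	tr.Update([]byte("key"), []byte("value"))

	// Commit hands back the dirty nodes instead of writing them itself.
	root, nodes, err := tr.Commit(false)
	if err != nil {
		return nil, err
	}
	// A clean trie yields a nil set; only update the database if needed.
	if nodes != nil {
		if err := triedb.Update(NewWithNodeSet(nodes)); err != nil {
			return nil, err
		}
	}
	// Optionally flush the in-memory nodes down to the disk database.
	if err := triedb.Commit(root, false, nil); err != nil {
		return nil, err
	}
	// The old trie instance is stale now; re-create it on the new root.
	return New(common.Hash{}, root, triedb)
}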
// hashRoot calculates the root hash of the given trie // hashRoot calculates the root hash of the given trie
@ -667,10 +637,6 @@ func (t *Trie) Reset() {
t.root = nil t.root = nil
t.owner = common.Hash{} t.owner = common.Hash{}
t.unhashed = 0 t.unhashed = 0
//t.db = nil
t.tracer.reset() t.tracer.reset()
} }
// Owner returns the associated trie owner.
func (t *Trie) Owner() common.Hash {
return t.owner
}


@ -24,7 +24,6 @@ import (
"hash" "hash"
"math/big" "math/big"
"math/rand" "math/rand"
"os"
"reflect" "reflect"
"testing" "testing"
"testing/quick" "testing/quick"
@ -35,7 +34,6 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/leveldb"
"github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3" "golang.org/x/crypto/sha3"
@ -46,12 +44,6 @@ func init() {
spew.Config.DisableMethods = false spew.Config.DisableMethods = false
} }
// Used for testing
func newEmpty() *Trie {
trie := NewEmpty(NewDatabase(memorydb.New()))
return trie
}
func TestEmptyTrie(t *testing.T) { func TestEmptyTrie(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
res := trie.Hash() res := trie.Hash()
@ -91,7 +83,8 @@ func testMissingNode(t *testing.T, memonly bool) {
trie := NewEmpty(triedb) trie := NewEmpty(triedb)
updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer") updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf") updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
root, _, _ := trie.Commit(nil) root, nodes, _ := trie.Commit(false)
triedb.Update(NewWithNodeSet(nodes))
if !memonly { if !memonly {
triedb.Commit(root, true, nil) triedb.Commit(root, true, nil)
} }
@ -157,7 +150,7 @@ func testMissingNode(t *testing.T, memonly bool) {
} }
func TestInsert(t *testing.T) { func TestInsert(t *testing.T) {
trie := newEmpty() trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
updateString(trie, "doe", "reindeer") updateString(trie, "doe", "reindeer")
updateString(trie, "dog", "puppy") updateString(trie, "dog", "puppy")
@ -169,11 +162,11 @@ func TestInsert(t *testing.T) {
t.Errorf("case 1: exp %x got %x", exp, root) t.Errorf("case 1: exp %x got %x", exp, root)
} }
trie = newEmpty() trie = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab") exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab")
root, _, err := trie.Commit(nil) root, _, err := trie.Commit(false)
if err != nil { if err != nil {
t.Fatalf("commit error: %v", err) t.Fatalf("commit error: %v", err)
} }
@ -183,7 +176,8 @@ func TestInsert(t *testing.T) {
} }
func TestGet(t *testing.T) { func TestGet(t *testing.T) {
trie := newEmpty() db := NewDatabase(rawdb.NewMemoryDatabase())
trie := NewEmpty(db)
updateString(trie, "doe", "reindeer") updateString(trie, "doe", "reindeer")
updateString(trie, "dog", "puppy") updateString(trie, "dog", "puppy")
updateString(trie, "dogglesworth", "cat") updateString(trie, "dogglesworth", "cat")
@ -193,21 +187,21 @@ func TestGet(t *testing.T) {
if !bytes.Equal(res, []byte("puppy")) { if !bytes.Equal(res, []byte("puppy")) {
t.Errorf("expected puppy got %x", res) t.Errorf("expected puppy got %x", res)
} }
unknown := getString(trie, "unknown") unknown := getString(trie, "unknown")
if unknown != nil { if unknown != nil {
t.Errorf("expected nil got %x", unknown) t.Errorf("expected nil got %x", unknown)
} }
if i == 1 { if i == 1 {
return return
} }
trie.Commit(nil) root, nodes, _ := trie.Commit(false)
db.Update(NewWithNodeSet(nodes))
trie, _ = New(common.Hash{}, root, db)
} }
} }
func TestDelete(t *testing.T) { func TestDelete(t *testing.T) {
trie := newEmpty() trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
vals := []struct{ k, v string }{ vals := []struct{ k, v string }{
{"do", "verb"}, {"do", "verb"},
{"ether", "wookiedoo"}, {"ether", "wookiedoo"},
@ -234,7 +228,7 @@ func TestDelete(t *testing.T) {
} }
func TestEmptyValues(t *testing.T) { func TestEmptyValues(t *testing.T) {
trie := newEmpty() trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
vals := []struct{ k, v string }{ vals := []struct{ k, v string }{
{"do", "verb"}, {"do", "verb"},
@ -258,7 +252,8 @@ func TestEmptyValues(t *testing.T) {
} }
func TestReplication(t *testing.T) { func TestReplication(t *testing.T) {
trie := newEmpty() triedb := NewDatabase(rawdb.NewMemoryDatabase())
trie := NewEmpty(triedb)
vals := []struct{ k, v string }{ vals := []struct{ k, v string }{
{"do", "verb"}, {"do", "verb"},
{"ether", "wookiedoo"}, {"ether", "wookiedoo"},
@ -271,13 +266,14 @@ func TestReplication(t *testing.T) {
for _, val := range vals { for _, val := range vals {
updateString(trie, val.k, val.v) updateString(trie, val.k, val.v)
} }
exp, _, err := trie.Commit(nil) exp, nodes, err := trie.Commit(false)
if err != nil { if err != nil {
t.Fatalf("commit error: %v", err) t.Fatalf("commit error: %v", err)
} }
triedb.Update(NewWithNodeSet(nodes))
// create a new trie on top of the database and check that lookups work. // create a new trie on top of the database and check that lookups work.
trie2, err := New(common.Hash{}, exp, trie.db) trie2, err := New(common.Hash{}, exp, triedb)
if err != nil { if err != nil {
t.Fatalf("can't recreate trie at %x: %v", exp, err) t.Fatalf("can't recreate trie at %x: %v", exp, err)
} }
@ -286,7 +282,7 @@ func TestReplication(t *testing.T) {
t.Errorf("trie2 doesn't have %q => %q", kv.k, kv.v) t.Errorf("trie2 doesn't have %q => %q", kv.k, kv.v)
} }
} }
hash, _, err := trie2.Commit(nil) hash, nodes, err := trie2.Commit(false)
if err != nil { if err != nil {
t.Fatalf("commit error: %v", err) t.Fatalf("commit error: %v", err)
} }
@ -294,6 +290,14 @@ func TestReplication(t *testing.T) {
t.Errorf("root failure. expected %x got %x", exp, hash) t.Errorf("root failure. expected %x got %x", exp, hash)
} }
// recreate the trie after commit
if nodes != nil {
triedb.Update(NewWithNodeSet(nodes))
}
trie2, err = New(common.Hash{}, hash, triedb)
if err != nil {
t.Fatalf("can't recreate trie at %x: %v", exp, err)
}
// perform some insertions on the new trie. // perform some insertions on the new trie.
vals2 := []struct{ k, v string }{ vals2 := []struct{ k, v string }{
{"do", "verb"}, {"do", "verb"},
@ -315,7 +319,7 @@ func TestReplication(t *testing.T) {
} }
func TestLargeValue(t *testing.T) { func TestLargeValue(t *testing.T) {
trie := newEmpty() trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
trie.Update([]byte("key1"), []byte{99, 99, 99, 99}) trie.Update([]byte("key1"), []byte{99, 99, 99, 99})
trie.Update([]byte("key2"), bytes.Repeat([]byte{1}, 32)) trie.Update([]byte("key2"), bytes.Repeat([]byte{1}, 32))
trie.Hash() trie.Hash()
@ -369,9 +373,8 @@ const (
opUpdate = iota opUpdate = iota
opDelete opDelete
opGet opGet
opCommit
opHash opHash
opReset opCommit
opItercheckhash opItercheckhash
opNodeDiff opNodeDiff
opMax // boundary value, not an actual op opMax // boundary value, not an actual op
@ -433,17 +436,17 @@ func runRandTest(rt randTest) bool {
if string(v) != want { if string(v) != want {
rt[i].err = fmt.Errorf("mismatch for key %#x, got %#x want %#x", step.key, v, want) rt[i].err = fmt.Errorf("mismatch for key %#x, got %#x want %#x", step.key, v, want)
} }
case opCommit:
_, _, rt[i].err = tr.Commit(nil)
origTrie = tr.Copy()
case opHash: case opHash:
tr.Hash() tr.Hash()
case opReset: case opCommit:
hash, _, err := tr.Commit(nil) hash, nodes, err := tr.Commit(false)
if err != nil { if err != nil {
rt[i].err = err rt[i].err = err
return false return false
} }
if nodes != nil {
triedb.Update(NewWithNodeSet(nodes))
}
newtr, err := New(common.Hash{}, hash, triedb) newtr, err := New(common.Hash{}, hash, triedb)
if err != nil { if err != nil {
rt[i].err = err rt[i].err = err
@ -533,44 +536,31 @@ func TestRandom(t *testing.T) {
} }
} }
func BenchmarkGet(b *testing.B) { benchGet(b, false) } func BenchmarkGet(b *testing.B) { benchGet(b) }
func BenchmarkGetDB(b *testing.B) { benchGet(b, true) }
func BenchmarkUpdateBE(b *testing.B) { benchUpdate(b, binary.BigEndian) } func BenchmarkUpdateBE(b *testing.B) { benchUpdate(b, binary.BigEndian) }
func BenchmarkUpdateLE(b *testing.B) { benchUpdate(b, binary.LittleEndian) } func BenchmarkUpdateLE(b *testing.B) { benchUpdate(b, binary.LittleEndian) }
const benchElemCount = 20000 const benchElemCount = 20000
func benchGet(b *testing.B, commit bool) { func benchGet(b *testing.B) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) triedb := NewDatabase(rawdb.NewMemoryDatabase())
if commit { trie := NewEmpty(triedb)
tmpdb := tempDB(b)
trie = NewEmpty(tmpdb)
}
k := make([]byte, 32) k := make([]byte, 32)
for i := 0; i < benchElemCount; i++ { for i := 0; i < benchElemCount; i++ {
binary.LittleEndian.PutUint64(k, uint64(i)) binary.LittleEndian.PutUint64(k, uint64(i))
trie.Update(k, k) trie.Update(k, k)
} }
binary.LittleEndian.PutUint64(k, benchElemCount/2) binary.LittleEndian.PutUint64(k, benchElemCount/2)
if commit {
trie.Commit(nil)
}
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
trie.Get(k) trie.Get(k)
} }
b.StopTimer() b.StopTimer()
if commit {
ldb := trie.db.diskdb.(*leveldb.Database)
ldb.Close()
os.RemoveAll(ldb.Path())
}
} }
func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie { func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie {
trie := newEmpty() trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
k := make([]byte, 32) k := make([]byte, 32)
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@ -600,7 +590,7 @@ func BenchmarkHash(b *testing.B) {
// entries, then adding N more. // entries, then adding N more.
addresses, accounts := makeAccounts(2 * b.N) addresses, accounts := makeAccounts(2 * b.N)
// Insert the accounts into the trie and hash it // Insert the accounts into the trie and hash it
trie := newEmpty() trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
i := 0 i := 0
for ; i < len(addresses)/2; i++ { for ; i < len(addresses)/2; i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i]) trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
@ -621,22 +611,17 @@ func BenchmarkHash(b *testing.B) {
// insert into the trie before measuring the hashing. // insert into the trie before measuring the hashing.
func BenchmarkCommitAfterHash(b *testing.B) { func BenchmarkCommitAfterHash(b *testing.B) {
b.Run("no-onleaf", func(b *testing.B) { b.Run("no-onleaf", func(b *testing.B) {
benchmarkCommitAfterHash(b, nil) benchmarkCommitAfterHash(b, false)
}) })
var a types.StateAccount
onleaf := func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash, parentPath []byte) error {
rlp.DecodeBytes(leaf, &a)
return nil
}
b.Run("with-onleaf", func(b *testing.B) { b.Run("with-onleaf", func(b *testing.B) {
benchmarkCommitAfterHash(b, onleaf) benchmarkCommitAfterHash(b, true)
}) })
} }
func benchmarkCommitAfterHash(b *testing.B, onleaf LeafCallback) { func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) {
// Make the random benchmark deterministic // Make the random benchmark deterministic
addresses, accounts := makeAccounts(b.N) addresses, accounts := makeAccounts(b.N)
trie := newEmpty() trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
for i := 0; i < len(addresses); i++ { for i := 0; i < len(addresses); i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i]) trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
} }
@ -644,13 +629,13 @@ func benchmarkCommitAfterHash(b *testing.B, onleaf LeafCallback) {
trie.Hash() trie.Hash()
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
trie.Commit(onleaf) trie.Commit(collectLeaf)
} }
func TestTinyTrie(t *testing.T) { func TestTinyTrie(t *testing.T) {
// Create a realistic account trie to hash // Create a realistic account trie to hash
_, accounts := makeAccounts(5) _, accounts := makeAccounts(5)
trie := newEmpty() trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3]) trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3])
if exp, root := common.HexToHash("8c6a85a4d9fda98feff88450299e574e5378e32391f75a055d470ac0653f1005"), trie.Hash(); exp != root { if exp, root := common.HexToHash("8c6a85a4d9fda98feff88450299e574e5378e32391f75a055d470ac0653f1005"), trie.Hash(); exp != root {
t.Errorf("1: got %x, exp %x", root, exp) t.Errorf("1: got %x, exp %x", root, exp)
@ -663,7 +648,7 @@ func TestTinyTrie(t *testing.T) {
if exp, root := common.HexToHash("0608c1d1dc3905fa22204c7a0e43644831c3b6d3def0f274be623a948197e64a"), trie.Hash(); exp != root { if exp, root := common.HexToHash("0608c1d1dc3905fa22204c7a0e43644831c3b6d3def0f274be623a948197e64a"), trie.Hash(); exp != root {
t.Errorf("3: got %x, exp %x", root, exp) t.Errorf("3: got %x, exp %x", root, exp)
} }
checktr := NewEmpty(trie.db) checktr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
it := NewIterator(trie.NodeIterator(nil)) it := NewIterator(trie.NodeIterator(nil))
for it.Next() { for it.Next() {
checktr.Update(it.Key, it.Value) checktr.Update(it.Key, it.Value)
@ -676,19 +661,19 @@ func TestTinyTrie(t *testing.T) {
func TestCommitAfterHash(t *testing.T) { func TestCommitAfterHash(t *testing.T) {
// Create a realistic account trie to hash // Create a realistic account trie to hash
addresses, accounts := makeAccounts(1000) addresses, accounts := makeAccounts(1000)
trie := newEmpty() trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
for i := 0; i < len(addresses); i++ { for i := 0; i < len(addresses); i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i]) trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
} }
// Insert the accounts into the trie and hash it // Insert the accounts into the trie and hash it
trie.Hash() trie.Hash()
trie.Commit(nil) trie.Commit(false)
root := trie.Hash() root := trie.Hash()
exp := common.HexToHash("72f9d3f3fe1e1dd7b8936442e7642aef76371472d94319900790053c493f3fe6") exp := common.HexToHash("72f9d3f3fe1e1dd7b8936442e7642aef76371472d94319900790053c493f3fe6")
if exp != root { if exp != root {
t.Errorf("got %x, exp %x", root, exp) t.Errorf("got %x, exp %x", root, exp)
} }
root, _, _ = trie.Commit(nil) root, _, _ = trie.Commit(false)
if exp != root { if exp != root {
t.Errorf("got %x, exp %x", root, exp) t.Errorf("got %x, exp %x", root, exp)
} }
@ -797,7 +782,8 @@ func TestCommitSequence(t *testing.T) {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i]) trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
} }
// Flush trie -> database // Flush trie -> database
root, _, _ := trie.Commit(nil) root, nodes, _ := trie.Commit(false)
db.Update(NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge) // Flush memdb -> disk (sponge)
db.Commit(root, false, func(c common.Hash) { db.Commit(root, false, func(c common.Hash) {
// And spongify the callback-order // And spongify the callback-order
@ -849,7 +835,8 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
trie.Update(key, val) trie.Update(key, val)
} }
// Flush trie -> database // Flush trie -> database
root, _, _ := trie.Commit(nil) root, nodes, _ := trie.Commit(false)
db.Update(NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge) // Flush memdb -> disk (sponge)
db.Commit(root, false, func(c common.Hash) { db.Commit(root, false, func(c common.Hash) {
// And spongify the callback-order // And spongify the callback-order
@ -875,7 +862,7 @@ func TestCommitSequenceStackTrie(t *testing.T) {
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"} stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
stTrie := NewStackTrie(stackTrieSponge) stTrie := NewStackTrie(stackTrieSponge)
// Fill the trie with elements // Fill the trie with elements
for i := 1; i < count; i++ { for i := 0; i < count; i++ {
// For the stack trie, we need to do inserts in proper order // For the stack trie, we need to do inserts in proper order
key := make([]byte, 32) key := make([]byte, 32)
binary.BigEndian.PutUint64(key, uint64(i)) binary.BigEndian.PutUint64(key, uint64(i))
@ -891,8 +878,9 @@ func TestCommitSequenceStackTrie(t *testing.T) {
stTrie.TryUpdate(key, val) stTrie.TryUpdate(key, val)
} }
// Flush trie -> database // Flush trie -> database
root, _, _ := trie.Commit(nil) root, nodes, _ := trie.Commit(false)
// Flush memdb -> disk (sponge) // Flush memdb -> disk (sponge)
db.Update(NewWithNodeSet(nodes))
db.Commit(root, false, nil) db.Commit(root, false, nil)
// And flush stacktrie -> disk // And flush stacktrie -> disk
stRoot, err := stTrie.Commit() stRoot, err := stTrie.Commit()
@ -936,8 +924,9 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
trie.TryUpdate(key, []byte{0x1}) trie.TryUpdate(key, []byte{0x1})
stTrie.TryUpdate(key, []byte{0x1}) stTrie.TryUpdate(key, []byte{0x1})
// Flush trie -> database // Flush trie -> database
root, _, _ := trie.Commit(nil) root, nodes, _ := trie.Commit(false)
// Flush memdb -> disk (sponge) // Flush memdb -> disk (sponge)
db.Update(NewWithNodeSet(nodes))
db.Commit(root, false, nil) db.Commit(root, false, nil)
// And flush stacktrie -> disk // And flush stacktrie -> disk
stRoot, err := stTrie.Commit() stRoot, err := stTrie.Commit()
@ -999,7 +988,7 @@ func BenchmarkHashFixedSize(b *testing.B) {
func benchmarkHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) { func benchmarkHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
b.ReportAllocs() b.ReportAllocs()
trie := newEmpty() trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
for i := 0; i < len(addresses); i++ { for i := 0; i < len(addresses); i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i]) trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
} }
@ -1050,14 +1039,14 @@ func BenchmarkCommitAfterHashFixedSize(b *testing.B) {
func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) { func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
b.ReportAllocs() b.ReportAllocs()
trie := newEmpty() trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
for i := 0; i < len(addresses); i++ { for i := 0; i < len(addresses); i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i]) trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
} }
// Insert the accounts into the trie and hash it // Insert the accounts into the trie and hash it
trie.Hash() trie.Hash()
b.StartTimer() b.StartTimer()
trie.Commit(nil) trie.Commit(false)
b.StopTimer() b.StopTimer()
} }
@ -1102,26 +1091,19 @@ func BenchmarkDerefRootFixedSize(b *testing.B) {
func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) { func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
b.ReportAllocs() b.ReportAllocs()
trie := newEmpty() triedb := NewDatabase(rawdb.NewMemoryDatabase())
trie := NewEmpty(triedb)
for i := 0; i < len(addresses); i++ { for i := 0; i < len(addresses); i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i]) trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
} }
h := trie.Hash() h := trie.Hash()
trie.Commit(nil) _, nodes, _ := trie.Commit(false)
triedb.Update(NewWithNodeSet(nodes))
b.StartTimer() b.StartTimer()
trie.db.Dereference(h) triedb.Dereference(h)
b.StopTimer() b.StopTimer()
} }
func tempDB(tb testing.TB) *Database {
dir := tb.TempDir()
diskdb, err := leveldb.New(dir, 256, 0, "", false)
if err != nil {
panic(fmt.Sprintf("can't create temporary database: %v", err))
}
return NewDatabase(diskdb)
}
func getString(trie *Trie, k string) []byte { func getString(trie *Trie, k string) []byte {
return trie.Get([]byte(k)) return trie.Get([]byte(k))
} }


@ -19,12 +19,14 @@ package trie
import ( import (
"testing" "testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
) )
// Tests if the trie diffs are tracked correctly. // Tests if the trie diffs are tracked correctly.
func TestTrieTracer(t *testing.T) { func TestTrieTracer(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) db := NewDatabase(rawdb.NewMemoryDatabase())
trie := NewEmpty(db)
trie.tracer = newTracer() trie.tracer = newTracer()
// Insert a batch of entries, all the nodes should be marked as inserted // Insert a batch of entries, all the nodes should be marked as inserted
@ -65,8 +67,11 @@ func TestTrieTracer(t *testing.T) {
t.Fatalf("Unexpected deleted node tracked %d", len(deleted)) t.Fatalf("Unexpected deleted node tracked %d", len(deleted))
} }
// Commit the changes // Commit the changes and re-create with the new root
trie.Commit(nil) root, nodes, _ := trie.Commit(false)
db.Update(NewWithNodeSet(nodes))
trie, _ = New(common.Hash{}, root, db)
trie.tracer = newTracer()
// Delete all the elements, check deletion set // Delete all the elements, check deletion set
for _, val := range vals { for _, val := range vals {