cmd, core, eth, les, light: track deleted nodes (#25757)

* cmd, core, eth, les, light: track deleted nodes

* trie: add docs

* trie: address comments

* cmd, core, eth, les, light, trie: trie id

* trie: add tests

* trie, core: updates

* trie: fix imports

* trie: add utility print-method for nodeset

* trie: import err

* trie: fix go vet warnings

Co-authored-by: Martin Holst Swende <martin@swende.se>
rjl493456442 2022-09-27 16:01:02 +08:00 committed by GitHub
parent fc3e6d0162
commit bff84a99fe
41 changed files with 866 additions and 287 deletions
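The recurring pattern throughout this diff is that trie constructors no longer take an (owner, root) hash pair; they take a trie identifier built by the new constructors. A minimal sketch of the three call sites, using only the constructors visible in the hunks below (the in-memory database wiring and the hash arguments are illustrative placeholders):

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

// openTries shows the three identifier constructors introduced by this change.
// stateRoot, accountHash and storageRoot are caller-supplied placeholder values.
func openTries(stateRoot, accountHash, storageRoot common.Hash) error {
	db := trie.NewDatabase(rawdb.NewMemoryDatabase())

	// Account (state) trie: identified by the state root alone.
	accountTrie, err := trie.New(trie.StateTrieID(stateRoot), db)
	if err != nil {
		return err
	}
	_ = accountTrie

	// Storage trie: identified by the state root, the owning account hash and the storage root.
	id := trie.StorageTrieID(stateRoot, accountHash, storageRoot)
	storageTrie, err := trie.NewStateTrie(id, db)
	if err != nil {
		return err
	}
	_ = storageTrie

	// Standalone trie (e.g. the CHT or bloom helper tries): identified by its own root.
	helperTrie, err := trie.New(trie.TrieID(storageRoot), db)
	if err != nil {
		return err
	}
	_ = helperTrie
	return nil
}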

View File

@@ -150,7 +150,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
     Action:    dbDumpTrie,
     Name:      "dumptrie",
     Usage:     "Show the storage key/values of a given storage trie",
-    ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
+    ArgsUsage: "<hex-encoded state root> <hex-encoded account hash> <hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
     Flags: flags.Merge([]cli.Flag{
         utils.SyncModeFlag,
     }, utils.NetworkFlags, utils.DatabasePathFlags),
@@ -486,7 +486,7 @@ func dbPut(ctx *cli.Context) error {
 // dbDumpTrie shows the key-value slots of a given storage trie
 func dbDumpTrie(ctx *cli.Context) error {
-    if ctx.NArg() < 1 {
+    if ctx.NArg() < 3 {
         return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
     }
     stack, _ := makeConfigNode(ctx)
@@ -494,30 +494,41 @@ func dbDumpTrie(ctx *cli.Context) error {
     db := utils.MakeChainDatabase(ctx, stack, true)
     defer db.Close()
     var (
-        root  []byte
-        start []byte
-        max   = int64(-1)
-        err   error
+        state   []byte
+        storage []byte
+        account []byte
+        start   []byte
+        max     = int64(-1)
+        err     error
     )
-    if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
-        log.Info("Could not decode the root", "error", err)
+    if state, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
+        log.Info("Could not decode the state root", "error", err)
         return err
     }
-    stRoot := common.BytesToHash(root)
-    if ctx.NArg() >= 2 {
-        if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
+    if account, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
+        log.Info("Could not decode the account hash", "error", err)
+        return err
+    }
+    if storage, err = hexutil.Decode(ctx.Args().Get(2)); err != nil {
+        log.Info("Could not decode the storage trie root", "error", err)
+        return err
+    }
+    if ctx.NArg() > 3 {
+        if start, err = hexutil.Decode(ctx.Args().Get(3)); err != nil {
             log.Info("Could not decode the seek position", "error", err)
             return err
         }
     }
-    if ctx.NArg() >= 3 {
-        if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
+    if ctx.NArg() > 4 {
+        if max, err = strconv.ParseInt(ctx.Args().Get(4), 10, 64); err != nil {
             log.Info("Could not decode the max count", "error", err)
             return err
         }
     }
-    theTrie, err := trie.New(common.Hash{}, stRoot, trie.NewDatabase(db))
+    id := trie.StorageTrieID(common.BytesToHash(state), common.BytesToHash(account), common.BytesToHash(storage))
+    theTrie, err := trie.New(id, trie.NewDatabase(db))
     if err != nil {
         return err
     }
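The reworked dumptrie handler above implies a new argument order: state root, account hash and storage trie root are mandatory, followed by the optional seek position and max count. A small sketch of the same decoding flow outside the CLI context, using only the hexutil and trie calls already present in the hunk (the function and variable names are illustrative):

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

// openStorageTrieFromHex mirrors the argument handling of the updated dumptrie
// command: three hex strings together identify the storage trie to open.
func openStorageTrieFromHex(db ethdb.Database, stateHex, accountHex, storageHex string) (*trie.Trie, error) {
	state, err := hexutil.Decode(stateHex)
	if err != nil {
		return nil, err
	}
	account, err := hexutil.Decode(accountHex)
	if err != nil {
		return nil, err
	}
	storage, err := hexutil.Decode(storageHex)
	if err != nil {
		return nil, err
	}
	id := trie.StorageTrieID(common.BytesToHash(state), common.BytesToHash(account), common.BytesToHash(storage))
	return trie.New(id, trie.NewDatabase(db))
}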

View File

@@ -286,7 +286,7 @@ func traverseState(ctx *cli.Context) error {
         log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
     }
     triedb := trie.NewDatabase(chaindb)
-    t, err := trie.NewStateTrie(common.Hash{}, root, triedb)
+    t, err := trie.NewStateTrie(trie.StateTrieID(root), triedb)
     if err != nil {
         log.Error("Failed to open trie", "root", root, "err", err)
         return err
@@ -307,7 +307,8 @@ func traverseState(ctx *cli.Context) error {
             return err
         }
         if acc.Root != emptyRoot {
-            storageTrie, err := trie.NewStateTrie(common.BytesToHash(accIter.Key), acc.Root, triedb)
+            id := trie.StorageTrieID(root, common.BytesToHash(accIter.Key), acc.Root)
+            storageTrie, err := trie.NewStateTrie(id, triedb)
             if err != nil {
                 log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
                 return err
@@ -375,7 +376,7 @@ func traverseRawState(ctx *cli.Context) error {
         log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
     }
     triedb := trie.NewDatabase(chaindb)
-    t, err := trie.NewStateTrie(common.Hash{}, root, triedb)
+    t, err := trie.NewStateTrie(trie.StateTrieID(root), triedb)
     if err != nil {
         log.Error("Failed to open trie", "root", root, "err", err)
         return err
@@ -421,7 +422,8 @@ func traverseRawState(ctx *cli.Context) error {
             return errors.New("invalid account")
         }
         if acc.Root != emptyRoot {
-            storageTrie, err := trie.NewStateTrie(common.BytesToHash(accIter.LeafKey()), acc.Root, triedb)
+            id := trie.StorageTrieID(root, common.BytesToHash(accIter.LeafKey()), acc.Root)
+            storageTrie, err := trie.NewStateTrie(id, triedb)
             if err != nil {
                 log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
                 return errors.New("missing storage trie")

View File

@@ -70,6 +70,8 @@ var (
     snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil)
     snapshotCommitTimer      = metrics.NewRegisteredTimer("chain/snapshot/commits", nil)
 
+    triedbCommitTimer = metrics.NewRegisteredTimer("chain/triedb/commits", nil)
+
     blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
     blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
     blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
@@ -737,10 +739,10 @@ func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error {
     if block == nil {
         return fmt.Errorf("non existent block [%x..]", hash[:4])
     }
-    if _, err := trie.NewStateTrie(common.Hash{}, block.Root(), bc.stateCache.TrieDB()); err != nil {
-        return err
+    root := block.Root()
+    if !bc.HasState(root) {
+        return fmt.Errorf("non existent state [%x..]", root[:4])
     }
     // If all checks out, manually set the head block.
     if !bc.chainmu.TryLock() {
         return errChainStopped
@@ -752,7 +754,7 @@ func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error {
     // Destroy any existing state snapshot and regenerate it in the background,
     // also resuming the normal maintenance of any previously paused snapshot.
     if bc.snaps != nil {
-        bc.snaps.Rebuild(block.Root())
+        bc.snaps.Rebuild(root)
     }
     log.Info("Committed new head block", "number", block.Number(), "hash", hash)
     return nil
@@ -1750,8 +1752,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
     accountCommitTimer.Update(statedb.AccountCommits)   // Account commits are complete, we can mark them
     storageCommitTimer.Update(statedb.StorageCommits)   // Storage commits are complete, we can mark them
     snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
-    blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits)
+    triedbCommitTimer.Update(statedb.TrieDBCommits)     // Triedb commits are complete, we can mark them
+    blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits)
     blockInsertTimer.UpdateSince(start)
 
     // Report the import stats before returning the various results

View File

@@ -43,7 +43,7 @@ type Database interface {
     OpenTrie(root common.Hash) (Trie, error)
 
     // OpenStorageTrie opens the storage trie of an account.
-    OpenStorageTrie(addrHash, root common.Hash) (Trie, error)
+    OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error)
 
     // CopyTrie returns an independent copy of the given trie.
     CopyTrie(Trie) Trie
@@ -148,7 +148,7 @@ type cachingDB struct {
 // OpenTrie opens the main account trie at a specific root hash.
 func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
-    tr, err := trie.NewStateTrie(common.Hash{}, root, db.db)
+    tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.db)
     if err != nil {
         return nil, err
     }
@@ -156,8 +156,8 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
 }
 
 // OpenStorageTrie opens the storage trie of an account.
-func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) {
-    tr, err := trie.NewStateTrie(addrHash, root, db.db)
+func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error) {
+    tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, addrHash, root), db.db)
     if err != nil {
         return nil, err
     }
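Every implementation of the state.Database interface, and every caller, now has to thread the state root through when opening a storage trie. A minimal caller-side sketch under that widened signature (the three hashes are caller-supplied placeholders):

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
)

// openAccountStorage adapts to the widened interface: the root of the state
// the account lives in is passed alongside the account hash and storage root.
func openAccountStorage(db state.Database, stateRoot, addrHash, storageRoot common.Hash) (state.Trie, error) {
	return db.OpenStorageTrie(stateRoot, addrHash, storageRoot)
}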

View File

@@ -109,7 +109,7 @@ func (it *NodeIterator) step() error {
     if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil {
         return err
     }
-    dataTrie, err := it.state.db.OpenStorageTrie(common.BytesToHash(it.stateIt.LeafKey()), account.Root)
+    dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, common.BytesToHash(it.stateIt.LeafKey()), account.Root)
     if err != nil {
         return err
     }

View File

@@ -19,10 +19,12 @@ package state
 import "github.com/ethereum/go-ethereum/metrics"
 
 var (
     accountUpdatedMeter   = metrics.NewRegisteredMeter("state/update/account", nil)
     storageUpdatedMeter   = metrics.NewRegisteredMeter("state/update/storage", nil)
     accountDeletedMeter   = metrics.NewRegisteredMeter("state/delete/account", nil)
     storageDeletedMeter   = metrics.NewRegisteredMeter("state/delete/storage", nil)
-    accountTrieCommittedMeter  = metrics.NewRegisteredMeter("state/commit/accountnodes", nil)
-    storageTriesCommittedMeter = metrics.NewRegisteredMeter("state/commit/storagenodes", nil)
+    accountTrieUpdatedMeter  = metrics.NewRegisteredMeter("state/update/accountnodes", nil)
+    storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil)
+    accountTrieDeletedMeter  = metrics.NewRegisteredMeter("state/delete/accountnodes", nil)
+    storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil)
 )

View File

@@ -93,7 +93,7 @@ type Pruner struct {
 func NewPruner(db ethdb.Database, config Config) (*Pruner, error) {
     headBlock := rawdb.ReadHeadBlock(db)
     if headBlock == nil {
-        return nil, errors.New("Failed to load head block")
+        return nil, errors.New("failed to load head block")
     }
     snapconfig := snapshot.Config{
         CacheSize: 256,
@@ -427,7 +427,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
     if genesis == nil {
         return errors.New("missing genesis block")
     }
-    t, err := trie.NewStateTrie(common.Hash{}, genesis.Root(), trie.NewDatabase(db))
+    t, err := trie.NewStateTrie(trie.StateTrieID(genesis.Root()), trie.NewDatabase(db))
     if err != nil {
         return err
     }
@@ -447,7 +447,8 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
         return err
     }
     if acc.Root != emptyRoot {
-        storageTrie, err := trie.NewStateTrie(common.BytesToHash(accIter.LeafKey()), acc.Root, trie.NewDatabase(db))
+        id := trie.StorageTrieID(genesis.Root(), common.BytesToHash(accIter.LeafKey()), acc.Root)
+        storageTrie, err := trie.NewStateTrie(id, trie.NewDatabase(db))
         if err != nil {
             return err
         }

View File

@@ -166,7 +166,7 @@ func (result *proofResult) forEach(callback func(key []byte, val []byte) error)
 //
 // The proof result will be returned if the range proving is finished, otherwise
 // the error will be returned to abort the entire procedure.
-func (dl *diskLayer) proveRange(ctx *generatorContext, owner common.Hash, root common.Hash, prefix []byte, kind string, origin []byte, max int, valueConvertFn func([]byte) ([]byte, error)) (*proofResult, error) {
+func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix []byte, kind string, origin []byte, max int, valueConvertFn func([]byte) ([]byte, error)) (*proofResult, error) {
     var (
         keys [][]byte
         vals [][]byte
@@ -233,8 +233,9 @@ func (dl *diskLayer) proveRange(ctx *generatorContext, owner common.Hash, root c
     }(time.Now())
 
     // The snap state is exhausted, pass the entire key/val set for verification
+    root := trieId.Root
     if origin == nil && !diskMore {
-        stackTr := trie.NewStackTrieWithOwner(nil, owner)
+        stackTr := trie.NewStackTrie(nil)
         for i, key := range keys {
             stackTr.TryUpdate(key, vals[i])
         }
@@ -248,7 +249,7 @@ func (dl *diskLayer) proveRange(ctx *generatorContext, owner common.Hash, root c
         return &proofResult{keys: keys, vals: vals}, nil
     }
     // Snap state is chunked, generate edge proofs for verification.
-    tr, err := trie.New(owner, root, dl.triedb)
+    tr, err := trie.New(trieId, dl.triedb)
     if err != nil {
         ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
         return nil, errMissingTrie
@@ -313,9 +314,9 @@ type onStateCallback func(key []byte, val []byte, write bool, delete bool) error
 // generateRange generates the state segment with particular prefix. Generation can
 // either verify the correctness of existing state through range-proof and skip
 // generation, or iterate trie to regenerate state on demand.
-func (dl *diskLayer) generateRange(ctx *generatorContext, owner common.Hash, root common.Hash, prefix []byte, kind string, origin []byte, max int, onState onStateCallback, valueConvertFn func([]byte) ([]byte, error)) (bool, []byte, error) {
+func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefix []byte, kind string, origin []byte, max int, onState onStateCallback, valueConvertFn func([]byte) ([]byte, error)) (bool, []byte, error) {
     // Use range prover to check the validity of the flat state in the range
-    result, err := dl.proveRange(ctx, owner, root, prefix, kind, origin, max, valueConvertFn)
+    result, err := dl.proveRange(ctx, trieId, prefix, kind, origin, max, valueConvertFn)
     if err != nil {
         return false, nil, err
     }
@@ -363,7 +364,7 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, owner common.Hash, roo
     if len(result.keys) > 0 {
         snapNodeCache = memorydb.New()
         snapTrieDb := trie.NewDatabase(snapNodeCache)
-        snapTrie, _ := trie.New(owner, common.Hash{}, snapTrieDb)
+        snapTrie := trie.NewEmpty(snapTrieDb)
         for i, key := range result.keys {
             snapTrie.Update(key, result.vals[i])
         }
@@ -377,7 +378,7 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, owner common.Hash, roo
     // if it's already opened with some nodes resolved.
     tr := result.tr
     if tr == nil {
-        tr, err = trie.New(owner, root, dl.triedb)
+        tr, err = trie.New(trieId, dl.triedb)
         if err != nil {
             ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
             return false, nil, errMissingTrie
@@ -460,7 +461,7 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, owner common.Hash, roo
     } else {
         snapAccountTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
     }
-    logger.Debug("Regenerated state range", "root", root, "last", hexutil.Encode(last),
+    logger.Debug("Regenerated state range", "root", trieId.Root, "last", hexutil.Encode(last),
         "count", count, "created", created, "updated", updated, "untouched", untouched, "deleted", deleted)
 
     // If there are either more trie items, or there are more snap items
@@ -511,7 +512,7 @@ func (dl *diskLayer) checkAndFlush(ctx *generatorContext, current []byte) error
 // generateStorages generates the missing storage slots of the specific contract.
 // It's supposed to restart the generation from the given origin position.
-func generateStorages(ctx *generatorContext, dl *diskLayer, account common.Hash, storageRoot common.Hash, storeMarker []byte) error {
+func generateStorages(ctx *generatorContext, dl *diskLayer, stateRoot common.Hash, account common.Hash, storageRoot common.Hash, storeMarker []byte) error {
     onStorage := func(key []byte, val []byte, write bool, delete bool) error {
         defer func(start time.Time) {
             snapStorageWriteCounter.Inc(time.Since(start).Nanoseconds())
@@ -540,7 +541,8 @@ func generateStorages(ctx *generatorContext, dl *diskLayer, account common.Hash,
     // Loop for re-generating the missing storage slots.
     var origin = common.CopyBytes(storeMarker)
     for {
-        exhausted, last, err := dl.generateRange(ctx, account, storageRoot, append(rawdb.SnapshotStoragePrefix, account.Bytes()...), snapStorage, origin, storageCheckRange, onStorage, nil)
+        id := trie.StorageTrieID(stateRoot, account, storageRoot)
+        exhausted, last, err := dl.generateRange(ctx, id, append(rawdb.SnapshotStoragePrefix, account.Bytes()...), snapStorage, origin, storageCheckRange, onStorage, nil)
         if err != nil {
             return err // The procedure it aborted, either by external signal or internal error.
         }
@@ -624,7 +626,7 @@ func generateAccounts(ctx *generatorContext, dl *diskLayer, accMarker []byte) er
     if accMarker != nil && bytes.Equal(account[:], accMarker) && len(dl.genMarker) > common.HashLength {
         storeMarker = dl.genMarker[common.HashLength:]
     }
-    if err := generateStorages(ctx, dl, account, acc.Root, storeMarker); err != nil {
+    if err := generateStorages(ctx, dl, dl.root, account, acc.Root, storeMarker); err != nil {
         return err
     }
 }
@@ -640,7 +642,8 @@ func generateAccounts(ctx *generatorContext, dl *diskLayer, accMarker []byte) er
     }
     origin := common.CopyBytes(accMarker)
     for {
-        exhausted, last, err := dl.generateRange(ctx, common.Hash{}, dl.root, rawdb.SnapshotAccountPrefix, snapAccount, origin, accountRange, onAccount, FullAccountRLP)
+        id := trie.StateTrieID(dl.root)
+        exhausted, last, err := dl.generateRange(ctx, id, rawdb.SnapshotAccountPrefix, snapAccount, origin, accountRange, onAccount, FullAccountRLP)
         if err != nil {
             return err // The procedure it aborted, either by external signal or internal error.
         }
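Besides threading a *trie.ID through proveRange and generateRange, the generator above drops the owner-aware stack trie in favour of plain trie.NewStackTrie(nil) when the whole range fits in one pass. A sketch of that verification step in isolation, assuming StackTrie exposes a Hash method as in upstream go-ethereum (the function name is illustrative):

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie"
)

// matchesRoot rebuilds a trie from flat key/value pairs with a stack trie and
// compares the resulting root, which is essentially what proveRange does when
// the snapshot range is exhausted.
func matchesRoot(keys [][]byte, vals [][]byte, want common.Hash) bool {
	stackTr := trie.NewStackTrie(nil)
	for i, key := range keys {
		stackTr.TryUpdate(key, vals[i])
	}
	return stackTr.Hash() == want
}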

View File

@@ -149,7 +149,7 @@ type testHelper struct {
 func newHelper() *testHelper {
     diskdb := rawdb.NewMemoryDatabase()
     triedb := trie.NewDatabase(diskdb)
-    accTrie, _ := trie.NewStateTrie(common.Hash{}, common.Hash{}, triedb)
+    accTrie, _ := trie.NewStateTrie(trie.StateTrieID(common.Hash{}), triedb)
     return &testHelper{
         diskdb: diskdb,
         triedb: triedb,
@@ -182,7 +182,8 @@ func (t *testHelper) addSnapStorage(accKey string, keys []string, vals []string)
 }
 
 func (t *testHelper) makeStorageTrie(stateRoot, owner common.Hash, keys []string, vals []string, commit bool) []byte {
-    stTrie, _ := trie.NewStateTrie(owner, common.Hash{}, t.triedb)
+    id := trie.StorageTrieID(stateRoot, owner, common.Hash{})
+    stTrie, _ := trie.NewStateTrie(id, t.triedb)
     for i, k := range keys {
         stTrie.Update([]byte(k), []byte(vals[i]))
     }

View File

@@ -159,9 +159,9 @@ func (s *stateObject) getTrie(db Database) Trie {
     }
     if s.trie == nil {
         var err error
-        s.trie, err = db.OpenStorageTrie(s.addrHash, s.data.Root)
+        s.trie, err = db.OpenStorageTrie(s.db.originalRoot, s.addrHash, s.data.Root)
         if err != nil {
-            s.trie, _ = db.OpenStorageTrie(s.addrHash, common.Hash{})
+            s.trie, _ = db.OpenStorageTrie(s.db.originalRoot, s.addrHash, common.Hash{})
             s.setError(fmt.Errorf("can't create storage trie: %v", err))
         }
     }

View File

@@ -120,6 +120,7 @@ type StateDB struct {
     SnapshotAccountReads time.Duration
     SnapshotStorageReads time.Duration
     SnapshotCommits      time.Duration
+    TrieDBCommits        time.Duration
 
     AccountUpdated int
     StorageUpdated int
@@ -904,9 +905,11 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
     // Commit objects to the trie, measuring the elapsed time
     var (
-        accountTrieNodes int
-        storageTrieNodes int
-        nodes            = trie.NewMergedNodeSet()
+        accountTrieNodesUpdated int
+        accountTrieNodesDeleted int
+        storageTrieNodesUpdated int
+        storageTrieNodesDeleted int
+        nodes                   = trie.NewMergedNodeSet()
     )
     codeWriter := s.db.DiskDB().NewBatch()
     for addr := range s.stateObjectsDirty {
@@ -926,7 +929,9 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
             if err := nodes.Merge(set); err != nil {
                 return common.Hash{}, err
             }
-            storageTrieNodes += set.Len()
+            updates, deleted := set.Size()
+            storageTrieNodesUpdated += updates
+            storageTrieNodesDeleted += deleted
         }
     }
 }
@@ -952,7 +957,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
     if err := nodes.Merge(set); err != nil {
         return common.Hash{}, err
     }
-    accountTrieNodes = set.Len()
+    accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size()
 }
 if metrics.EnabledExpensive {
     s.AccountCommits += time.Since(start)
@@ -961,16 +966,16 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
     storageUpdatedMeter.Mark(int64(s.StorageUpdated))
     accountDeletedMeter.Mark(int64(s.AccountDeleted))
     storageDeletedMeter.Mark(int64(s.StorageDeleted))
-    accountTrieCommittedMeter.Mark(int64(accountTrieNodes))
-    storageTriesCommittedMeter.Mark(int64(storageTrieNodes))
+    accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated))
+    accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted))
+    storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated))
+    storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted))
     s.AccountUpdated, s.AccountDeleted = 0, 0
     s.StorageUpdated, s.StorageDeleted = 0, 0
 }
 // If snapshotting is enabled, update the snapshot tree with this new version
 if s.snap != nil {
-    if metrics.EnabledExpensive {
-        defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now())
-    }
+    start := time.Now()
     // Only update if there's a state transition (skip empty Clique blocks)
     if parent := s.snap.Root(); parent != root {
         if err := s.snaps.Update(root, parent, s.snapDestructs, s.snapAccounts, s.snapStorage); err != nil {
@@ -984,13 +989,29 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
             log.Warn("Failed to cap snapshot tree", "root", root, "layers", 128, "err", err)
         }
     }
+    if metrics.EnabledExpensive {
+        s.SnapshotCommits += time.Since(start)
+    }
     s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil
 }
-if err := s.db.TrieDB().Update(nodes); err != nil {
-    return common.Hash{}, err
+if root == (common.Hash{}) {
+    root = emptyRoot
 }
-s.originalRoot = root
-return root, err
+origin := s.originalRoot
+if origin == (common.Hash{}) {
+    origin = emptyRoot
+}
+if root != origin {
+    start := time.Now()
+    if err := s.db.TrieDB().Update(nodes); err != nil {
+        return common.Hash{}, err
+    }
+    s.originalRoot = root
+    if metrics.EnabledExpensive {
+        s.TrieDBCommits += time.Since(start)
+    }
+}
+return root, nil
 }
 
 // PrepareAccessList handles the preparatory steps for executing a state transition with
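The Commit changes above are the core of the "track deleted nodes" theme: every committed trie now reports how many nodes it updated and how many it deleted, and the two counts feed separate meters. A compact sketch of the aggregation pattern, using only the NodeSet/MergedNodeSet calls visible in this hunk (the surrounding bookkeeping is illustrative):

package example

import "github.com/ethereum/go-ethereum/trie"

// mergeAndCount folds per-trie node sets into one merged set and tallies the
// updated and deleted trie nodes, mirroring how StateDB.Commit aggregates the
// storage-trie results before handing everything to the trie database.
func mergeAndCount(sets []*trie.NodeSet) (*trie.MergedNodeSet, int, int, error) {
	var (
		updatedTotal int
		deletedTotal int
		merged       = trie.NewMergedNodeSet()
	)
	for _, set := range sets {
		if set == nil {
			continue
		}
		if err := merged.Merge(set); err != nil {
			return nil, 0, 0, err
		}
		updates, deleted := set.Size()
		updatedTotal += updates
		deletedTotal += deleted
	}
	return merged, updatedTotal, deletedTotal, nil
}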

View File

@@ -104,7 +104,7 @@ func checkTrieConsistency(db ethdb.KeyValueStore, root common.Hash) error {
     if v, _ := db.Get(root[:]); v == nil {
         return nil // Consider a non existent state consistent.
     }
-    trie, err := trie.New(common.Hash{}, root, trie.NewDatabase(db))
+    trie, err := trie.New(trie.StateTrieID(root), trie.NewDatabase(db))
     if err != nil {
         return err
     }
@@ -174,7 +174,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
     if commit {
         srcDb.TrieDB().Commit(srcRoot, false, nil)
     }
-    srcTrie, _ := trie.New(common.Hash{}, srcRoot, srcDb.TrieDB())
+    srcTrie, _ := trie.New(trie.StateTrieID(srcRoot), srcDb.TrieDB())
 
     // Create a destination state and sync with the scheduler
     dstDb := rawdb.NewMemoryDatabase()
@@ -222,7 +222,8 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
     if err := rlp.DecodeBytes(srcTrie.Get(node.syncPath[0]), &acc); err != nil {
         t.Fatalf("failed to decode account on path %x: %v", node.syncPath[0], err)
     }
-    stTrie, err := trie.New(common.BytesToHash(node.syncPath[0]), acc.Root, srcDb.TrieDB())
+    id := trie.StorageTrieID(srcRoot, common.BytesToHash(node.syncPath[0]), acc.Root)
+    stTrie, err := trie.New(id, srcDb.TrieDB())
     if err != nil {
         t.Fatalf("failed to retriev storage trie for path %x: %v", node.syncPath[1], err)
     }

View File

@@ -150,7 +150,7 @@ func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, keys [][]
     id := p.trieID(owner, root)
     fetcher := p.fetchers[id]
     if fetcher == nil {
-        fetcher = newSubfetcher(p.db, owner, root)
+        fetcher = newSubfetcher(p.db, p.root, owner, root)
         p.fetchers[id] = fetcher
     }
     fetcher.schedule(keys)
@@ -206,6 +206,7 @@ func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string {
 // the trie being worked on is retrieved from the prefetcher.
 type subfetcher struct {
     db    Database    // Database to load trie nodes through
+    state common.Hash // Root hash of the state to prefetch
     owner common.Hash // Owner of the trie, usually account hash
     root  common.Hash // Root hash of the trie to prefetch
     trie  Trie        // Trie being populated with nodes
@@ -225,9 +226,10 @@ type subfetcher struct {
 // newSubfetcher creates a goroutine to prefetch state items belonging to a
 // particular root hash.
-func newSubfetcher(db Database, owner common.Hash, root common.Hash) *subfetcher {
+func newSubfetcher(db Database, state common.Hash, owner common.Hash, root common.Hash) *subfetcher {
     sf := &subfetcher{
         db:    db,
+        state: state,
         owner: owner,
         root:  root,
         wake:  make(chan struct{}, 1),
@@ -298,7 +300,7 @@ func (sf *subfetcher) loop() {
         }
         sf.trie = trie
     } else {
-        trie, err := sf.db.OpenStorageTrie(sf.owner, sf.root)
+        trie, err := sf.db.OpenStorageTrie(sf.state, sf.owner, sf.root)
         if err != nil {
             log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
             return

View File

@@ -508,11 +508,11 @@ func (api *DebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]c
     }
     triedb := api.eth.BlockChain().StateCache().TrieDB()
 
-    oldTrie, err := trie.NewStateTrie(common.Hash{}, startBlock.Root(), triedb)
+    oldTrie, err := trie.NewStateTrie(trie.StateTrieID(startBlock.Root()), triedb)
     if err != nil {
         return nil, err
     }
-    newTrie, err := trie.NewStateTrie(common.Hash{}, endBlock.Root(), triedb)
+    newTrie, err := trie.NewStateTrie(trie.StateTrieID(endBlock.Root()), triedb)
     if err != nil {
         return nil, err
     }

View File

@@ -283,7 +283,7 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac
     req.Bytes = softResponseLimit
 }
 // Retrieve the requested state and bail out if non existent
-tr, err := trie.New(common.Hash{}, req.Root, chain.StateCache().TrieDB())
+tr, err := trie.New(trie.StateTrieID(req.Root), chain.StateCache().TrieDB())
 if err != nil {
     return nil, nil
 }
@@ -413,7 +413,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP
 if origin != (common.Hash{}) || (abort && len(storage) > 0) {
     // Request started at a non-zero hash or was capped prematurely, add
     // the endpoint Merkle proofs
-    accTrie, err := trie.NewStateTrie(common.Hash{}, req.Root, chain.StateCache().TrieDB())
+    accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), chain.StateCache().TrieDB())
     if err != nil {
         return nil, nil
     }
@@ -421,7 +421,8 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP
     if err != nil || acc == nil {
         return nil, nil
     }
-    stTrie, err := trie.NewStateTrie(account, acc.Root, chain.StateCache().TrieDB())
+    id := trie.StorageTrieID(req.Root, account, acc.Root)
+    stTrie, err := trie.NewStateTrie(id, chain.StateCache().TrieDB())
     if err != nil {
         return nil, nil
     }
@@ -487,7 +488,7 @@ func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, s
 // Make sure we have the state associated with the request
 triedb := chain.StateCache().TrieDB()
 
-accTrie, err := trie.NewStateTrie(common.Hash{}, req.Root, triedb)
+accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), triedb)
 if err != nil {
     // We don't have the requested state available, bail out
     return nil, nil
@@ -529,7 +530,8 @@ func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, s
     if err != nil || account == nil {
         break
     }
-    stTrie, err := trie.NewStateTrie(common.BytesToHash(pathset[0]), common.BytesToHash(account.Root), triedb)
+    id := trie.StorageTrieID(req.Root, common.BytesToHash(pathset[0]), common.BytesToHash(account.Root))
+    stTrie, err := trie.NewStateTrie(id, triedb)
     loads++ // always account database reads, even for failures
     if err != nil {
         break

View File

@@ -1372,7 +1372,7 @@ func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
     root, nodes, _ := accTrie.Commit(false)
     db.Update(trie.NewWithNodeSet(nodes))
 
-    accTrie, _ = trie.New(common.Hash{}, root, db)
+    accTrie, _ = trie.New(trie.StateTrieID(root), db)
     return accTrie, entries
 }
@@ -1434,7 +1434,7 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
     root, nodes, _ := accTrie.Commit(false)
     db.Update(trie.NewWithNodeSet(nodes))
 
-    accTrie, _ = trie.New(common.Hash{}, root, db)
+    accTrie, _ = trie.New(trie.StateTrieID(root), db)
     return accTrie, entries
 }
@@ -1484,10 +1484,11 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
     db.Update(nodes)
 
     // Re-create tries with new root
-    accTrie, _ = trie.New(common.Hash{}, root, db)
+    accTrie, _ = trie.New(trie.StateTrieID(root), db)
     for i := uint64(1); i <= uint64(accounts); i++ {
         key := key32(i)
-        trie, _ := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
+        id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)])
+        trie, _ := trie.New(id, db)
         storageTries[common.BytesToHash(key)] = trie
     }
     return accTrie, entries, storageTries, storageEntries
@@ -1548,13 +1549,14 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie
     db.Update(nodes)
 
     // Re-create tries with new root
-    accTrie, err := trie.New(common.Hash{}, root, db)
+    accTrie, err := trie.New(trie.StateTrieID(root), db)
     if err != nil {
         panic(err)
     }
     for i := uint64(1); i <= uint64(accounts); i++ {
         key := key32(i)
-        trie, err := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
+        id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)])
+        trie, err := trie.New(id, db)
         if err != nil {
             panic(err)
         }
@@ -1567,7 +1569,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie
 // not-yet-committed trie and the sorted entries. The seeds can be used to ensure
 // that tries are unique.
 func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
-    trie, _ := trie.New(owner, common.Hash{}, db)
+    trie, _ := trie.New(trie.StorageTrieID(common.Hash{}, owner, common.Hash{}), db)
     var entries entrySlice
     for i := uint64(1); i <= n; i++ {
         // store 'x' at slot 'x'
@@ -1593,7 +1595,7 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
     var (
         entries    entrySlice
         boundaries []common.Hash
-        trie, _    = trie.New(owner, common.Hash{}, db)
+        trie, _    = trie.New(trie.StorageTrieID(common.Hash{}, owner, common.Hash{}), db)
     )
     // Initialize boundaries
     var next common.Hash
@@ -1640,7 +1642,7 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
 func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
     t.Helper()
     triedb := trie.NewDatabase(db)
-    accTrie, err := trie.New(common.Hash{}, root, triedb)
+    accTrie, err := trie.New(trie.StateTrieID(root), triedb)
     if err != nil {
         t.Fatal(err)
     }
@@ -1658,7 +1660,8 @@ func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
     }
     accounts++
     if acc.Root != emptyRoot {
-        storeTrie, err := trie.NewStateTrie(common.BytesToHash(accIt.Key), acc.Root, triedb)
+        id := trie.StorageTrieID(root, common.BytesToHash(accIt.Key), acc.Root)
+        storeTrie, err := trie.NewStateTrie(id, triedb)
         if err != nil {
             t.Fatal(err)
         }

View File

@@ -229,7 +229,7 @@ func (dl *downloadTester) CurrentFastBlock() *types.Block {
 func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
     // For now only check that the state trie is correct
     if block := dl.GetBlockByHash(hash); block != nil {
-        _, err := trie.NewStateTrie(common.Hash{}, block.Root(), trie.NewDatabase(dl.stateDb))
+        _, err := trie.NewStateTrie(trie.StateTrieID(block.Root()), trie.NewDatabase(dl.stateDb))
         return err
     }
     return fmt.Errorf("non existent block: %x", hash[:4])

View File

@@ -405,7 +405,7 @@ func testGetProofs(t *testing.T, protocol int) {
     accounts := []common.Address{bankAddr, userAddr1, userAddr2, signerAddr, {}}
     for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
         header := bc.GetHeaderByNumber(i)
-        trie, _ := trie.New(common.Hash{}, header.Root, trie.NewDatabase(server.db))
+        trie, _ := trie.New(trie.StateTrieID(header.Root), trie.NewDatabase(server.db))
 
         for _, acc := range accounts {
             req := ProofReq{
@@ -456,7 +456,7 @@ func testGetStaleProof(t *testing.T, protocol int) {
     var expected []rlp.RawValue
     if wantOK {
         proofsV2 := light.NewNodeSet()
-        t, _ := trie.New(common.Hash{}, header.Root, trie.NewDatabase(server.db))
+        t, _ := trie.New(trie.StateTrieID(header.Root), trie.NewDatabase(server.db))
         t.Prove(account, 0, proofsV2)
         expected = proofsV2.NodeList()
     }
@@ -512,7 +512,7 @@ func testGetCHTProofs(t *testing.T, protocol int) {
         AuxData: [][]byte{rlp},
     }
     root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash())
-    trie, _ := trie.New(common.Hash{}, root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
+    trie, _ := trie.New(trie.TrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
     trie.Prove(key, 0, &proofsV2.Proofs)

     // Assemble the requests for the different protocols
     requestsV2 := []HelperTrieReq{{
@@ -577,7 +577,7 @@ func testGetBloombitsProofs(t *testing.T, protocol int) {
     var proofs HelperTrieResps

     root := light.GetBloomTrieRoot(server.db, 0, bc.GetHeaderByNumber(config.BloomTrieSize-1).Hash())
-    trie, _ := trie.New(common.Hash{}, root, trie.NewDatabase(rawdb.NewTable(server.db, light.BloomTrieTablePrefix)))
+    trie, _ := trie.New(trie.TrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, light.BloomTrieTablePrefix)))
     trie.Prove(key, 0, &proofs.Proofs)

     // Send the proof request and verify the response

View File

@@ -104,6 +104,7 @@ func testAccess(t *testing.T, protocol int, fn accessTestFn) {
     bhash := rawdb.ReadCanonicalHash(server.db, i)
     if req := fn(client.db, bhash, i); req != nil {
         ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
+
         err := client.handler.backend.odr.Retrieve(ctx, req)
         cancel()

View File

@@ -359,7 +359,7 @@ func (h *serverHandler) AddTxsSync() bool {
 // getAccount retrieves an account from the state based on root.
 func getAccount(triedb *trie.Database, root, hash common.Hash) (types.StateAccount, error) {
-    trie, err := trie.New(common.Hash{}, root, triedb)
+    trie, err := trie.New(trie.StateTrieID(root), triedb)
     if err != nil {
         return types.StateAccount{}, err
     }
@@ -391,7 +391,7 @@ func (h *serverHandler) GetHelperTrie(typ uint, index uint64) *trie.Trie {
     if root == (common.Hash{}) {
         return nil
     }
-    trie, _ := trie.New(common.Hash{}, root, trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix)))
+    trie, _ := trie.New(trie.TrieID(root), trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix)))
     return trie
 }

View File

@@ -428,7 +428,7 @@ func handleGetProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) {
     p.bumpInvalid()
     continue
 }
-trie, err = statedb.OpenStorageTrie(common.BytesToHash(request.AccKey), account.Root)
+trie, err = statedb.OpenStorageTrie(root, common.BytesToHash(request.AccKey), account.Root)
 if trie == nil || err != nil {
     p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "root", account.Root, "err", err)
     continue

View File

@@ -53,9 +53,11 @@ type OdrRequest interface {
 // TrieID identifies a state or account storage trie
 type TrieID struct {
-    BlockHash, Root common.Hash
+    BlockHash   common.Hash
     BlockNumber uint64
-    AccKey      []byte
+    StateRoot   common.Hash
+    Root        common.Hash
+    AccKey      []byte
 }
 
 // StateTrieID returns a TrieID for a state trie belonging to a certain block
@@ -64,8 +66,9 @@ func StateTrieID(header *types.Header) *TrieID {
     return &TrieID{
         BlockHash:   header.Hash(),
         BlockNumber: header.Number.Uint64(),
-        AccKey:      nil,
+        StateRoot:   header.Root,
         Root:        header.Root,
+        AccKey:      nil,
     }
 }
@@ -76,6 +79,7 @@ func StorageTrieID(state *TrieID, addrHash, root common.Hash) *TrieID {
     return &TrieID{
         BlockHash:   state.BlockHash,
         BlockNumber: state.BlockNumber,
+        StateRoot:   state.StateRoot,
         AccKey:      addrHash[:],
         Root:        root,
     }
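With StateRoot carried in light.TrieID, an ODR storage request now remembers which state it was derived from, so the server side can resolve it against the correct state trie. A brief sketch of building such an identifier with the helpers shown above (the function name and hash arguments are illustrative):

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/light"
)

// storageRequestID builds the identifier a light client would attach to an
// ODR storage-trie request: the state root flows from the block header into
// the storage trie ID.
func storageRequestID(header *types.Header, addrHash, storageRoot common.Hash) *light.TrieID {
	stateID := light.StateTrieID(header)
	return light.StorageTrieID(stateID, addrHash, storageRoot)
}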

View File

@@ -87,7 +87,7 @@ func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error {
     t   state.Trie
 )
 if len(req.Id.AccKey) > 0 {
-    t, err = odr.serverState.OpenStorageTrie(common.BytesToHash(req.Id.AccKey), req.Id.Root)
+    t, err = odr.serverState.OpenStorageTrie(req.Id.StateRoot, common.BytesToHash(req.Id.AccKey), req.Id.Root)
 } else {
     t, err = odr.serverState.OpenTrie(req.Id.Root)
 }

View File

@ -25,7 +25,6 @@ import (
"math/big" "math/big"
"time" "time"
mapset "github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/bitutil" "github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
@ -134,7 +133,6 @@ type ChtIndexerBackend struct {
diskdb, trieTable ethdb.Database diskdb, trieTable ethdb.Database
odr OdrBackend odr OdrBackend
triedb *trie.Database triedb *trie.Database
trieset mapset.Set
section, sectionSize uint64 section, sectionSize uint64
lastHash common.Hash lastHash common.Hash
trie *trie.Trie trie *trie.Trie
@ -148,7 +146,6 @@ func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64, dis
odr: odr, odr: odr,
trieTable: trieTable, trieTable: trieTable,
triedb: trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down triedb: trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down
trieset: mapset.NewSet(),
sectionSize: size, sectionSize: size,
disablePruning: disablePruning, disablePruning: disablePruning,
} }
@ -187,12 +184,12 @@ func (c *ChtIndexerBackend) Reset(ctx context.Context, section uint64, lastSecti
root = GetChtRoot(c.diskdb, section-1, lastSectionHead) root = GetChtRoot(c.diskdb, section-1, lastSectionHead)
} }
var err error var err error
c.trie, err = trie.New(common.Hash{}, root, c.triedb) c.trie, err = trie.New(trie.TrieID(root), c.triedb)
if err != nil && c.odr != nil { if err != nil && c.odr != nil {
err = c.fetchMissingNodes(ctx, section, root) err = c.fetchMissingNodes(ctx, section, root)
if err == nil { if err == nil {
c.trie, err = trie.New(common.Hash{}, root, c.triedb) c.trie, err = trie.New(trie.TrieID(root), c.triedb)
} }
} }
c.section = section c.section = section
@ -226,38 +223,44 @@ func (c *ChtIndexerBackend) Commit() error {
if err := c.triedb.Update(trie.NewWithNodeSet(nodes)); err != nil { if err := c.triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
return err return err
} }
if err := c.triedb.Commit(root, false, nil); err != nil {
return err
}
} }
// Re-create trie with newly generated root and updated database. // Re-create trie with newly generated root and updated database.
c.trie, err = trie.New(common.Hash{}, root, c.triedb) c.trie, err = trie.New(trie.TrieID(root), c.triedb)
if err != nil { if err != nil {
return err return err
} }
// Pruning historical trie nodes if necessary. // Pruning historical trie nodes if necessary.
if !c.disablePruning { if !c.disablePruning {
// Flush the triedb and track the latest trie nodes.
c.trieset.Clear()
c.triedb.Commit(root, false, func(hash common.Hash) { c.trieset.Add(hash) })
it := c.trieTable.NewIterator(nil, nil) it := c.trieTable.NewIterator(nil, nil)
defer it.Release() defer it.Release()
var ( var (
deleted int deleted int
remaining int batch = c.trieTable.NewBatch()
t = time.Now() t = time.Now()
) )
for it.Next() { hashes := make(map[common.Hash]struct{})
trimmed := bytes.TrimPrefix(it.Key(), []byte(ChtTablePrefix)) if nodes != nil {
if !c.trieset.Contains(common.BytesToHash(trimmed)) { for _, hash := range nodes.Hashes() {
c.trieTable.Delete(trimmed) hashes[hash] = struct{}{}
deleted += 1
} else {
remaining += 1
} }
} }
log.Debug("Prune historical CHT trie nodes", "deleted", deleted, "remaining", remaining, "elapsed", common.PrettyDuration(time.Since(t))) for it.Next() {
} else { trimmed := bytes.TrimPrefix(it.Key(), []byte(ChtTablePrefix))
c.triedb.Commit(root, false, nil) if len(trimmed) == common.HashLength {
if _, ok := hashes[common.BytesToHash(trimmed)]; !ok {
batch.Delete(trimmed)
deleted += 1
}
}
}
if err := batch.Write(); err != nil {
return err
}
log.Debug("Prune historical CHT trie nodes", "deleted", deleted, "remaining", len(hashes), "elapsed", common.PrettyDuration(time.Since(t)))
} }
log.Info("Storing CHT", "section", c.section, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root)) log.Info("Storing CHT", "section", c.section, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
StoreChtRoot(c.diskdb, c.section, c.lastHash, root) StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
@ -333,7 +336,6 @@ type BloomTrieIndexerBackend struct {
disablePruning bool disablePruning bool
diskdb, trieTable ethdb.Database diskdb, trieTable ethdb.Database
triedb *trie.Database triedb *trie.Database
trieset mapset.Set
odr OdrBackend odr OdrBackend
section uint64 section uint64
parentSize uint64 parentSize uint64
@ -351,7 +353,6 @@ func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uin
odr: odr, odr: odr,
trieTable: trieTable, trieTable: trieTable,
triedb: trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down triedb: trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down
trieset: mapset.NewSet(),
parentSize: parentSize, parentSize: parentSize,
size: size, size: size,
disablePruning: disablePruning, disablePruning: disablePruning,
@ -414,11 +415,11 @@ func (b *BloomTrieIndexerBackend) Reset(ctx context.Context, section uint64, las
root = GetBloomTrieRoot(b.diskdb, section-1, lastSectionHead) root = GetBloomTrieRoot(b.diskdb, section-1, lastSectionHead)
} }
var err error var err error
b.trie, err = trie.New(common.Hash{}, root, b.triedb) b.trie, err = trie.New(trie.TrieID(root), b.triedb)
if err != nil && b.odr != nil { if err != nil && b.odr != nil {
err = b.fetchMissingNodes(ctx, section, root) err = b.fetchMissingNodes(ctx, section, root)
if err == nil { if err == nil {
b.trie, err = trie.New(common.Hash{}, root, b.triedb) b.trie, err = trie.New(trie.TrieID(root), b.triedb)
} }
} }
b.section = section b.section = section
@ -473,38 +474,44 @@ func (b *BloomTrieIndexerBackend) Commit() error {
if err := b.triedb.Update(trie.NewWithNodeSet(nodes)); err != nil { if err := b.triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
return err return err
} }
if err := b.triedb.Commit(root, false, nil); err != nil {
return err
}
} }
// Re-create trie with newly generated root and updated database. // Re-create trie with newly generated root and updated database.
b.trie, err = trie.New(common.Hash{}, root, b.triedb) b.trie, err = trie.New(trie.TrieID(root), b.triedb)
if err != nil { if err != nil {
return err return err
} }
// Pruning historical trie nodes if necessary. // Pruning historical trie nodes if necessary.
if !b.disablePruning { if !b.disablePruning {
// Flush the triedb and track the latest trie nodes.
b.trieset.Clear()
b.triedb.Commit(root, false, func(hash common.Hash) { b.trieset.Add(hash) })
it := b.trieTable.NewIterator(nil, nil) it := b.trieTable.NewIterator(nil, nil)
defer it.Release() defer it.Release()
var ( var (
deleted int deleted int
remaining int batch = b.trieTable.NewBatch()
t = time.Now() t = time.Now()
) )
for it.Next() { hashes := make(map[common.Hash]struct{})
trimmed := bytes.TrimPrefix(it.Key(), []byte(BloomTrieTablePrefix)) if nodes != nil {
if !b.trieset.Contains(common.BytesToHash(trimmed)) { for _, hash := range nodes.Hashes() {
b.trieTable.Delete(trimmed) hashes[hash] = struct{}{}
deleted += 1
} else {
remaining += 1
} }
} }
log.Debug("Prune historical bloom trie nodes", "deleted", deleted, "remaining", remaining, "elapsed", common.PrettyDuration(time.Since(t))) for it.Next() {
} else { trimmed := bytes.TrimPrefix(it.Key(), []byte(BloomTrieTablePrefix))
b.triedb.Commit(root, false, nil) if len(trimmed) == common.HashLength {
if _, ok := hashes[common.BytesToHash(trimmed)]; !ok {
batch.Delete(trimmed)
deleted += 1
}
}
}
if err := batch.Write(); err != nil {
return err
}
log.Debug("Prune historical bloom trie nodes", "deleted", deleted, "remaining", len(hashes), "elapsed", common.PrettyDuration(time.Since(t)))
} }
sectionHead := b.sectionHeads[b.bloomTrieRatio-1] sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root) StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)
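The new pruning path above derives its keep-set from the freshly committed NodeSet (via nodes.Hashes()) and batch-deletes every other hash-keyed entry in the table. A minimal, hedged sketch of the same keep-set pattern against a generic ethdb backend (pruneExcept is a hypothetical helper, not part of the les code):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
)

// pruneExcept deletes every 32-byte, hash-keyed entry in db that is not part
// of keep, mirroring the batch-based loop above. Sketch only.
func pruneExcept(db ethdb.Database, keep map[common.Hash]struct{}) (int, error) {
	it := db.NewIterator(nil, nil)
	defer it.Release()

	var (
		batch   = db.NewBatch()
		deleted int
	)
	for it.Next() {
		key := it.Key()
		if len(key) != common.HashLength {
			continue // not a trie node entry
		}
		if _, ok := keep[common.BytesToHash(key)]; !ok {
			batch.Delete(common.CopyBytes(key))
			deleted++
		}
	}
	if err := batch.Write(); err != nil {
		return 0, err
	}
	return deleted, nil
}
```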

View File

@ -54,7 +54,7 @@ func (db *odrDatabase) OpenTrie(root common.Hash) (state.Trie, error) {
return &odrTrie{db: db, id: db.id}, nil return &odrTrie{db: db, id: db.id}, nil
} }
func (db *odrDatabase) OpenStorageTrie(addrHash, root common.Hash) (state.Trie, error) { func (db *odrDatabase) OpenStorageTrie(state, addrHash, root common.Hash) (state.Trie, error) {
return &odrTrie{db: db, id: StorageTrieID(db.id, addrHash, root)}, nil return &odrTrie{db: db, id: StorageTrieID(db.id, addrHash, root)}, nil
} }
@ -63,8 +63,7 @@ func (db *odrDatabase) CopyTrie(t state.Trie) state.Trie {
case *odrTrie: case *odrTrie:
cpy := &odrTrie{db: t.db, id: t.id} cpy := &odrTrie{db: t.db, id: t.id}
if t.trie != nil { if t.trie != nil {
cpytrie := *t.trie cpy.trie = t.trie.Copy()
cpy.trie = &cpytrie
} }
return cpy return cpy
default: default:
@ -197,11 +196,13 @@ func (t *odrTrie) do(key []byte, fn func() error) error {
for { for {
var err error var err error
if t.trie == nil { if t.trie == nil {
var owner common.Hash var id *trie.ID
if len(t.id.AccKey) > 0 { if len(t.id.AccKey) > 0 {
owner = common.BytesToHash(t.id.AccKey) id = trie.StorageTrieID(t.id.StateRoot, common.BytesToHash(t.id.AccKey), t.id.Root)
} else {
id = trie.StateTrieID(t.id.StateRoot)
} }
t.trie, err = trie.New(owner, t.id.Root, trie.NewDatabase(t.db.backend.Database())) t.trie, err = trie.New(id, trie.NewDatabase(t.db.backend.Database()))
} }
if err == nil { if err == nil {
err = fn() err = fn()
@ -227,11 +228,13 @@ func newNodeIterator(t *odrTrie, startkey []byte) trie.NodeIterator {
// Open the actual non-ODR trie if that hasn't happened yet. // Open the actual non-ODR trie if that hasn't happened yet.
if t.trie == nil { if t.trie == nil {
it.do(func() error { it.do(func() error {
var owner common.Hash var id *trie.ID
if len(t.id.AccKey) > 0 { if len(t.id.AccKey) > 0 {
owner = common.BytesToHash(t.id.AccKey) id = trie.StorageTrieID(t.id.StateRoot, common.BytesToHash(t.id.AccKey), t.id.Root)
} else {
id = trie.StateTrieID(t.id.StateRoot)
} }
t, err := trie.New(owner, t.id.Root, trie.NewDatabase(t.db.backend.Database())) t, err := trie.New(id, trie.NewDatabase(t.db.backend.Database()))
if err == nil { if err == nil {
it.t.trie = t it.t.trie = t
} }
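Because the ODR trie is opened lazily, the identifier is rebuilt from the request context each time: an empty AccKey means the account (state) trie, a non-empty one a contract storage trie. A hedged sketch of that selection as a standalone helper (makeTrieID is a hypothetical name, not part of the light package):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie"
)

// makeTrieID mirrors the branch above: pick a storage-trie identifier when an
// account key is present, and a state-trie identifier otherwise.
func makeTrieID(stateRoot common.Hash, accKey []byte, root common.Hash) *trie.ID {
	if len(accKey) > 0 {
		return trie.StorageTrieID(stateRoot, common.BytesToHash(accKey), root)
	}
	return trie.StateTrieID(stateRoot)
}
```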

View File

@ -21,7 +21,6 @@ import (
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
) )
@ -170,7 +169,7 @@ func runRandTest(rt randTest) error {
return err return err
} }
} }
newtr, err := trie.New(common.Hash{}, hash, triedb) newtr, err := trie.New(trie.TrieID(hash), triedb)
if err != nil { if err != nil {
return err return err
} }

View File

@ -33,13 +33,15 @@ type leaf struct {
// insertion order. // insertion order.
type committer struct { type committer struct {
nodes *NodeSet nodes *NodeSet
tracer *tracer
collectLeaf bool collectLeaf bool
} }
// newCommitter creates a new committer or picks one from the pool. // newCommitter creates a new committer or picks one from the pool.
func newCommitter(owner common.Hash, collectLeaf bool) *committer { func newCommitter(owner common.Hash, tracer *tracer, collectLeaf bool) *committer {
return &committer{ return &committer{
nodes: NewNodeSet(owner), nodes: NewNodeSet(owner),
tracer: tracer,
collectLeaf: collectLeaf, collectLeaf: collectLeaf,
} }
} }
@ -51,6 +53,20 @@ func (c *committer) Commit(n node) (hashNode, *NodeSet, error) {
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
// Some nodes can be deleted from the trie without being captured by the
// committer itself. Iterate over all deleted nodes tracked by the tracer
// and mark them as deleted only if they were previously present in the
// database.
for _, path := range c.tracer.deleteList() {
// There are a few possibilities for this scenario (the node is deleted
// but was not present in the database previously), for example the node
// was embedded in its parent and is now deleted from the trie. In this
// case it's a no-op from the database's perspective.
val := c.tracer.getPrev(path)
if len(val) == 0 {
continue
}
c.nodes.markDeleted(path, val)
}
return h.(hashNode), c.nodes, nil return h.(hashNode), c.nodes, nil
} }
@ -83,6 +99,12 @@ func (c *committer) commit(path []byte, n node) (node, error) {
if hn, ok := hashedNode.(hashNode); ok { if hn, ok := hashedNode.(hashNode); ok {
return hn, nil return hn, nil
} }
// The short node is now embedded in its parent. Mark the node as
// deleted if it was previously present in the database; from the
// database's perspective this is equivalent to a deletion.
if prev := c.tracer.getPrev(path); len(prev) != 0 {
c.nodes.markDeleted(path, prev)
}
return collapsed, nil return collapsed, nil
case *fullNode: case *fullNode:
hashedKids, err := c.commitChildren(path, cn) hashedKids, err := c.commitChildren(path, cn)
@ -96,6 +118,12 @@ func (c *committer) commit(path []byte, n node) (node, error) {
if hn, ok := hashedNode.(hashNode); ok { if hn, ok := hashedNode.(hashNode); ok {
return hn, nil return hn, nil
} }
// The full node is now embedded in its parent. Mark the node as
// deleted if it was previously present in the database; from the
// database's perspective this is equivalent to a deletion.
if prev := c.tracer.getPrev(path); len(prev) != 0 {
c.nodes.markDeleted(path, prev)
}
return collapsed, nil return collapsed, nil
case hashNode: case hashNode:
return cn, nil return cn, nil
@ -161,7 +189,7 @@ func (c *committer) store(path []byte, n node) node {
} }
) )
// Collect the dirty node to nodeset for return. // Collect the dirty node to nodeset for return.
c.nodes.add(string(path), mnode) c.nodes.markUpdated(path, mnode, c.tracer.getPrev(path))
// Collect the corresponding leaf node if it's required. We don't check // Collect the corresponding leaf node if it's required. We don't check
// full node since it's impossible to store value in fullNode. The key // full node since it's impossible to store value in fullNode. The key
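With the tracer wired into the committer, nodes that vanish from the trie (removed outright or collapsed into their parent) now surface in the returned NodeSet instead of being silently dropped. A hedged, in-package sketch in the spirit of the tests further down (the tracer is still opt-in, as the commented-out newTracer() call in New shows):

```go
package trie

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// exampleCommitDeletions is a sketch only: it enables the (currently opt-in)
// tracer by hand, deletes a key and inspects the resulting NodeSet.
func exampleCommitDeletions() {
	db := NewDatabase(memorydb.New())

	tr := NewEmpty(db)
	tr.Update([]byte("do"), []byte("verb"))
	tr.Update([]byte("dog"), []byte("puppy"))
	tr.Update([]byte("doge"), []byte("coin"))
	root, nodes, _ := tr.Commit(false)
	db.Update(NewWithNodeSet(nodes))

	// Re-open the committed trie, enable tracking and resolve the root so
	// deletions can be matched against their database-resident blobs.
	tr, _ = New(TrieID(root), db)
	tr.tracer = newTracer()
	tr.resolveAndTrack(root.Bytes(), nil)

	tr.Delete([]byte("doge"))
	_, nodes, _ = tr.Commit(false)
	if nodes != nil {
		updates, deletes := nodes.Size()
		// The collapsed subtrie should show up in the deletion set.
		fmt.Println("updated:", updates, "deleted:", deletes)
	}
}
```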

View File

@ -795,8 +795,8 @@ func (db *Database) Update(nodes *MergedNodeSet) error {
} }
for _, owner := range order { for _, owner := range order {
subset := nodes.sets[owner] subset := nodes.sets[owner]
for _, path := range subset.paths { for _, path := range subset.updates.order {
n, ok := subset.nodes[path] n, ok := subset.updates.nodes[path]
if !ok { if !ok {
return fmt.Errorf("missing node %x %v", owner, path) return fmt.Errorf("missing node %x %v", owner, path)
} }
@ -837,6 +837,34 @@ func (db *Database) Size() (common.StorageSize, common.StorageSize) {
return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, preimageSize return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, preimageSize
} }
// GetReader retrieves a node reader belonging to the given state root.
func (db *Database) GetReader(root common.Hash) Reader {
return newHashReader(db)
}
// hashReader is a reader of the hash-based database which implements the Reader interface.
type hashReader struct {
db *Database
}
// newHashReader initializes the hash reader.
func newHashReader(db *Database) *hashReader {
return &hashReader{db: db}
}
// Node retrieves the trie node with the given node hash.
// No error will be returned if the node is not found.
func (reader *hashReader) Node(_ common.Hash, _ []byte, hash common.Hash) (node, error) {
return reader.db.node(hash), nil
}
// NodeBlob retrieves the RLP-encoded trie node blob with the given node hash.
// No error will be returned if the node is not found.
func (reader *hashReader) NodeBlob(_ common.Hash, _ []byte, hash common.Hash) ([]byte, error) {
blob, _ := reader.db.Node(hash)
return blob, nil
}
// saveCache saves clean state cache to given directory path // saveCache saves clean state cache to given directory path
// using specified CPU cores. // using specified CPU cores.
func (db *Database) saveCache(dir string, threads int) error { func (db *Database) saveCache(dir string, threads int) error {
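GetReader makes the existing hash-based Database the default NodeReader backend, so the usual commit path is unchanged apart from the NodeSet plumbing: Trie.Commit collects the dirty nodes, Database.Update journals them, and Database.Commit flushes them to disk. A minimal end-to-end sketch, assuming an in-memory rawdb backend:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	diskdb := rawdb.NewMemoryDatabase()
	triedb := trie.NewDatabase(diskdb) // hash-based backend, acts as the NodeReader

	tr := trie.NewEmpty(triedb)
	tr.Update([]byte("key"), []byte("some value large enough to be stored"))

	root, nodes, err := tr.Commit(false)
	if err != nil {
		panic(err)
	}
	if nodes != nil {
		// Journal the collected nodes in the trie database...
		if err := triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
			panic(err)
		}
	}
	// ...and flush everything reachable from root down to disk.
	if err := triedb.Commit(root, false, nil); err != nil {
		panic(err)
	}
	ok, _ := diskdb.Has(root.Bytes())
	fmt.Println("root node on disk:", ok)
}
```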

View File

@ -375,7 +375,12 @@ func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) {
} }
} }
} }
return it.trie.resolveHash(hash, path) // Retrieve the specified node from the underlying node reader.
// it.trie.resolveAndTrack is not used here because it would track every
// loaded blob; that is unnecessary since the loaded nodes are never
// linked into the trie, and tracking them could lead to out-of-memory
// issues.
return it.trie.reader.node(path, common.BytesToHash(hash))
} }
func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error) { func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error) {
@ -384,7 +389,12 @@ func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error)
return blob, nil return blob, nil
} }
} }
return it.trie.resolveBlob(hash, path) // Retrieve the specified node from the underlying node reader.
// it.trie.resolveAndTrack is not used here because it would track every
// loaded blob; that is unnecessary since the loaded nodes are never
// linked into the trie, and tracking them could lead to out-of-memory
// issues.
return it.trie.reader.nodeBlob(path, common.BytesToHash(hash))
} }
func (st *nodeIteratorState) resolve(it *nodeIterator, path []byte) error { func (st *nodeIteratorState) resolve(it *nodeIterator, path []byte) error {
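The iterator now resolves nodes through the trie reader without tracking them; usage is unchanged. A hedged sketch of walking a trie with the node iterator and the key/value wrapper:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
	tr := trie.NewEmpty(db)
	tr.Update([]byte("dog"), []byte("puppy"))
	tr.Update([]byte("dogglesworth"), []byte("cat"))
	tr.Hash() // hash the trie so interior nodes report their hashes

	// Walk every node; embedded nodes report a zero hash since they are
	// never stored on their own.
	it := tr.NodeIterator(nil)
	for it.Next(true) {
		switch {
		case it.Leaf():
			fmt.Printf("leaf %x = %s\n", it.LeafKey(), it.LeafBlob())
		case it.Hash() != (common.Hash{}):
			fmt.Printf("node %x at path %x\n", it.Hash(), it.Path())
		}
	}

	// Or iterate the key/value pairs only.
	kv := trie.NewIterator(tr.NodeIterator(nil))
	for kv.Next() {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value)
	}
}
```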

View File

@ -66,7 +66,7 @@ func TestIterator(t *testing.T) {
} }
db.Update(NewWithNodeSet(nodes)) db.Update(NewWithNodeSet(nodes))
trie, _ = New(common.Hash{}, root, db) trie, _ = New(TrieID(root), db)
found := make(map[string]string) found := make(map[string]string)
it := NewIterator(trie.NodeIterator(nil)) it := NewIterator(trie.NodeIterator(nil))
for it.Next() { for it.Next() {
@ -227,7 +227,7 @@ func TestDifferenceIterator(t *testing.T) {
} }
rootA, nodesA, _ := triea.Commit(false) rootA, nodesA, _ := triea.Commit(false)
dba.Update(NewWithNodeSet(nodesA)) dba.Update(NewWithNodeSet(nodesA))
triea, _ = New(common.Hash{}, rootA, dba) triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase()) dbb := NewDatabase(rawdb.NewMemoryDatabase())
trieb := NewEmpty(dbb) trieb := NewEmpty(dbb)
@ -236,7 +236,7 @@ func TestDifferenceIterator(t *testing.T) {
} }
rootB, nodesB, _ := trieb.Commit(false) rootB, nodesB, _ := trieb.Commit(false)
dbb.Update(NewWithNodeSet(nodesB)) dbb.Update(NewWithNodeSet(nodesB))
trieb, _ = New(common.Hash{}, rootB, dbb) trieb, _ = New(TrieID(rootB), dbb)
found := make(map[string]string) found := make(map[string]string)
di, _ := NewDifferenceIterator(triea.NodeIterator(nil), trieb.NodeIterator(nil)) di, _ := NewDifferenceIterator(triea.NodeIterator(nil), trieb.NodeIterator(nil))
@ -269,7 +269,7 @@ func TestUnionIterator(t *testing.T) {
} }
rootA, nodesA, _ := triea.Commit(false) rootA, nodesA, _ := triea.Commit(false)
dba.Update(NewWithNodeSet(nodesA)) dba.Update(NewWithNodeSet(nodesA))
triea, _ = New(common.Hash{}, rootA, dba) triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase()) dbb := NewDatabase(rawdb.NewMemoryDatabase())
trieb := NewEmpty(dbb) trieb := NewEmpty(dbb)
@ -278,7 +278,7 @@ func TestUnionIterator(t *testing.T) {
} }
rootB, nodesB, _ := trieb.Commit(false) rootB, nodesB, _ := trieb.Commit(false)
dbb.Update(NewWithNodeSet(nodesB)) dbb.Update(NewWithNodeSet(nodesB))
trieb, _ = New(common.Hash{}, rootB, dbb) trieb, _ = New(TrieID(rootB), dbb)
di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)}) di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)})
it := NewIterator(di) it := NewIterator(di)
@ -356,7 +356,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool) {
} }
for i := 0; i < 20; i++ { for i := 0; i < 20; i++ {
// Create trie that will load all nodes from DB. // Create trie that will load all nodes from DB.
tr, _ := New(common.Hash{}, tr.Hash(), triedb) tr, _ := New(TrieID(tr.Hash()), triedb)
// Remove a random node from the database. It can't be the root node // Remove a random node from the database. It can't be the root node
// because that one is already loaded. // because that one is already loaded.
@ -445,7 +445,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
} }
// Create a new iterator that seeks to "bars". Seeking can't proceed because // Create a new iterator that seeks to "bars". Seeking can't proceed because
// the node is missing. // the node is missing.
tr, _ := New(common.Hash{}, root, triedb) tr, _ := New(TrieID(root), triedb)
it := tr.NodeIterator([]byte("bars")) it := tr.NodeIterator([]byte("bars"))
missing, ok := it.Error().(*MissingNodeError) missing, ok := it.Error().(*MissingNodeError)
if !ok { if !ok {
@ -533,7 +533,7 @@ func makeLargeTestTrie() (*Database, *StateTrie, *loggingDb) {
// Create an empty trie // Create an empty trie
logDb := &loggingDb{0, memorydb.New()} logDb := &loggingDb{0, memorydb.New()}
triedb := NewDatabase(logDb) triedb := NewDatabase(logDb)
trie, _ := NewStateTrie(common.Hash{}, common.Hash{}, triedb) trie, _ := NewStateTrie(TrieID(common.Hash{}), triedb)
// Fill it with some arbitrary data // Fill it with some arbitrary data
for i := 0; i < 10000; i++ { for i := 0; i < 10000; i++ {

View File

@ -18,6 +18,8 @@ package trie
import ( import (
"fmt" "fmt"
"reflect"
"strings"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
) )
@ -25,18 +27,77 @@ import (
// memoryNode is all the information we know about a single cached trie node // memoryNode is all the information we know about a single cached trie node
// in the memory. // in the memory.
type memoryNode struct { type memoryNode struct {
hash common.Hash // Node hash, computed by hashing rlp value hash common.Hash // Node hash, computed by hashing rlp value, empty for deleted nodes
size uint16 // Byte size of the useful cached data size uint16 // Byte size of the useful cached data, 0 for deleted nodes
node node // Cached collapsed trie node, or raw rlp data node node // Cached collapsed trie node, or raw rlp data, nil for deleted nodes
}
// memoryNodeSize is the raw size of a memoryNode data structure without any
// node data included. It's an approximate size, but should be a lot better
// than not counting them.
// nolint:unused
var memoryNodeSize = int(reflect.TypeOf(memoryNode{}).Size())
// memorySize returns the total memory size used by this node.
// nolint:unused
func (n *memoryNode) memorySize(key int) int {
return int(n.size) + memoryNodeSize + key
}
// rlp returns the raw rlp encoded blob of the cached trie node, either directly
// from the cache, or by regenerating it from the collapsed node.
// nolint:unused
func (n *memoryNode) rlp() []byte {
if node, ok := n.node.(rawNode); ok {
return node
}
return nodeToBytes(n.node)
}
// obj returns the decoded and expanded trie node, either directly from the cache,
// or by regenerating it from the rlp encoded blob.
// nolint:unused
func (n *memoryNode) obj() node {
if node, ok := n.node.(rawNode); ok {
return mustDecodeNode(n.hash[:], node)
}
return expandNode(n.hash[:], n.node)
}
// nodeWithPrev wraps the memoryNode with the previous node value.
type nodeWithPrev struct {
*memoryNode
prev []byte // RLP-encoded previous value, nil means it's non-existent
}
// unwrap returns the internal memoryNode object.
// nolint:unused
func (n *nodeWithPrev) unwrap() *memoryNode {
return n.memoryNode
}
// memorySize returns the total memory size used by this node. It overloads
// the function in memoryNode by counting the size of previous value as well.
// nolint: unused
func (n *nodeWithPrev) memorySize(key int) int {
return n.memoryNode.memorySize(key) + len(n.prev)
}
// nodesWithOrder represents a collection of dirty nodes which includes
// newly-inserted and updated nodes. The modification order of all nodes
// is kept in the order list.
type nodesWithOrder struct {
order []string // the path list of dirty nodes, sorted by insertion order
nodes map[string]*nodeWithPrev // the map of dirty nodes, keyed by node path
} }
// NodeSet contains all dirty nodes collected during the commit operation. // NodeSet contains all dirty nodes collected during the commit operation.
// Each node is keyed by path. It's not thread-safe to use. // Each node is keyed by path. It's not thread-safe to use.
type NodeSet struct { type NodeSet struct {
owner common.Hash // the identifier of the trie owner common.Hash // the identifier of the trie
paths []string // the path of dirty nodes, sort by insertion order updates *nodesWithOrder // the set of updated nodes (newly inserted or updated)
nodes map[string]*memoryNode // the map of dirty nodes, keyed by node path deletes map[string][]byte // the map of deleted nodes, keyed by node path
leaves []*leaf // the list of dirty leaves leaves []*leaf // the list of dirty leaves
} }
// NewNodeSet initializes an empty node set to be used for tracking dirty nodes // NewNodeSet initializes an empty node set to be used for tracking dirty nodes
@ -45,24 +106,78 @@ type NodeSet struct {
func NewNodeSet(owner common.Hash) *NodeSet { func NewNodeSet(owner common.Hash) *NodeSet {
return &NodeSet{ return &NodeSet{
owner: owner, owner: owner,
nodes: make(map[string]*memoryNode), updates: &nodesWithOrder{
nodes: make(map[string]*nodeWithPrev),
},
deletes: make(map[string][]byte),
} }
} }
// add caches node with provided path and node object. // NewNodeSetWithDeletion initializes the nodeset with provided deletion set.
func (set *NodeSet) add(path string, node *memoryNode) { func NewNodeSetWithDeletion(owner common.Hash, paths [][]byte, prev [][]byte) *NodeSet {
set.paths = append(set.paths, path) set := NewNodeSet(owner)
set.nodes[path] = node for i, path := range paths {
set.markDeleted(path, prev[i])
}
return set
} }
// addLeaf caches the provided leaf node. // markUpdated marks the node as dirty (newly-inserted or updated) with the
// provided node path and node object, along with its previous value.
func (set *NodeSet) markUpdated(path []byte, node *memoryNode, prev []byte) {
set.updates.order = append(set.updates.order, string(path))
set.updates.nodes[string(path)] = &nodeWithPrev{
memoryNode: node,
prev: prev,
}
}
// markDeleted marks the node as deleted with provided path and previous value.
func (set *NodeSet) markDeleted(path []byte, prev []byte) {
set.deletes[string(path)] = prev
}
// addLeaf collects the provided leaf node into set.
func (set *NodeSet) addLeaf(node *leaf) { func (set *NodeSet) addLeaf(node *leaf) {
set.leaves = append(set.leaves, node) set.leaves = append(set.leaves, node)
} }
// Len returns the number of dirty nodes contained in the set. // Size returns the number of updated and deleted nodes contained in the set.
func (set *NodeSet) Len() int { func (set *NodeSet) Size() (int, int) {
return len(set.nodes) return len(set.updates.order), len(set.deletes)
}
// Hashes returns the hashes of all updated nodes. TODO(rjl493456442) how can
// we get rid of it?
func (set *NodeSet) Hashes() []common.Hash {
var ret []common.Hash
for _, node := range set.updates.nodes {
ret = append(ret, node.hash)
}
return ret
}
// Summary returns a string-representation of the NodeSet.
func (set *NodeSet) Summary() string {
var out = new(strings.Builder)
fmt.Fprintf(out, "nodeset owner: %v\n", set.owner)
if set.updates != nil {
for _, key := range set.updates.order {
updated := set.updates.nodes[key]
if updated.prev != nil {
fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", key, updated.hash, updated.prev)
} else {
fmt.Fprintf(out, " [+]: %x -> %v\n", key, updated.hash)
}
}
}
for k, n := range set.deletes {
fmt.Fprintf(out, " [-]: %x -> %x\n", k, n)
}
for _, n := range set.leaves {
fmt.Fprintf(out, "[leaf]: %v\n", n)
}
return out.String()
} }
// MergedNodeSet represents a merged dirty node set for a group of tries. // MergedNodeSet represents a merged dirty node set for a group of tries.
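Size and Summary give callers a quick view of what a commit produced: the number of updated and deleted nodes, and a per-path [+]/[*]/[-] listing. A short hedged sketch of inspecting a freshly returned set:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
	tr := trie.NewEmpty(db)
	tr.Update([]byte("ether"), []byte("wookiedoo"))
	tr.Update([]byte("horse"), []byte("stallion"))
	tr.Update([]byte("shaman"), []byte("horse"))

	_, nodes, err := tr.Commit(false)
	if err != nil {
		panic(err)
	}
	if nodes != nil {
		updates, deletes := nodes.Size()
		fmt.Printf("updated=%d deleted=%d\n", updates, deletes)
		// Summary prints [+] inserts, [*] updates with their previous value
		// and [-] deletions, keyed by node path.
		fmt.Println(nodes.Summary())
	}
}
```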

View File

@ -22,7 +22,6 @@ import (
"fmt" "fmt"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
@ -60,8 +59,13 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e
key = key[1:] key = key[1:]
nodes = append(nodes, n) nodes = append(nodes, n)
case hashNode: case hashNode:
// Retrieve the specified node from the underlying node reader.
// trie.resolveAndTrack is not used here because it would track every
// loaded blob; that is unnecessary since the loaded nodes are never
// linked into the trie, and tracking them could lead to out-of-memory
// issues.
var err error var err error
tn, err = t.resolveHash(n, prefix) tn, err = t.reader.node(prefix, common.BytesToHash(n))
if err != nil { if err != nil {
log.Error("Unhandled trie error in Trie.Prove", "err", err) log.Error("Unhandled trie error in Trie.Prove", "err", err)
return err return err
@ -559,7 +563,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key
} }
// Rebuild the trie with the leaf stream, the shape of trie // Rebuild the trie with the leaf stream, the shape of trie
// should be same with the original one. // should be same with the original one.
tr := &Trie{root: root, db: NewDatabase(rawdb.NewMemoryDatabase())} tr := &Trie{root: root, reader: newEmptyReader()}
if empty { if empty {
tr.root = nil tr.root = nil
} }
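Prove now reads missing nodes through the trie reader as well, and VerifyRangeProof rebuilds its scratch trie with an empty reader since the nodes it needs are already supplied by the proof. Generating and checking a single-key proof is unchanged; a brief sketch:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	db := trie.NewDatabase(rawdb.NewMemoryDatabase())
	tr := trie.NewEmpty(db)
	tr.Update([]byte("do"), []byte("verb"))
	tr.Update([]byte("dog"), []byte("puppy"))
	root := tr.Hash()

	// Collect the proof nodes for one key into a throwaway key/value store,
	// then verify the proof against nothing but the root hash.
	proofDb := rawdb.NewMemoryDatabase()
	if err := tr.Prove([]byte("dog"), 0, proofDb); err != nil {
		panic(err)
	}
	val, err := trie.VerifyProof(root, []byte("dog"), proofDb)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", val) // puppy
}
```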

View File

@ -29,8 +29,13 @@ type SecureTrie = StateTrie
// NewSecure creates a new StateTrie. // NewSecure creates a new StateTrie.
// Deprecated: use NewStateTrie. // Deprecated: use NewStateTrie.
func NewSecure(owner common.Hash, root common.Hash, db *Database) (*SecureTrie, error) { func NewSecure(stateRoot common.Hash, owner common.Hash, root common.Hash, db *Database) (*SecureTrie, error) {
return NewStateTrie(owner, root, db) id := &ID{
StateRoot: stateRoot,
Owner: owner,
Root: root,
}
return NewStateTrie(id, db)
} }
// StateTrie wraps a trie with key hashing. In a stateTrie trie, all // StateTrie wraps a trie with key hashing. In a stateTrie trie, all
@ -56,11 +61,11 @@ type StateTrie struct {
// If root is the zero hash or the sha3 hash of an empty string, the // If root is the zero hash or the sha3 hash of an empty string, the
// trie is initially empty. Otherwise, New will panic if db is nil // trie is initially empty. Otherwise, New will panic if db is nil
// and returns MissingNodeError if the root node cannot be found. // and returns MissingNodeError if the root node cannot be found.
func NewStateTrie(owner common.Hash, root common.Hash, db *Database) (*StateTrie, error) { func NewStateTrie(id *ID, db *Database) (*StateTrie, error) {
if db == nil { if db == nil {
panic("trie.NewStateTrie called without a database") panic("trie.NewStateTrie called without a database")
} }
trie, err := New(owner, root, db) trie, err := New(id, db)
if err != nil { if err != nil {
return nil, err return nil, err
} }
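Callers of the deprecated NewSecure constructor migrate by wrapping the three hashes into an identifier; NewStateTrie keeps hashing keys internally, so lookups still take plain keys. A hedged sketch of the new call shape:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	db := trie.NewDatabase(rawdb.NewMemoryDatabase())

	// Previously: trie.NewSecure(owner, root, db). The trie is now addressed
	// by an explicit identifier instead; TrieID(common.Hash{}) opens an
	// empty one.
	st, err := trie.NewStateTrie(trie.TrieID(common.Hash{}), db)
	if err != nil {
		panic(err)
	}
	// Keys are keccak-hashed internally; callers keep using the plain key.
	st.Update([]byte("dog"), []byte("puppy"))
	val, _ := st.TryGet([]byte("dog"))
	fmt.Printf("%s\n", val) // puppy
}
```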

View File

@ -29,7 +29,7 @@ import (
) )
func newEmptySecure() *StateTrie { func newEmptySecure() *StateTrie {
trie, _ := NewStateTrie(common.Hash{}, common.Hash{}, NewDatabase(memorydb.New())) trie, _ := NewStateTrie(TrieID(common.Hash{}), NewDatabase(memorydb.New()))
return trie return trie
} }
@ -37,7 +37,7 @@ func newEmptySecure() *StateTrie {
func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) { func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) {
// Create an empty trie // Create an empty trie
triedb := NewDatabase(memorydb.New()) triedb := NewDatabase(memorydb.New())
trie, _ := NewStateTrie(common.Hash{}, common.Hash{}, triedb) trie, _ := NewStateTrie(TrieID(common.Hash{}), triedb)
// Fill it with some arbitrary data // Fill it with some arbitrary data
content := make(map[string][]byte) content := make(map[string][]byte)
@ -66,7 +66,7 @@ func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) {
panic(fmt.Errorf("failed to commit db %v", err)) panic(fmt.Errorf("failed to commit db %v", err))
} }
// Re-create the trie based on the new state // Re-create the trie based on the new state
trie, _ = NewSecure(common.Hash{}, root, triedb) trie, _ = NewStateTrie(TrieID(root), triedb)
return triedb, trie, content return triedb, trie, content
} }

View File

@ -30,7 +30,7 @@ import (
func makeTestTrie() (*Database, *StateTrie, map[string][]byte) { func makeTestTrie() (*Database, *StateTrie, map[string][]byte) {
// Create an empty trie // Create an empty trie
triedb := NewDatabase(memorydb.New()) triedb := NewDatabase(memorydb.New())
trie, _ := NewStateTrie(common.Hash{}, common.Hash{}, triedb) trie, _ := NewStateTrie(TrieID(common.Hash{}), triedb)
// Fill it with some arbitrary data // Fill it with some arbitrary data
content := make(map[string][]byte) content := make(map[string][]byte)
@ -59,7 +59,7 @@ func makeTestTrie() (*Database, *StateTrie, map[string][]byte) {
panic(fmt.Errorf("failed to commit db %v", err)) panic(fmt.Errorf("failed to commit db %v", err))
} }
// Re-create the trie based on the new state // Re-create the trie based on the new state
trie, _ = NewSecure(common.Hash{}, root, triedb) trie, _ = NewStateTrie(TrieID(root), triedb)
return triedb, trie, content return triedb, trie, content
} }
@ -67,7 +67,7 @@ func makeTestTrie() (*Database, *StateTrie, map[string][]byte) {
// content map. // content map.
func checkTrieContents(t *testing.T, db *Database, root []byte, content map[string][]byte) { func checkTrieContents(t *testing.T, db *Database, root []byte, content map[string][]byte) {
// Check root availability and trie contents // Check root availability and trie contents
trie, err := NewStateTrie(common.Hash{}, common.BytesToHash(root), db) trie, err := NewStateTrie(TrieID(common.BytesToHash(root)), db)
if err != nil { if err != nil {
t.Fatalf("failed to create trie at %x: %v", root, err) t.Fatalf("failed to create trie at %x: %v", root, err)
} }
@ -84,7 +84,7 @@ func checkTrieContents(t *testing.T, db *Database, root []byte, content map[stri
// checkTrieConsistency checks that all nodes in a trie are indeed present. // checkTrieConsistency checks that all nodes in a trie are indeed present.
func checkTrieConsistency(db *Database, root common.Hash) error { func checkTrieConsistency(db *Database, root common.Hash) error {
// Create and iterate a trie rooted in a subnode // Create and iterate a trie rooted in a subnode
trie, err := NewStateTrie(common.Hash{}, root, db) trie, err := NewStateTrie(TrieID(root), db)
if err != nil { if err != nil {
return nil // Consider a non existent state consistent return nil // Consider a non existent state consistent
} }
@ -105,8 +105,8 @@ type trieElement struct {
func TestEmptySync(t *testing.T) { func TestEmptySync(t *testing.T) {
dbA := NewDatabase(memorydb.New()) dbA := NewDatabase(memorydb.New())
dbB := NewDatabase(memorydb.New()) dbB := NewDatabase(memorydb.New())
emptyA := NewEmpty(dbA) emptyA, _ := New(TrieID(common.Hash{}), dbA)
emptyB, _ := New(common.Hash{}, emptyRoot, dbB) emptyB, _ := New(TrieID(emptyRoot), dbB)
for i, trie := range []*Trie{emptyA, emptyB} { for i, trie := range []*Trie{emptyA, emptyB} {
sync := NewSync(trie.Hash(), memorydb.New(), nil) sync := NewSync(trie.Hash(), memorydb.New(), nil)

View File

@ -67,9 +67,8 @@ type Trie struct {
// actually unhashed nodes. // actually unhashed nodes.
unhashed int unhashed int
// db is the handler trie can retrieve nodes from. It's // reader is the handler trie can retrieve nodes from.
// only for reading purpose and not available for writing. reader *trieReader
db *Database
// tracer is the tool to track the trie changes. // tracer is the tool to track the trie changes.
// It will be reset after each commit operation. // It will be reset after each commit operation.
@ -87,26 +86,29 @@ func (t *Trie) Copy() *Trie {
root: t.root, root: t.root,
owner: t.owner, owner: t.owner,
unhashed: t.unhashed, unhashed: t.unhashed,
db: t.db, reader: t.reader,
tracer: t.tracer.copy(), tracer: t.tracer.copy(),
} }
} }
// New creates a trie with an existing root node from db and an assigned // New creates a trie instance with the provided trie id and a read-only
// owner for storage proximity. // database. The state specified by the trie id must be available, otherwise
// // an error is returned. If the trie root specified by the id is the zero
// If root is the zero hash or the sha3 hash of an empty string, the // hash or the sha3 hash of an empty string, the trie is initially empty;
// trie is initially empty and does not require a database. Otherwise, // otherwise the root node must be present in the database, or a
// New will panic if db is nil and returns a MissingNodeError if root does // MissingNodeError is returned.
// not exist in the database. Accessing the trie loads nodes from db on demand. func New(id *ID, db NodeReader) (*Trie, error) {
func New(owner common.Hash, root common.Hash, db *Database) (*Trie, error) { reader, err := newTrieReader(id.StateRoot, id.Owner, db)
if err != nil {
return nil, err
}
trie := &Trie{ trie := &Trie{
owner: owner, owner: id.Owner,
db: db, reader: reader,
//tracer: newTracer(), //tracer: newTracer(),
} }
if root != (common.Hash{}) && root != emptyRoot { if id.Root != (common.Hash{}) && id.Root != emptyRoot {
rootnode, err := trie.resolveHash(root[:], nil) rootnode, err := trie.resolveAndTrack(id.Root[:], nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -117,7 +119,7 @@ func New(owner common.Hash, root common.Hash, db *Database) (*Trie, error) {
// NewEmpty is a shortcut to create empty tree. It's mostly used in tests. // NewEmpty is a shortcut to create empty tree. It's mostly used in tests.
func NewEmpty(db *Database) *Trie { func NewEmpty(db *Database) *Trie {
tr, _ := New(common.Hash{}, common.Hash{}, db) tr, _ := New(TrieID(common.Hash{}), db)
return tr return tr
} }
@ -173,7 +175,7 @@ func (t *Trie) tryGet(origNode node, key []byte, pos int) (value []byte, newnode
} }
return value, n, didResolve, err return value, n, didResolve, err
case hashNode: case hashNode:
child, err := t.resolveHash(n, key[:pos]) child, err := t.resolveAndTrack(n, key[:pos])
if err != nil { if err != nil {
return nil, n, true, err return nil, n, true, err
} }
@ -219,7 +221,7 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new
if hash == nil { if hash == nil {
return nil, origNode, 0, errors.New("non-consensus node") return nil, origNode, 0, errors.New("non-consensus node")
} }
blob, err := t.db.Node(common.BytesToHash(hash)) blob, err := t.reader.nodeBlob(path, common.BytesToHash(hash))
return blob, origNode, 1, err return blob, origNode, 1, err
} }
// Path still needs to be traversed, descend into children // Path still needs to be traversed, descend into children
@ -249,7 +251,7 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new
return item, n, resolved, err return item, n, resolved, err
case hashNode: case hashNode:
child, err := t.resolveHash(n, path[:pos]) child, err := t.resolveAndTrack(n, path[:pos])
if err != nil { if err != nil {
return nil, n, 1, err return nil, n, 1, err
} }
@ -370,7 +372,7 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
// We've hit a part of the trie that isn't loaded yet. Load // We've hit a part of the trie that isn't loaded yet. Load
// the node and insert into it. This leaves all child nodes on // the node and insert into it. This leaves all child nodes on
// the path to the value in the trie. // the path to the value in the trie.
rn, err := t.resolveHash(n, prefix) rn, err := t.resolveAndTrack(n, prefix)
if err != nil { if err != nil {
return false, nil, err return false, nil, err
} }
@ -524,7 +526,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
// We've hit a part of the trie that isn't loaded yet. Load // We've hit a part of the trie that isn't loaded yet. Load
// the node and delete from it. This leaves all child nodes on // the node and delete from it. This leaves all child nodes on
// the path to the value in the trie. // the path to the value in the trie.
rn, err := t.resolveHash(n, prefix) rn, err := t.resolveAndTrack(n, prefix)
if err != nil { if err != nil {
return false, nil, err return false, nil, err
} }
@ -548,30 +550,22 @@ func concat(s1 []byte, s2 ...byte) []byte {
func (t *Trie) resolve(n node, prefix []byte) (node, error) { func (t *Trie) resolve(n node, prefix []byte) (node, error) {
if n, ok := n.(hashNode); ok { if n, ok := n.(hashNode); ok {
return t.resolveHash(n, prefix) return t.resolveAndTrack(n, prefix)
} }
return n, nil return n, nil
} }
// resolveHash loads node from the underlying database with the provided // resolveAndTrack loads a node from the underlying store with the given node
// node hash and path prefix. // hash and path prefix, and also records the loaded node blob in the tracer as
// func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) { // the node's original value. Loading the rlp-encoded blob is preferred because
// hash := common.BytesToHash(n) // decoding a blob into a node is easy, while re-encoding a node is not.
if node := t.db.node(hash); node != nil { func (t *Trie) resolveAndTrack(n hashNode, prefix []byte) (node, error) {
return node, nil blob, err := t.reader.nodeBlob(prefix, common.BytesToHash(n))
if err != nil {
return nil, err
} }
return nil, &MissingNodeError{Owner: t.owner, NodeHash: hash, Path: prefix} t.tracer.onRead(prefix, blob)
} return mustDecodeNode(n, blob), nil
// resolveHash loads rlp-encoded node blob from the underlying database
// with the provided node hash and path prefix.
func (t *Trie) resolveBlob(n hashNode, prefix []byte) ([]byte, error) {
hash := common.BytesToHash(n)
blob, _ := t.db.Node(hash)
if len(blob) != 0 {
return blob, nil
}
return nil, &MissingNodeError{Owner: t.owner, NodeHash: hash, Path: prefix}
} }
// Hash returns the root hash of the trie. It does not write to the // Hash returns the root hash of the trie. It does not write to the
@ -606,7 +600,7 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) {
t.root = hashedNode t.root = hashedNode
return rootHash, nil, nil return rootHash, nil, nil
} }
h := newCommitter(t.owner, collectLeaf) h := newCommitter(t.owner, t.tracer, collectLeaf)
newRoot, nodes, err := h.Commit(t.root) newRoot, nodes, err := h.Commit(t.root)
if err != nil { if err != nil {
return common.Hash{}, nil, err return common.Hash{}, nil, err
@ -633,6 +627,5 @@ func (t *Trie) Reset() {
t.root = nil t.root = nil
t.owner = common.Hash{} t.owner = common.Hash{}
t.unhashed = 0 t.unhashed = 0
//t.db = nil
t.tracer.reset() t.tracer.reset()
} }
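The constructor change ripples through every caller: instead of (owner, root) pairs, tries are opened by ID against any NodeReader, and nodes are then resolved lazily (and tracked) through the trie reader. A minimal sketch of committing a trie and re-opening it through its identifier:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	db := trie.NewDatabase(rawdb.NewMemoryDatabase())

	// Opening with the zero root yields an initially empty trie.
	tr, _ := trie.New(trie.TrieID(common.Hash{}), db)
	tr.Update([]byte("120000"), []byte("qwerqwerqwerqwerqwerqwerqwerqwer"))
	tr.Update([]byte("123456"), []byte("asdfasdfasdfasdfasdfasdfasdfasdf"))
	root, nodes, _ := tr.Commit(false)
	if nodes != nil {
		db.Update(trie.NewWithNodeSet(nodes))
	}

	// Re-open the committed state by its identifier; the root (and any
	// deeper node) is resolved on demand through the trie reader.
	reopened, err := trie.New(trie.TrieID(root), db)
	if err != nil {
		panic(err)
	}
	val, _ := reopened.TryGet([]byte("120000"))
	fmt.Printf("%s\n", val)
}
```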

trie/trie_id.go (new file, 55 lines)
View File

@ -0,0 +1,55 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
package trie
import "github.com/ethereum/go-ethereum/common"
// ID is the identifier for uniquely identifying a trie.
type ID struct {
StateRoot common.Hash // The root of the corresponding state (block.root)
Owner common.Hash // The contract address hash which the trie belongs to
Root common.Hash // The root hash of trie
}
// StateTrieID constructs an identifier for state trie with the provided state root.
func StateTrieID(root common.Hash) *ID {
return &ID{
StateRoot: root,
Owner: common.Hash{},
Root: root,
}
}
// StorageTrieID constructs an identifier for storage trie which belongs to a certain
// state and contract specified by the stateRoot and owner.
func StorageTrieID(stateRoot common.Hash, owner common.Hash, root common.Hash) *ID {
return &ID{
StateRoot: stateRoot,
Owner: owner,
Root: root,
}
}
// TrieID constructs an identifier for a standard trie (not a second-layer trie)
// with the provided root. It's mostly used in tests and for other tries like the CHT trie.
func TrieID(root common.Hash) *ID {
return &ID{
StateRoot: root,
Owner: common.Hash{},
Root: root,
}
}
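For reference, the three constructors cover the three kinds of tries go-ethereum deals with; a hedged sketch of how each identifier is built (identifiers is a hypothetical helper, parameter names are illustrative):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie"
)

// identifiers shows the intended use of each constructor.
func identifiers(stateRoot, accountHash, storageRoot, chtRoot common.Hash) []*trie.ID {
	return []*trie.ID{
		// Account (state) trie of a given state: StateRoot == Root.
		trie.StateTrieID(stateRoot),
		// Storage trie of one contract inside that state; Owner is the hash
		// of the account the trie belongs to.
		trie.StorageTrieID(stateRoot, accountHash, storageRoot),
		// Standalone tries such as the les CHT/bloom tries, or test tries.
		trie.TrieID(chtRoot),
	}
}
```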

trie/trie_reader.go (new file, 106 lines)
View File

@ -0,0 +1,106 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package trie
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
)
// Reader wraps the Node and NodeBlob method of a backing trie store.
type Reader interface {
// Node retrieves the trie node with the provided trie identifier, hexary
// node path and the corresponding node hash.
// No error will be returned if the node is not found.
Node(owner common.Hash, path []byte, hash common.Hash) (node, error)
// NodeBlob retrieves the RLP-encoded trie node blob with the provided trie
// identifier, hexary node path and the corresponding node hash.
// No error will be returned if the node is not found.
NodeBlob(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
}
// NodeReader wraps all the necessary functions for accessing trie node.
type NodeReader interface {
// GetReader returns a reader for accessing all trie nodes with provided
// state root. Nil is returned in case the state is not available.
GetReader(root common.Hash) Reader
}
// trieReader is a wrapper of the underlying node reader. It's not safe
// for concurrent usage.
type trieReader struct {
owner common.Hash
reader Reader
banned map[string]struct{} // Marker to prevent node from being accessed, for tests
}
// newTrieReader initializes the trie reader with the given node reader.
func newTrieReader(stateRoot, owner common.Hash, db NodeReader) (*trieReader, error) {
reader := db.GetReader(stateRoot)
if reader == nil {
return nil, fmt.Errorf("state not found #%x", stateRoot)
}
return &trieReader{owner: owner, reader: reader}, nil
}
// newEmptyReader initializes a pure in-memory reader. All read operations
// are forbidden and return a MissingNodeError.
func newEmptyReader() *trieReader {
return &trieReader{}
}
// node retrieves the trie node with the provided trie node information.
// A MissingNodeError will be returned in case the node is not found or
// any error is encountered.
func (r *trieReader) node(path []byte, hash common.Hash) (node, error) {
// Perform the logic used in tests to prevent trie node access.
if r.banned != nil {
if _, ok := r.banned[string(path)]; ok {
return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
}
}
if r.reader == nil {
return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
}
node, err := r.reader.Node(r.owner, path, hash)
if err != nil || node == nil {
return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path, err: err}
}
return node, nil
}
// nodeBlob retrieves the rlp-encoded trie node with the provided trie node
// information. A MissingNodeError will be returned in case the node is
// not found or any error is encountered.
func (r *trieReader) nodeBlob(path []byte, hash common.Hash) ([]byte, error) {
// Perform the logic used in tests to prevent trie node access.
if r.banned != nil {
if _, ok := r.banned[string(path)]; ok {
return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
}
}
if r.reader == nil {
return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
}
blob, err := r.reader.NodeBlob(r.owner, path, hash)
if err != nil || len(blob) == 0 {
return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path, err: err}
}
return blob, nil
}
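When the backing state is unavailable, New fails up front ("state not found"); when an individual node is gone, lookups surface a MissingNodeError. A short sketch of the latter, reusing the root from TestMissingRoot below:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	db := trie.NewDatabase(rawdb.NewMemoryDatabase())

	// The root node for this hash is not in the backend, so the reader
	// reports it as missing when New tries to resolve it.
	root := common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
	if _, err := trie.New(trie.TrieID(root), db); err != nil {
		if _, ok := err.(*trie.MissingNodeError); ok {
			fmt.Println("missing node:", err)
		}
	}
}
```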

View File

@ -64,7 +64,8 @@ func TestNull(t *testing.T) {
} }
func TestMissingRoot(t *testing.T) { func TestMissingRoot(t *testing.T) {
trie, err := New(common.Hash{}, common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), NewDatabase(memorydb.New())) root := common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
trie, err := New(TrieID(root), NewDatabase(memorydb.New()))
if trie != nil { if trie != nil {
t.Error("New returned non-nil trie for invalid root") t.Error("New returned non-nil trie for invalid root")
} }
@ -89,27 +90,27 @@ func testMissingNode(t *testing.T, memonly bool) {
triedb.Commit(root, true, nil) triedb.Commit(root, true, nil)
} }
trie, _ = New(common.Hash{}, root, triedb) trie, _ = New(TrieID(root), triedb)
_, err := trie.TryGet([]byte("120000")) _, err := trie.TryGet([]byte("120000"))
if err != nil { if err != nil {
t.Errorf("Unexpected error: %v", err) t.Errorf("Unexpected error: %v", err)
} }
trie, _ = New(common.Hash{}, root, triedb) trie, _ = New(TrieID(root), triedb)
_, err = trie.TryGet([]byte("120099")) _, err = trie.TryGet([]byte("120099"))
if err != nil { if err != nil {
t.Errorf("Unexpected error: %v", err) t.Errorf("Unexpected error: %v", err)
} }
trie, _ = New(common.Hash{}, root, triedb) trie, _ = New(TrieID(root), triedb)
_, err = trie.TryGet([]byte("123456")) _, err = trie.TryGet([]byte("123456"))
if err != nil { if err != nil {
t.Errorf("Unexpected error: %v", err) t.Errorf("Unexpected error: %v", err)
} }
trie, _ = New(common.Hash{}, root, triedb) trie, _ = New(TrieID(root), triedb)
err = trie.TryUpdate([]byte("120099"), []byte("zxcvzxcvzxcvzxcvzxcvzxcvzxcvzxcv")) err = trie.TryUpdate([]byte("120099"), []byte("zxcvzxcvzxcvzxcvzxcvzxcvzxcvzxcv"))
if err != nil { if err != nil {
t.Errorf("Unexpected error: %v", err) t.Errorf("Unexpected error: %v", err)
} }
trie, _ = New(common.Hash{}, root, triedb) trie, _ = New(TrieID(root), triedb)
err = trie.TryDelete([]byte("123456")) err = trie.TryDelete([]byte("123456"))
if err != nil { if err != nil {
t.Errorf("Unexpected error: %v", err) t.Errorf("Unexpected error: %v", err)
@ -122,27 +123,27 @@ func testMissingNode(t *testing.T, memonly bool) {
diskdb.Delete(hash[:]) diskdb.Delete(hash[:])
} }
trie, _ = New(common.Hash{}, root, triedb) trie, _ = New(TrieID(root), triedb)
_, err = trie.TryGet([]byte("120000")) _, err = trie.TryGet([]byte("120000"))
if _, ok := err.(*MissingNodeError); !ok { if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err) t.Errorf("Wrong error: %v", err)
} }
trie, _ = New(common.Hash{}, root, triedb) trie, _ = New(TrieID(root), triedb)
_, err = trie.TryGet([]byte("120099")) _, err = trie.TryGet([]byte("120099"))
if _, ok := err.(*MissingNodeError); !ok { if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err) t.Errorf("Wrong error: %v", err)
} }
trie, _ = New(common.Hash{}, root, triedb) trie, _ = New(TrieID(root), triedb)
_, err = trie.TryGet([]byte("123456")) _, err = trie.TryGet([]byte("123456"))
if err != nil { if err != nil {
t.Errorf("Unexpected error: %v", err) t.Errorf("Unexpected error: %v", err)
} }
trie, _ = New(common.Hash{}, root, triedb) trie, _ = New(TrieID(root), triedb)
err = trie.TryUpdate([]byte("120099"), []byte("zxcv")) err = trie.TryUpdate([]byte("120099"), []byte("zxcv"))
if _, ok := err.(*MissingNodeError); !ok { if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err) t.Errorf("Wrong error: %v", err)
} }
trie, _ = New(common.Hash{}, root, triedb) trie, _ = New(TrieID(root), triedb)
err = trie.TryDelete([]byte("123456")) err = trie.TryDelete([]byte("123456"))
if _, ok := err.(*MissingNodeError); !ok { if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err) t.Errorf("Wrong error: %v", err)
@ -196,7 +197,7 @@ func TestGet(t *testing.T) {
} }
root, nodes, _ := trie.Commit(false) root, nodes, _ := trie.Commit(false)
db.Update(NewWithNodeSet(nodes)) db.Update(NewWithNodeSet(nodes))
trie, _ = New(common.Hash{}, root, db) trie, _ = New(TrieID(root), db)
} }
} }
@ -273,7 +274,7 @@ func TestReplication(t *testing.T) {
triedb.Update(NewWithNodeSet(nodes)) triedb.Update(NewWithNodeSet(nodes))
// create a new trie on top of the database and check that lookups work. // create a new trie on top of the database and check that lookups work.
trie2, err := New(common.Hash{}, exp, triedb) trie2, err := New(TrieID(exp), triedb)
if err != nil { if err != nil {
t.Fatalf("can't recreate trie at %x: %v", exp, err) t.Fatalf("can't recreate trie at %x: %v", exp, err)
} }
@ -294,7 +295,7 @@ func TestReplication(t *testing.T) {
if nodes != nil { if nodes != nil {
triedb.Update(NewWithNodeSet(nodes)) triedb.Update(NewWithNodeSet(nodes))
} }
trie2, err = New(common.Hash{}, hash, triedb) trie2, err = New(TrieID(hash), triedb)
if err != nil { if err != nil {
t.Fatalf("can't recreate trie at %x: %v", exp, err) t.Fatalf("can't recreate trie at %x: %v", exp, err)
} }
@ -377,6 +378,7 @@ const (
opCommit opCommit
opItercheckhash opItercheckhash
opNodeDiff opNodeDiff
opProve
opMax // boundary value, not an actual op opMax // boundary value, not an actual op
) )
@ -402,7 +404,7 @@ func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
step.key = genKey() step.key = genKey()
step.value = make([]byte, 8) step.value = make([]byte, 8)
binary.BigEndian.PutUint64(step.value, uint64(i)) binary.BigEndian.PutUint64(step.value, uint64(i))
case opGet, opDelete: case opGet, opDelete, opProve:
step.key = genKey() step.key = genKey()
} }
steps = append(steps, step) steps = append(steps, step)
@ -436,24 +438,60 @@ func runRandTest(rt randTest) bool {
if string(v) != want { if string(v) != want {
rt[i].err = fmt.Errorf("mismatch for key %#x, got %#x want %#x", step.key, v, want) rt[i].err = fmt.Errorf("mismatch for key %#x, got %#x want %#x", step.key, v, want)
} }
case opProve:
hash := tr.Hash()
if hash == emptyRoot {
continue
}
proofDb := rawdb.NewMemoryDatabase()
err := tr.Prove(step.key, 0, proofDb)
if err != nil {
rt[i].err = fmt.Errorf("failed for proving key %#x, %v", step.key, err)
}
_, err = VerifyProof(hash, step.key, proofDb)
if err != nil {
rt[i].err = fmt.Errorf("failed for verifying key %#x, %v", step.key, err)
}
case opHash: case opHash:
tr.Hash() tr.Hash()
case opCommit: case opCommit:
hash, nodes, err := tr.Commit(false) root, nodes, err := tr.Commit(true)
if err != nil { if err != nil {
rt[i].err = err rt[i].err = err
return false return false
} }
// Validate the returned nodeset
if nodes != nil {
for path, node := range nodes.updates.nodes {
blob, _, _ := origTrie.TryGetNode(hexToCompact([]byte(path)))
got := node.prev
if !bytes.Equal(blob, got) {
rt[i].err = fmt.Errorf("prevalue mismatch for 0x%x, got 0x%x want 0x%x", path, got, blob)
panic(rt[i].err)
}
}
for path, prev := range nodes.deletes {
blob, _, _ := origTrie.TryGetNode(hexToCompact([]byte(path)))
if !bytes.Equal(blob, prev) {
rt[i].err = fmt.Errorf("prevalue mismatch for 0x%x, got 0x%x want 0x%x", path, prev, blob)
return false
}
}
}
if nodes != nil { if nodes != nil {
triedb.Update(NewWithNodeSet(nodes)) triedb.Update(NewWithNodeSet(nodes))
} }
newtr, err := New(common.Hash{}, hash, triedb) newtr, err := New(TrieID(root), triedb)
if err != nil { if err != nil {
rt[i].err = err rt[i].err = err
return false return false
} }
tr = newtr tr = newtr
// Enable node tracing. Resolve the root node again explicitly
// since it's not captured at the beginning.
tr.tracer = newTracer() tr.tracer = newTracer()
tr.resolveAndTrack(root.Bytes(), nil)
origTrie = tr.Copy() origTrie = tr.Copy()
case opItercheckhash: case opItercheckhash:

View File

@ -17,6 +17,7 @@
package trie package trie
import ( import (
"bytes"
"testing" "testing"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -72,7 +73,7 @@ func TestTrieTracer(t *testing.T) {
if err := db.Update(NewWithNodeSet(nodes)); err != nil { if err := db.Update(NewWithNodeSet(nodes)); err != nil {
t.Fatal(err) t.Fatal(err)
} }
trie, _ = New(common.Hash{}, root, db) trie, _ = New(TrieID(root), db)
trie.tracer = newTracer() trie.tracer = newTracer()
// Delete all the elements, check deletion set // Delete all the elements, check deletion set
@ -124,3 +125,120 @@ func TestTrieTracerNoop(t *testing.T) {
t.Fatalf("Unexpected deleted node tracked %d", len(trie.tracer.deleteList())) t.Fatalf("Unexpected deleted node tracked %d", len(trie.tracer.deleteList()))
} }
} }
func TestTrieTracePrevValue(t *testing.T) {
db := NewDatabase(rawdb.NewMemoryDatabase())
trie := NewEmpty(db)
trie.tracer = newTracer()
paths, blobs := trie.tracer.prevList()
if len(paths) != 0 || len(blobs) != 0 {
t.Fatalf("Nothing should be tracked")
}
// Insert a batch of entries, all the nodes should be marked as inserted
vals := []struct{ k, v string }{
{"do", "verb"},
{"ether", "wookiedoo"},
{"horse", "stallion"},
{"shaman", "horse"},
{"doge", "coin"},
{"dog", "puppy"},
{"somethingveryoddindeedthis is", "myothernodedata"},
}
for _, val := range vals {
trie.Update([]byte(val.k), []byte(val.v))
}
paths, blobs = trie.tracer.prevList()
if len(paths) != 0 || len(blobs) != 0 {
t.Fatalf("Nothing should be tracked")
}
// Commit the changes and re-create with new root
root, nodes, _ := trie.Commit(false)
if err := db.Update(NewWithNodeSet(nodes)); err != nil {
t.Fatal(err)
}
trie, _ = New(TrieID(root), db)
trie.tracer = newTracer()
trie.resolveAndTrack(root.Bytes(), nil)
// Load all nodes in trie
for _, val := range vals {
trie.TryGet([]byte(val.k))
}
// Ensure all nodes are tracked by tracer with correct prev-values
iter := trie.NodeIterator(nil)
seen := make(map[string][]byte)
for iter.Next(true) {
// Embedded nodes are ignored since they are not present in
// database.
if iter.Hash() == (common.Hash{}) {
continue
}
seen[string(iter.Path())] = common.CopyBytes(iter.NodeBlob())
}
paths, blobs = trie.tracer.prevList()
if len(paths) != len(seen) || len(blobs) != len(seen) {
t.Fatalf("Unexpected tracked values")
}
for i, path := range paths {
blob := blobs[i]
prev, ok := seen[string(path)]
if !ok {
t.Fatalf("Missing node %v", path)
}
if !bytes.Equal(blob, prev) {
t.Fatalf("Unexpected value path: %v, want: %v, got: %v", path, prev, blob)
}
}
// Re-open the trie and iterate the trie, ensure nothing will be tracked.
// Iterator will not link any loaded nodes to trie.
trie, _ = New(TrieID(root), db)
trie.tracer = newTracer()
iter = trie.NodeIterator(nil)
for iter.Next(true) {
}
paths, blobs = trie.tracer.prevList()
if len(paths) != 0 || len(blobs) != 0 {
t.Fatalf("Nothing should be tracked")
}
// Re-open the trie and generate proof for entries, ensure nothing will
// be tracked. Prover will not link any loaded nodes to trie.
trie, _ = New(TrieID(root), db)
trie.tracer = newTracer()
for _, val := range vals {
trie.Prove([]byte(val.k), 0, rawdb.NewMemoryDatabase())
}
paths, blobs = trie.tracer.prevList()
if len(paths) != 0 || len(blobs) != 0 {
t.Fatalf("Nothing should be tracked")
}
// Delete entries from trie, ensure all previous values are correct.
trie, _ = New(TrieID(root), db)
trie.tracer = newTracer()
trie.resolveAndTrack(root.Bytes(), nil)
for _, val := range vals {
trie.TryDelete([]byte(val.k))
}
paths, blobs = trie.tracer.prevList()
if len(paths) != len(seen) || len(blobs) != len(seen) {
t.Fatalf("Unexpected tracked values")
}
for i, path := range paths {
blob := blobs[i]
prev, ok := seen[string(path)]
if !ok {
t.Fatalf("Missing node %v", path)
}
if !bytes.Equal(blob, prev) {
t.Fatalf("Unexpected value path: %v, want: %v, got: %v", path, prev, blob)
}
}
}

View File

@ -50,45 +50,43 @@ func newTracer() *tracer {
} }
} }
/*
// onRead tracks the newly loaded trie node and caches the rlp-encoded blob internally. // onRead tracks the newly loaded trie node and caches the rlp-encoded blob internally.
// Don't change the value outside of function since it's not deep-copied. // Don't change the value outside of function since it's not deep-copied.
func (t *tracer) onRead(key []byte, val []byte) { func (t *tracer) onRead(path []byte, val []byte) {
// Tracer isn't used right now, remove this check later. // Tracer isn't used right now, remove this check later.
if t == nil { if t == nil {
return return
} }
t.origin[string(key)] = val t.origin[string(path)] = val
} }
*/
// onInsert tracks the newly inserted trie node. If it's already in the deletion set // onInsert tracks the newly inserted trie node. If it's already in the deletion set
// (resurrected node), then just wipe it from the deletion set as the "untouched". // (resurrected node), then just wipe it from the deletion set as the "untouched".
func (t *tracer) onInsert(key []byte) { func (t *tracer) onInsert(path []byte) {
// Tracer isn't used right now, remove this check later. // Tracer isn't used right now, remove this check later.
if t == nil { if t == nil {
return return
} }
if _, present := t.delete[string(key)]; present { if _, present := t.delete[string(path)]; present {
delete(t.delete, string(key)) delete(t.delete, string(path))
return return
} }
t.insert[string(key)] = struct{}{} t.insert[string(path)] = struct{}{}
} }
// onDelete tracks the newly deleted trie node. If it's already // onDelete tracks the newly deleted trie node. If it's already
// in the addition set, then just wipe it from the addition set // in the addition set, then just wipe it from the addition set
// as it's untouched. // as it's untouched.
func (t *tracer) onDelete(key []byte) { func (t *tracer) onDelete(path []byte) {
// Tracer isn't used right now, remove this check later. // Tracer isn't used right now, remove this check later.
if t == nil { if t == nil {
return return
} }
if _, present := t.insert[string(key)]; present { if _, present := t.insert[string(path)]; present {
delete(t.insert, string(key)) delete(t.insert, string(path))
return return
} }
t.delete[string(key)] = struct{}{} t.delete[string(path)] = struct{}{}
} }
// insertList returns the tracked inserted trie nodes in list format. // insertList returns the tracked inserted trie nodes in list format.
@ -98,8 +96,8 @@ func (t *tracer) insertList() [][]byte {
return nil return nil
} }
var ret [][]byte var ret [][]byte
for key := range t.insert { for path := range t.insert {
ret = append(ret, []byte(key)) ret = append(ret, []byte(path))
} }
return ret return ret
} }
@ -111,22 +109,37 @@ func (t *tracer) deleteList() [][]byte {
return nil return nil
} }
var ret [][]byte var ret [][]byte
for key := range t.delete { for path := range t.delete {
ret = append(ret, []byte(key)) ret = append(ret, []byte(path))
} }
return ret return ret
} }
/* // prevList returns the tracked node blobs in list format.
func (t *tracer) prevList() ([][]byte, [][]byte) {
// Tracer isn't used right now, remove this check later.
if t == nil {
return nil, nil
}
var (
paths [][]byte
blobs [][]byte
)
for path, blob := range t.origin {
paths = append(paths, []byte(path))
blobs = append(blobs, blob)
}
return paths, blobs
}
// getPrev returns the cached original value of the specified node. // getPrev returns the cached original value of the specified node.
func (t *tracer) getPrev(key []byte) []byte { func (t *tracer) getPrev(path []byte) []byte {
// Don't panic on uninitialized tracer, it's possible in testing. // Tracer isn't used right now, remove this check later.
if t == nil { if t == nil {
return nil return nil
} }
return t.origin[string(key)] return t.origin[string(path)]
} }
*/
// reset clears the content tracked by tracer. // reset clears the content tracked by tracer.
func (t *tracer) reset() { func (t *tracer) reset() {