From b40c10916c13660472d3071f7e5c1fcdee9aceec Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Thu, 16 Feb 2023 20:53:16 +0100 Subject: [PATCH 01/26] params: begin v1.11.2 release cycle --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index 2a62c17b5..9be8a26a5 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 11 // Minor version component of the current release - VersionPatch = 1 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 11 // Minor version component of the current release + VersionPatch = 2 // Patch version component of the current release + VersionMeta = "unstable" // Version metadata to append to the version string ) // Version holds the textual version string. From 6428663faf50f8368cedf0297063154483cce72b Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Fri, 17 Feb 2023 11:25:09 +0100 Subject: [PATCH 02/26] eth/catalyst: send INVALID instead of INVALID_BLOCK_HASH (#26696) This change will break one hive test, but pass another and it will be the better way going forward --- beacon/engine/errors.go | 2 -- eth/catalyst/api.go | 2 +- eth/catalyst/api_test.go | 4 ++-- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/beacon/engine/errors.go b/beacon/engine/errors.go index b2f311392..fe1137e00 100644 --- a/beacon/engine/errors.go +++ b/beacon/engine/errors.go @@ -74,8 +74,6 @@ var ( // - newPayloadV1: if the payload was accepted, but not processed (side chain) ACCEPTED = "ACCEPTED" - INVALIDBLOCKHASH = "INVALID_BLOCK_HASH" - GenericServerError = &EngineAPIError{code: -32000, msg: "Server error"} UnknownPayload = &EngineAPIError{code: -38001, msg: "Unknown payload"} InvalidForkChoiceState = &EngineAPIError{code: -38002, msg: "Invalid forkchoice state"} diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 00c477218..cee9b2c50 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -448,7 +448,7 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData) (engine.Payloa block, err := engine.ExecutableDataToBlock(params) if err != nil { log.Debug("Invalid NewPayload params", "params", params, "error", err) - return engine.PayloadStatusV1{Status: engine.INVALIDBLOCKHASH}, nil + return engine.PayloadStatusV1{Status: engine.INVALID}, nil } // Stash away the last update to warn the user if the beacon client goes offline api.lastNewPayloadLock.Lock() diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index f7881415a..7155fb54d 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -864,8 +864,8 @@ func TestInvalidBloom(t *testing.T) { if err != nil { t.Fatal(err) } - if status.Status != engine.INVALIDBLOCKHASH { - t.Errorf("invalid status: expected VALID got: %v", status.Status) + if status.Status != engine.INVALID { + t.Errorf("invalid status: expected INVALID got: %v", status.Status) } } From 15e5e6176bb384514271c24e53c0b9e2f862b2a4 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 17 Feb 2023 20:30:38 +0200 Subject: [PATCH 03/26] eth/catalyst: request too large error (#26722) The method `GetPayloadBodiesByRangeV1` now returns "-38004: Too large request" error if the requested range is too large, according to spec Co-authored-by: 
Martin Holst Swende --- beacon/engine/errors.go | 1 + eth/catalyst/api.go | 5 ++++- eth/catalyst/api_test.go | 16 +++++++++++----- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/beacon/engine/errors.go b/beacon/engine/errors.go index fe1137e00..769001b9e 100644 --- a/beacon/engine/errors.go +++ b/beacon/engine/errors.go @@ -78,6 +78,7 @@ var ( UnknownPayload = &EngineAPIError{code: -38001, msg: "Unknown payload"} InvalidForkChoiceState = &EngineAPIError{code: -38002, msg: "Invalid forkchoice state"} InvalidPayloadAttributes = &EngineAPIError{code: -38003, msg: "Invalid payload attributes"} + TooLargeRequest = &EngineAPIError{code: -38004, msg: "Too large request"} InvalidParams = &EngineAPIError{code: -32602, msg: "Invalid parameters"} STATUS_INVALID = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: INVALID}, PayloadID: nil} diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index cee9b2c50..95eed408f 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -785,9 +785,12 @@ func (api *ConsensusAPI) GetPayloadBodiesByHashV1(hashes []common.Hash) []*engin // GetPayloadBodiesByRangeV1 implements engine_getPayloadBodiesByRangeV1 which allows for retrieval of a range // of block bodies by the engine api. func (api *ConsensusAPI) GetPayloadBodiesByRangeV1(start, count hexutil.Uint64) ([]*engine.ExecutionPayloadBodyV1, error) { - if start == 0 || count == 0 || count > 1024 { + if start == 0 || count == 0 { return nil, engine.InvalidParams.With(fmt.Errorf("invalid start or count, start: %v count: %v", start, count)) } + if count > 1024 { + return nil, engine.TooLargeRequest.With(fmt.Errorf("requested count too large: %v", count)) + } // limit count up until current current := api.eth.BlockChain().CurrentBlock().NumberU64() last := uint64(start) + uint64(count) - 1 diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index 7155fb54d..4dc0c0ea7 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -1383,37 +1383,43 @@ func TestGetBlockBodiesByRangeInvalidParams(t *testing.T) { node, eth, _ := setupBodies(t) api := NewConsensusAPI(eth) defer node.Close() - tests := []struct { start hexutil.Uint64 count hexutil.Uint64 + want *engine.EngineAPIError }{ // Genesis { start: 0, count: 1, + want: engine.InvalidParams, }, // No block requested { start: 1, count: 0, + want: engine.InvalidParams, }, // Genesis & no block { start: 0, count: 0, + want: engine.InvalidParams, }, // More than 1024 blocks { start: 1, count: 1025, + want: engine.TooLargeRequest, }, } - - for _, test := range tests { - result, err := api.GetPayloadBodiesByRangeV1(test.start, test.count) + for i, tc := range tests { + result, err := api.GetPayloadBodiesByRangeV1(tc.start, tc.count) if err == nil { - t.Fatalf("expected error, got %v", result) + t.Fatalf("test %d: expected error, got %v", i, result) + } + if have, want := err.Error(), tc.want.Error(); have != want { + t.Fatalf("test %d: have %s, want %s", i, have, want) } } } From 7c749c947a9d5181f5f2c1b3fdb5ea6b0e401e8e Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Sun, 19 Feb 2023 14:10:19 -0500 Subject: [PATCH 04/26] core/trie: remove trie tracer (#26665) This PR contains a small portion of the full pbss PR, namely Remove the tracer from trie (and comitter), and instead using an accessList. Related changes to the Nodeset. 
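For illustration only, a minimal standalone sketch of the accessList idea (hypothetical, simplified names; not the actual go-ethereum types): the trie records the original blob of every node it resolves from the database, keyed by node path, and hands that map to the node set at commit time, so no separate tracer is needed.

    package main

    import "fmt"

    // Trie stands in for the real trie: it records the original value of
    // every node it resolves from the database, keyed by node path.
    type Trie struct {
        accessList map[string][]byte
    }

    // resolve simulates loading a node blob from disk and recording it in
    // the access list.
    func (t *Trie) resolve(path string, blob []byte) {
        t.accessList[path] = blob
    }

    // NodeSet carries the dirty nodes produced by a commit together with
    // the original values of everything that was read (the access list).
    type NodeSet struct {
        dirty      map[string][]byte
        accessList map[string][]byte
    }

    func NewNodeSet(accessList map[string][]byte) *NodeSet {
        if accessList == nil {
            accessList = make(map[string][]byte) // don't panic for lazy users
        }
        return &NodeSet{dirty: make(map[string][]byte), accessList: accessList}
    }

    func main() {
        tr := &Trie{accessList: make(map[string][]byte)}
        tr.resolve("01", []byte{0x80})   // node read from disk
        set := NewNodeSet(tr.accessList) // original values handed over at commit
        set.dirty["01"] = nil            // nil blob marks a deletion
        fmt.Println(len(set.accessList), len(set.dirty))
    }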
--------- Co-authored-by: Gary Rong --- core/state/metrics.go | 14 +- core/state/statedb.go | 29 ++-- trie/committer.go | 29 +--- trie/database.go | 9 +- trie/nodeset.go | 120 +++++++++-------- trie/trie.go | 71 ++++------ trie/trie_test.go | 66 +-------- trie/util_test.go | 304 ------------------------------------------ trie/utils.go | 199 --------------------------- 9 files changed, 116 insertions(+), 725 deletions(-) delete mode 100644 trie/util_test.go delete mode 100644 trie/utils.go diff --git a/core/state/metrics.go b/core/state/metrics.go index e702ef3a8..086666b7d 100644 --- a/core/state/metrics.go +++ b/core/state/metrics.go @@ -19,12 +19,10 @@ package state import "github.com/ethereum/go-ethereum/metrics" var ( - accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil) - storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil) - accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil) - storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil) - accountTrieUpdatedMeter = metrics.NewRegisteredMeter("state/update/accountnodes", nil) - storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil) - accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil) - storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil) + accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil) + storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil) + accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil) + storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil) + accountTrieNodesMeter = metrics.NewRegisteredMeter("state/trie/account", nil) + storageTriesNodesMeter = metrics.NewRegisteredMeter("state/trie/storage", nil) ) diff --git a/core/state/statedb.go b/core/state/statedb.go index 3f4bec239..4385c1976 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -970,13 +970,11 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { // Commit objects to the trie, measuring the elapsed time var ( - accountTrieNodesUpdated int - accountTrieNodesDeleted int - storageTrieNodesUpdated int - storageTrieNodesDeleted int - nodes = trie.NewMergedNodeSet() + accountTrieNodes int + storageTrieNodes int + nodes = trie.NewMergedNodeSet() + codeWriter = s.db.DiskDB().NewBatch() ) - codeWriter := s.db.DiskDB().NewBatch() for addr := range s.stateObjectsDirty { if obj := s.stateObjects[addr]; !obj.deleted { // Write any contract code associated with the state object @@ -994,17 +992,9 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { if err := nodes.Merge(set); err != nil { return common.Hash{}, err } - updates, deleted := set.Size() - storageTrieNodesUpdated += updates - storageTrieNodesDeleted += deleted + storageTrieNodes += set.Size() } } - // If the contract is destructed, the storage is still left in the - // database as dangling data. Theoretically it's should be wiped from - // database as well, but in hash-based-scheme it's extremely hard to - // determine that if the trie nodes are also referenced by other storage, - // and in path-based-scheme some technical challenges are still unsolved. - // Although it won't affect the correctness but please fix it TODO(rjl493456442). 
} if len(s.stateObjectsDirty) > 0 { s.stateObjectsDirty = make(map[common.Address]struct{}) @@ -1025,7 +1015,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { if err := nodes.Merge(set); err != nil { return common.Hash{}, err } - accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size() + accountTrieNodes = set.Size() } if metrics.EnabledExpensive { s.AccountCommits += time.Since(start) @@ -1034,10 +1024,9 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { storageUpdatedMeter.Mark(int64(s.StorageUpdated)) accountDeletedMeter.Mark(int64(s.AccountDeleted)) storageDeletedMeter.Mark(int64(s.StorageDeleted)) - accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated)) - accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted)) - storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated)) - storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted)) + accountTrieNodesMeter.Mark(int64(accountTrieNodes)) + storageTriesNodesMeter.Mark(int64(storageTrieNodes)) + s.AccountUpdated, s.AccountDeleted = 0, 0 s.StorageUpdated, s.StorageDeleted = 0, 0 } diff --git a/trie/committer.go b/trie/committer.go index c4957f349..745cb7683 100644 --- a/trie/committer.go +++ b/trie/committer.go @@ -33,29 +33,22 @@ type leaf struct { // insertion order. type committer struct { nodes *NodeSet - tracer *tracer collectLeaf bool } // newCommitter creates a new committer or picks one from the pool. -func newCommitter(owner common.Hash, tracer *tracer, collectLeaf bool) *committer { +func newCommitter(nodes *NodeSet, collectLeaf bool) *committer { return &committer{ - nodes: NewNodeSet(owner), - tracer: tracer, + nodes: nodes, collectLeaf: collectLeaf, } } // Commit collapses a node down into a hash node and returns it along with // the modified nodeset. -func (c *committer) Commit(n node) (hashNode, *NodeSet) { +func (c *committer) Commit(n node) hashNode { h := c.commit(nil, n) - // Some nodes can be deleted from trie which can't be captured - // by committer itself. Iterate all deleted nodes tracked by - // tracer and marked them as deleted only if they are present - // in database previously. - c.tracer.markDeletions(c.nodes) - return h.(hashNode), c.nodes + return h.(hashNode) } // commit collapses a node down into a hash node and returns it. @@ -85,12 +78,6 @@ func (c *committer) commit(path []byte, n node) node { if hn, ok := hashedNode.(hashNode); ok { return hn } - // The short node now is embedded in its parent. Mark the node as - // deleted if it's present in database previously. It's equivalent - // as deletion from database's perspective. - if prev := c.tracer.getPrev(path); len(prev) != 0 { - c.nodes.markDeleted(path, prev) - } return collapsed case *fullNode: hashedKids := c.commitChildren(path, cn) @@ -101,12 +88,6 @@ func (c *committer) commit(path []byte, n node) node { if hn, ok := hashedNode.(hashNode); ok { return hn } - // The full node now is embedded in its parent. Mark the node as - // deleted if it's present in database previously. It's equivalent - // as deletion from database's perspective. - if prev := c.tracer.getPrev(path); len(prev) != 0 { - c.nodes.markDeleted(path, prev) - } return collapsed case hashNode: return cn @@ -169,7 +150,7 @@ func (c *committer) store(path []byte, n node) node { } ) // Collect the dirty node to nodeset for return. - c.nodes.markUpdated(path, mnode, c.tracer.getPrev(path)) + c.nodes.markUpdated(path, mnode) // Collect the corresponding leaf node if it's required. 
We don't check // full node since it's impossible to store value in fullNode. The key diff --git a/trie/database.go b/trie/database.go index 74247d59c..5d47475e0 100644 --- a/trie/database.go +++ b/trie/database.go @@ -792,13 +792,12 @@ func (db *Database) Update(nodes *MergedNodeSet) error { } for _, owner := range order { subset := nodes.sets[owner] - for _, path := range subset.updates.order { - n, ok := subset.updates.nodes[path] - if !ok { - return fmt.Errorf("missing node %x %v", owner, path) + subset.forEachWithOrder(false, func(path string, n *memoryNode) { + if n.isDeleted() { + return // ignore deletion } db.insert(n.hash, int(n.size), n.node) - } + }) } // Link up the account trie and storage trie if the node points // to an account trie leaf. diff --git a/trie/nodeset.go b/trie/nodeset.go index 928172350..4251eccaf 100644 --- a/trie/nodeset.go +++ b/trie/nodeset.go @@ -19,6 +19,7 @@ package trie import ( "fmt" "reflect" + "sort" "strings" "github.com/ethereum/go-ethereum/common" @@ -40,8 +41,8 @@ var memoryNodeSize = int(reflect.TypeOf(memoryNode{}).Size()) // memorySize returns the total memory size used by this node. // nolint:unused -func (n *memoryNode) memorySize(key int) int { - return int(n.size) + memoryNodeSize + key +func (n *memoryNode) memorySize(pathlen int) int { + return int(n.size) + memoryNodeSize + pathlen } // rlp returns the raw rlp encoded blob of the cached trie node, either directly @@ -64,14 +65,20 @@ func (n *memoryNode) obj() node { return expandNode(n.hash[:], n.node) } +// isDeleted returns the indicator if the node is marked as deleted. +func (n *memoryNode) isDeleted() bool { + return n.hash == (common.Hash{}) +} + // nodeWithPrev wraps the memoryNode with the previous node value. +// nolint: unused type nodeWithPrev struct { *memoryNode prev []byte // RLP-encoded previous value, nil means it's non-existent } // unwrap returns the internal memoryNode object. -// nolint:unused +// nolint: unused func (n *nodeWithPrev) unwrap() *memoryNode { return n.memoryNode } @@ -79,64 +86,69 @@ func (n *nodeWithPrev) unwrap() *memoryNode { // memorySize returns the total memory size used by this node. It overloads // the function in memoryNode by counting the size of previous value as well. // nolint: unused -func (n *nodeWithPrev) memorySize(key int) int { - return n.memoryNode.memorySize(key) + len(n.prev) -} - -// nodesWithOrder represents a collection of dirty nodes which includes -// newly-inserted and updated nodes. The modification order of all nodes -// is represented by order list. -type nodesWithOrder struct { - order []string // the path list of dirty nodes, sort by insertion order - nodes map[string]*nodeWithPrev // the map of dirty nodes, keyed by node path +func (n *nodeWithPrev) memorySize(pathlen int) int { + return n.memoryNode.memorySize(pathlen) + len(n.prev) } // NodeSet contains all dirty nodes collected during the commit operation. // Each node is keyed by path. It's not thread-safe to use. 
type NodeSet struct { - owner common.Hash // the identifier of the trie - updates *nodesWithOrder // the set of updated nodes(newly inserted, updated) - deletes map[string][]byte // the map of deleted nodes, keyed by node - leaves []*leaf // the list of dirty leaves + owner common.Hash // the identifier of the trie + nodes map[string]*memoryNode // the set of dirty nodes(inserted, updated, deleted) + leaves []*leaf // the list of dirty leaves + accessList map[string][]byte // The list of accessed nodes, which records the original node value } // NewNodeSet initializes an empty node set to be used for tracking dirty nodes -// from a specific account or storage trie. The owner is zero for the account -// trie and the owning account address hash for storage tries. -func NewNodeSet(owner common.Hash) *NodeSet { +// for a specific account or storage trie. The owner is zero for the account trie +// and the owning account address hash for storage tries. The provided accessList +// represents the original value of accessed nodes, it can be optional but would +// be beneficial for speeding up the construction of trie history. +func NewNodeSet(owner common.Hash, accessList map[string][]byte) *NodeSet { + // Don't panic for lazy users + if accessList == nil { + accessList = make(map[string][]byte) + } return &NodeSet{ - owner: owner, - updates: &nodesWithOrder{ - nodes: make(map[string]*nodeWithPrev), - }, - deletes: make(map[string][]byte), + owner: owner, + nodes: make(map[string]*memoryNode), + accessList: accessList, } } -/* -// NewNodeSetWithDeletion initializes the nodeset with provided deletion set. -func NewNodeSetWithDeletion(owner common.Hash, paths [][]byte, prev [][]byte) *NodeSet { - set := NewNodeSet(owner) - for i, path := range paths { - set.markDeleted(path, prev[i]) +// forEachWithOrder iterates the dirty nodes with the specified order. +// If topToBottom is true: +// +// then the order of iteration is top to bottom, left to right. +// +// If topToBottom is false: +// +// then the order of iteration is bottom to top, right to left. +func (set *NodeSet) forEachWithOrder(topToBottom bool, callback func(path string, n *memoryNode)) { + var paths sort.StringSlice + for path := range set.nodes { + paths = append(paths, path) + } + if topToBottom { + paths.Sort() + } else { + sort.Sort(sort.Reverse(paths)) + } + for _, path := range paths { + callback(path, set.nodes[path]) } - return set } -*/ // markUpdated marks the node as dirty(newly-inserted or updated) with provided // node path, node object along with its previous value. -func (set *NodeSet) markUpdated(path []byte, node *memoryNode, prev []byte) { - set.updates.order = append(set.updates.order, string(path)) - set.updates.nodes[string(path)] = &nodeWithPrev{ - memoryNode: node, - prev: prev, - } +func (set *NodeSet) markUpdated(path []byte, node *memoryNode) { + set.nodes[string(path)] = node } // markDeleted marks the node as deleted with provided path and previous value. -func (set *NodeSet) markDeleted(path []byte, prev []byte) { - set.deletes[string(path)] = prev +// nolint: unused +func (set *NodeSet) markDeleted(path []byte) { + set.nodes[string(path)] = &memoryNode{} } // addLeaf collects the provided leaf node into set. @@ -144,16 +156,16 @@ func (set *NodeSet) addLeaf(node *leaf) { set.leaves = append(set.leaves, node) } -// Size returns the number of updated and deleted nodes contained in the set. 
-func (set *NodeSet) Size() (int, int) { - return len(set.updates.order), len(set.deletes) +// Size returns the number of dirty nodes contained in the set. +func (set *NodeSet) Size() int { + return len(set.nodes) } // Hashes returns the hashes of all updated nodes. TODO(rjl493456442) how can // we get rid of it? func (set *NodeSet) Hashes() []common.Hash { var ret []common.Hash - for _, node := range set.updates.nodes { + for _, node := range set.nodes { ret = append(ret, node.hash) } return ret @@ -163,19 +175,17 @@ func (set *NodeSet) Hashes() []common.Hash { func (set *NodeSet) Summary() string { var out = new(strings.Builder) fmt.Fprintf(out, "nodeset owner: %v\n", set.owner) - if set.updates != nil { - for _, key := range set.updates.order { - updated := set.updates.nodes[key] - if updated.prev != nil { - fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", key, updated.hash, updated.prev) - } else { - fmt.Fprintf(out, " [+]: %x -> %v\n", key, updated.hash) + if set.nodes != nil { + for path, n := range set.nodes { + // Deletion + if n.isDeleted() { + fmt.Fprintf(out, " [-]: %x\n", path) + continue } + // Update + fmt.Fprintf(out, " [+]: %x -> %v\n", path, n.hash) } } - for k, n := range set.deletes { - fmt.Fprintf(out, " [-]: %x -> %x\n", k, n) - } for _, n := range set.leaves { fmt.Fprintf(out, "[leaf]: %v\n", n) } diff --git a/trie/trie.go b/trie/trie.go index c467ac476..65f89d91f 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -51,12 +51,11 @@ type Trie struct { // actually unhashed nodes. unhashed int + // accessList tracks the loaded nodes from database. + accessList map[string][]byte + // reader is the handler trie can retrieve nodes from. reader *trieReader - - // tracer is the tool to track the trie changes. - // It will be reset after each commit operation. - tracer *tracer } // newFlag returns the cache flag value for a newly created node. @@ -66,12 +65,16 @@ func (t *Trie) newFlag() nodeFlag { // Copy returns a copy of Trie. func (t *Trie) Copy() *Trie { + accessList := make(map[string][]byte) + for path, blob := range t.accessList { + accessList[path] = common.CopyBytes(blob) + } return &Trie{ - root: t.root, - owner: t.owner, - unhashed: t.unhashed, - reader: t.reader, - tracer: t.tracer.copy(), + root: t.root, + owner: t.owner, + unhashed: t.unhashed, + reader: t.reader, + accessList: accessList, } } @@ -87,9 +90,9 @@ func New(id *ID, db NodeReader) (*Trie, error) { return nil, err } trie := &Trie{ - owner: id.Owner, - reader: reader, - //tracer: newTracer(), + owner: id.Owner, + reader: reader, + accessList: make(map[string][]byte), } if id.Root != (common.Hash{}) && id.Root != emptyRoot { rootnode, err := trie.resolveAndTrack(id.Root[:], nil) @@ -326,11 +329,6 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error if matchlen == 0 { return true, branch, nil } - // New branch node is created as a child of the original short node. - // Track the newly inserted node in the tracer. The node identifier - // passed is the path from the root node. - t.tracer.onInsert(append(prefix, key[:matchlen]...)) - // Replace it with a short node leading up to the branch. return true, &shortNode{key[:matchlen], branch, t.newFlag()}, nil @@ -345,11 +343,6 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error return true, n, nil case nil: - // New short node is created and track it in the tracer. The node identifier - // passed is the path from the root node. 
Note the valueNode won't be tracked - // since it's always embedded in its parent. - t.tracer.onInsert(prefix) - return true, &shortNode{key, value, t.newFlag()}, nil case hashNode: @@ -402,11 +395,6 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { return false, n, nil // don't replace n on mismatch } if matchlen == len(key) { - // The matched short node is deleted entirely and track - // it in the deletion set. The same the valueNode doesn't - // need to be tracked at all since it's always embedded. - t.tracer.onDelete(prefix) - return true, nil, nil // remove n entirely for whole matches } // The key is longer than n.Key. Remove the remaining suffix @@ -419,10 +407,6 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { } switch child := child.(type) { case *shortNode: - // The child shortNode is merged into its parent, track - // is deleted as well. - t.tracer.onDelete(append(prefix, n.Key...)) - // Deleting from the subtrie reduced it to another // short node. Merge the nodes to avoid creating a // shortNode{..., shortNode{...}}. Use concat (which @@ -484,11 +468,6 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { return false, nil, err } if cnode, ok := cnode.(*shortNode); ok { - // Replace the entire full node with the short node. - // Mark the original short node as deleted since the - // value is embedded into the parent now. - t.tracer.onDelete(append(prefix, byte(pos))) - k := append([]byte{byte(pos)}, cnode.Key...) return true, &shortNode{k, cnode.Val, t.newFlag()}, nil } @@ -548,7 +527,7 @@ func (t *Trie) resolveAndTrack(n hashNode, prefix []byte) (node, error) { if err != nil { return nil, err } - t.tracer.onRead(prefix, blob) + t.accessList[string(prefix)] = blob return mustDecodeNode(n, blob), nil } @@ -567,16 +546,15 @@ func (t *Trie) Hash() common.Hash { // Once the trie is committed, it's not usable anymore. A new trie must // be created with new root and updated trie database for following usage func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet) { - defer t.tracer.reset() - + // Reset accessList at the end of commit + defer func() { + t.accessList = make(map[string][]byte) + }() // Trie is empty and can be classified into two types of situations: // - The trie was empty and no update happens // - The trie was non-empty and all nodes are dropped if t.root == nil { - // Wrap tracked deletions as the return - set := NewNodeSet(t.owner) - t.tracer.markDeletions(set) - return emptyRoot, set + return emptyRoot, nil } // Derive the hash for all dirty nodes first. We hold the assumption // in the following procedure that all nodes are hashed. 
@@ -590,8 +568,9 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet) { t.root = hashedNode return rootHash, nil } - h := newCommitter(t.owner, t.tracer, collectLeaf) - newRoot, nodes := h.Commit(t.root) + nodes := NewNodeSet(t.owner, t.accessList) + h := newCommitter(nodes, collectLeaf) + newRoot := h.Commit(t.root) t.root = newRoot return rootHash, nodes } @@ -614,5 +593,5 @@ func (t *Trie) Reset() { t.root = nil t.owner = common.Hash{} t.unhashed = 0 - t.tracer.reset() + t.accessList = make(map[string][]byte) } diff --git a/trie/trie_test.go b/trie/trie_test.go index 2fb97eebb..b87b7d3cd 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -410,7 +410,6 @@ func runRandTest(rt randTest) bool { values = make(map[string]string) // tracks content of the trie origTrie = NewEmpty(triedb) ) - tr.tracer = newTracer() for i, step := range rt { // fmt.Printf("{op: %d, key: common.Hex2Bytes(\"%x\"), value: common.Hex2Bytes(\"%x\")}, // step %d\n", @@ -449,21 +448,14 @@ func runRandTest(rt randTest) bool { root, nodes := tr.Commit(true) // Validity the returned nodeset if nodes != nil { - for path, node := range nodes.updates.nodes { + for path := range nodes.nodes { blob, _, _ := origTrie.TryGetNode(hexToCompact([]byte(path))) - got := node.prev + got := nodes.accessList[path] if !bytes.Equal(blob, got) { rt[i].err = fmt.Errorf("prevalue mismatch for 0x%x, got 0x%x want 0x%x", path, got, blob) panic(rt[i].err) } } - for path, prev := range nodes.deletes { - blob, _, _ := origTrie.TryGetNode(hexToCompact([]byte(path))) - if !bytes.Equal(blob, prev) { - rt[i].err = fmt.Errorf("prevalue mismatch for 0x%x, got 0x%x want 0x%x", path, prev, blob) - return false - } - } } if nodes != nil { triedb.Update(NewWithNodeSet(nodes)) @@ -477,7 +469,6 @@ func runRandTest(rt randTest) bool { // Enable node tracing. Resolve the root node again explicitly // since it's not captured at the beginning. - tr.tracer = newTracer() tr.resolveAndTrack(root.Bytes(), nil) origTrie = tr.Copy() @@ -490,59 +481,6 @@ func runRandTest(rt randTest) bool { if tr.Hash() != checktr.Hash() { rt[i].err = fmt.Errorf("hash mismatch in opItercheckhash") } - case opNodeDiff: - var ( - inserted = tr.tracer.insertList() - deleted = tr.tracer.deleteList() - origIter = origTrie.NodeIterator(nil) - curIter = tr.NodeIterator(nil) - origSeen = make(map[string]struct{}) - curSeen = make(map[string]struct{}) - ) - for origIter.Next(true) { - if origIter.Leaf() { - continue - } - origSeen[string(origIter.Path())] = struct{}{} - } - for curIter.Next(true) { - if curIter.Leaf() { - continue - } - curSeen[string(curIter.Path())] = struct{}{} - } - var ( - insertExp = make(map[string]struct{}) - deleteExp = make(map[string]struct{}) - ) - for path := range curSeen { - _, present := origSeen[path] - if !present { - insertExp[path] = struct{}{} - } - } - for path := range origSeen { - _, present := curSeen[path] - if !present { - deleteExp[path] = struct{}{} - } - } - if len(insertExp) != len(inserted) { - rt[i].err = fmt.Errorf("insert set mismatch") - } - if len(deleteExp) != len(deleted) { - rt[i].err = fmt.Errorf("delete set mismatch") - } - for _, insert := range inserted { - if _, present := insertExp[string(insert)]; !present { - rt[i].err = fmt.Errorf("missing inserted node") - } - } - for _, del := range deleted { - if _, present := deleteExp[string(del)]; !present { - rt[i].err = fmt.Errorf("missing deleted node") - } - } } // Abort the test on error. 
if rt[i].err != nil { diff --git a/trie/util_test.go b/trie/util_test.go deleted file mode 100644 index 01a46553a..000000000 --- a/trie/util_test.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -import ( - "bytes" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" -) - -// Tests if the trie diffs are tracked correctly. -func TestTrieTracer(t *testing.T) { - db := NewDatabase(rawdb.NewMemoryDatabase()) - trie := NewEmpty(db) - trie.tracer = newTracer() - - // Insert a batch of entries, all the nodes should be marked as inserted - vals := []struct{ k, v string }{ - {"do", "verb"}, - {"ether", "wookiedoo"}, - {"horse", "stallion"}, - {"shaman", "horse"}, - {"doge", "coin"}, - {"dog", "puppy"}, - {"somethingveryoddindeedthis is", "myothernodedata"}, - } - for _, val := range vals { - trie.Update([]byte(val.k), []byte(val.v)) - } - trie.Hash() - - seen := make(map[string]struct{}) - it := trie.NodeIterator(nil) - for it.Next(true) { - if it.Leaf() { - continue - } - seen[string(it.Path())] = struct{}{} - } - inserted := trie.tracer.insertList() - if len(inserted) != len(seen) { - t.Fatalf("Unexpected inserted node tracked want %d got %d", len(seen), len(inserted)) - } - for _, k := range inserted { - _, ok := seen[string(k)] - if !ok { - t.Fatalf("Unexpected inserted node") - } - } - deleted := trie.tracer.deleteList() - if len(deleted) != 0 { - t.Fatalf("Unexpected deleted node tracked %d", len(deleted)) - } - - // Commit the changes and re-create with new root - root, nodes := trie.Commit(false) - if err := db.Update(NewWithNodeSet(nodes)); err != nil { - t.Fatal(err) - } - trie, _ = New(TrieID(root), db) - trie.tracer = newTracer() - - // Delete all the elements, check deletion set - for _, val := range vals { - trie.Delete([]byte(val.k)) - } - trie.Hash() - - inserted = trie.tracer.insertList() - if len(inserted) != 0 { - t.Fatalf("Unexpected inserted node tracked %d", len(inserted)) - } - deleted = trie.tracer.deleteList() - if len(deleted) != len(seen) { - t.Fatalf("Unexpected deleted node tracked want %d got %d", len(seen), len(deleted)) - } - for _, k := range deleted { - _, ok := seen[string(k)] - if !ok { - t.Fatalf("Unexpected inserted node") - } - } -} - -func TestTrieTracerNoop(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) - trie.tracer = newTracer() - - // Insert a batch of entries, all the nodes should be marked as inserted - vals := []struct{ k, v string }{ - {"do", "verb"}, - {"ether", "wookiedoo"}, - {"horse", "stallion"}, - {"shaman", "horse"}, - {"doge", "coin"}, - {"dog", "puppy"}, - {"somethingveryoddindeedthis is", "myothernodedata"}, - } - for _, val := range vals { - trie.Update([]byte(val.k), []byte(val.v)) - } - for _, 
val := range vals { - trie.Delete([]byte(val.k)) - } - if len(trie.tracer.insertList()) != 0 { - t.Fatalf("Unexpected inserted node tracked %d", len(trie.tracer.insertList())) - } - if len(trie.tracer.deleteList()) != 0 { - t.Fatalf("Unexpected deleted node tracked %d", len(trie.tracer.deleteList())) - } -} - -func TestTrieTracePrevValue(t *testing.T) { - db := NewDatabase(rawdb.NewMemoryDatabase()) - trie := NewEmpty(db) - trie.tracer = newTracer() - - paths, blobs := trie.tracer.prevList() - if len(paths) != 0 || len(blobs) != 0 { - t.Fatalf("Nothing should be tracked") - } - // Insert a batch of entries, all the nodes should be marked as inserted - vals := []struct{ k, v string }{ - {"do", "verb"}, - {"ether", "wookiedoo"}, - {"horse", "stallion"}, - {"shaman", "horse"}, - {"doge", "coin"}, - {"dog", "puppy"}, - {"somethingveryoddindeedthis is", "myothernodedata"}, - } - for _, val := range vals { - trie.Update([]byte(val.k), []byte(val.v)) - } - paths, blobs = trie.tracer.prevList() - if len(paths) != 0 || len(blobs) != 0 { - t.Fatalf("Nothing should be tracked") - } - - // Commit the changes and re-create with new root - root, nodes := trie.Commit(false) - if err := db.Update(NewWithNodeSet(nodes)); err != nil { - t.Fatal(err) - } - trie, _ = New(TrieID(root), db) - trie.tracer = newTracer() - trie.resolveAndTrack(root.Bytes(), nil) - - // Load all nodes in trie - for _, val := range vals { - trie.TryGet([]byte(val.k)) - } - - // Ensure all nodes are tracked by tracer with correct prev-values - iter := trie.NodeIterator(nil) - seen := make(map[string][]byte) - for iter.Next(true) { - // Embedded nodes are ignored since they are not present in - // database. - if iter.Hash() == (common.Hash{}) { - continue - } - seen[string(iter.Path())] = common.CopyBytes(iter.NodeBlob()) - } - - paths, blobs = trie.tracer.prevList() - if len(paths) != len(seen) || len(blobs) != len(seen) { - t.Fatalf("Unexpected tracked values") - } - for i, path := range paths { - blob := blobs[i] - prev, ok := seen[string(path)] - if !ok { - t.Fatalf("Missing node %v", path) - } - if !bytes.Equal(blob, prev) { - t.Fatalf("Unexpected value path: %v, want: %v, got: %v", path, prev, blob) - } - } - - // Re-open the trie and iterate the trie, ensure nothing will be tracked. - // Iterator will not link any loaded nodes to trie. - trie, _ = New(TrieID(root), db) - trie.tracer = newTracer() - - iter = trie.NodeIterator(nil) - for iter.Next(true) { - } - paths, blobs = trie.tracer.prevList() - if len(paths) != 0 || len(blobs) != 0 { - t.Fatalf("Nothing should be tracked") - } - - // Re-open the trie and generate proof for entries, ensure nothing will - // be tracked. Prover will not link any loaded nodes to trie. - trie, _ = New(TrieID(root), db) - trie.tracer = newTracer() - for _, val := range vals { - trie.Prove([]byte(val.k), 0, rawdb.NewMemoryDatabase()) - } - paths, blobs = trie.tracer.prevList() - if len(paths) != 0 || len(blobs) != 0 { - t.Fatalf("Nothing should be tracked") - } - - // Delete entries from trie, ensure all previous values are correct. 
- trie, _ = New(TrieID(root), db) - trie.tracer = newTracer() - trie.resolveAndTrack(root.Bytes(), nil) - - for _, val := range vals { - trie.TryDelete([]byte(val.k)) - } - paths, blobs = trie.tracer.prevList() - if len(paths) != len(seen) || len(blobs) != len(seen) { - t.Fatalf("Unexpected tracked values") - } - for i, path := range paths { - blob := blobs[i] - prev, ok := seen[string(path)] - if !ok { - t.Fatalf("Missing node %v", path) - } - if !bytes.Equal(blob, prev) { - t.Fatalf("Unexpected value path: %v, want: %v, got: %v", path, prev, blob) - } - } -} - -func TestDeleteAll(t *testing.T) { - db := NewDatabase(rawdb.NewMemoryDatabase()) - trie := NewEmpty(db) - trie.tracer = newTracer() - - // Insert a batch of entries, all the nodes should be marked as inserted - vals := []struct{ k, v string }{ - {"do", "verb"}, - {"ether", "wookiedoo"}, - {"horse", "stallion"}, - {"shaman", "horse"}, - {"doge", "coin"}, - {"dog", "puppy"}, - {"somethingveryoddindeedthis is", "myothernodedata"}, - } - for _, val := range vals { - trie.Update([]byte(val.k), []byte(val.v)) - } - root, set := trie.Commit(false) - if err := db.Update(NewWithNodeSet(set)); err != nil { - t.Fatal(err) - } - // Delete entries from trie, ensure all values are detected - trie, _ = New(TrieID(root), db) - trie.tracer = newTracer() - trie.resolveAndTrack(root.Bytes(), nil) - - // Iterate all existent nodes - var ( - it = trie.NodeIterator(nil) - nodes = make(map[string][]byte) - ) - for it.Next(true) { - if it.Hash() != (common.Hash{}) { - nodes[string(it.Path())] = common.CopyBytes(it.NodeBlob()) - } - } - - // Perform deletion to purge the entire trie - for _, val := range vals { - trie.Delete([]byte(val.k)) - } - root, set = trie.Commit(false) - if root != emptyRoot { - t.Fatalf("Invalid trie root %v", root) - } - for path, blob := range set.deletes { - prev, ok := nodes[path] - if !ok { - t.Fatalf("Extra node deleted %v", []byte(path)) - } - if !bytes.Equal(prev, blob) { - t.Fatalf("Unexpected previous value %v", []byte(path)) - } - } - if len(set.deletes) != len(nodes) { - t.Fatalf("Unexpected deletion set") - } -} diff --git a/trie/utils.go b/trie/utils.go deleted file mode 100644 index 5dce65cd2..000000000 --- a/trie/utils.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -// tracer tracks the changes of trie nodes. During the trie operations, -// some nodes can be deleted from the trie, while these deleted nodes -// won't be captured by trie.Hasher or trie.Committer. Thus, these deleted -// nodes won't be removed from the disk at all. Tracer is an auxiliary tool -// used to track all insert and delete operations of trie and capture all -// deleted nodes eventually. 
-// -// The changed nodes can be mainly divided into two categories: the leaf -// node and intermediate node. The former is inserted/deleted by callers -// while the latter is inserted/deleted in order to follow the rule of trie. -// This tool can track all of them no matter the node is embedded in its -// parent or not, but valueNode is never tracked. -// -// Besides, it's also used for recording the original value of the nodes -// when they are resolved from the disk. The pre-value of the nodes will -// be used to construct reverse-diffs in the future. -// -// Note tracer is not thread-safe, callers should be responsible for handling -// the concurrency issues by themselves. -type tracer struct { - insert map[string]struct{} - delete map[string]struct{} - origin map[string][]byte -} - -// newTracer initializes the tracer for capturing trie changes. -func newTracer() *tracer { - return &tracer{ - insert: make(map[string]struct{}), - delete: make(map[string]struct{}), - origin: make(map[string][]byte), - } -} - -// onRead tracks the newly loaded trie node and caches the rlp-encoded blob internally. -// Don't change the value outside of function since it's not deep-copied. -func (t *tracer) onRead(path []byte, val []byte) { - // Tracer isn't used right now, remove this check later. - if t == nil { - return - } - t.origin[string(path)] = val -} - -// onInsert tracks the newly inserted trie node. If it's already in the deletion set -// (resurrected node), then just wipe it from the deletion set as the "untouched". -func (t *tracer) onInsert(path []byte) { - // Tracer isn't used right now, remove this check later. - if t == nil { - return - } - if _, present := t.delete[string(path)]; present { - delete(t.delete, string(path)) - return - } - t.insert[string(path)] = struct{}{} -} - -// onDelete tracks the newly deleted trie node. If it's already -// in the addition set, then just wipe it from the addition set -// as it's untouched. -func (t *tracer) onDelete(path []byte) { - // Tracer isn't used right now, remove this check later. - if t == nil { - return - } - if _, present := t.insert[string(path)]; present { - delete(t.insert, string(path)) - return - } - t.delete[string(path)] = struct{}{} -} - -// insertList returns the tracked inserted trie nodes in list format. -func (t *tracer) insertList() [][]byte { - // Tracer isn't used right now, remove this check later. - if t == nil { - return nil - } - var ret [][]byte - for path := range t.insert { - ret = append(ret, []byte(path)) - } - return ret -} - -// deleteList returns the tracked deleted trie nodes in list format. -func (t *tracer) deleteList() [][]byte { - // Tracer isn't used right now, remove this check later. - if t == nil { - return nil - } - var ret [][]byte - for path := range t.delete { - ret = append(ret, []byte(path)) - } - return ret -} - -// prevList returns the tracked node blobs in list format. -func (t *tracer) prevList() ([][]byte, [][]byte) { - // Tracer isn't used right now, remove this check later. - if t == nil { - return nil, nil - } - var ( - paths [][]byte - blobs [][]byte - ) - for path, blob := range t.origin { - paths = append(paths, []byte(path)) - blobs = append(blobs, blob) - } - return paths, blobs -} - -// getPrev returns the cached original value of the specified node. -func (t *tracer) getPrev(path []byte) []byte { - // Tracer isn't used right now, remove this check later. - if t == nil { - return nil - } - return t.origin[string(path)] -} - -// reset clears the content tracked by tracer. 
-func (t *tracer) reset() { - // Tracer isn't used right now, remove this check later. - if t == nil { - return - } - t.insert = make(map[string]struct{}) - t.delete = make(map[string]struct{}) - t.origin = make(map[string][]byte) -} - -// copy returns a deep copied tracer instance. -func (t *tracer) copy() *tracer { - // Tracer isn't used right now, remove this check later. - if t == nil { - return nil - } - var ( - insert = make(map[string]struct{}) - delete = make(map[string]struct{}) - origin = make(map[string][]byte) - ) - for key := range t.insert { - insert[key] = struct{}{} - } - for key := range t.delete { - delete[key] = struct{}{} - } - for key, val := range t.origin { - origin[key] = val - } - return &tracer{ - insert: insert, - delete: delete, - origin: origin, - } -} - -// markDeletions puts all tracked deletions into the provided nodeset. -func (t *tracer) markDeletions(set *NodeSet) { - // Tracer isn't used right now, remove this check later. - if t == nil { - return - } - for _, path := range t.deleteList() { - // There are a few possibilities for this scenario(the node is deleted - // but not present in database previously), for example the node was - // embedded in the parent and now deleted from the trie. In this case - // it's noop from database's perspective. - val := t.getPrev(path) - if len(val) == 0 { - continue - } - set.markDeleted(path, val) - } -} From 1db978ca6b28b28d5eda9cf04b519ee301b05345 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Sun, 19 Feb 2023 20:23:18 +0100 Subject: [PATCH 05/26] rpc: fix unmarshaling of null result in CallContext (#26723) The change fixes unmarshaling of JSON null results into json.RawMessage. --------- Co-authored-by: Jason Yuan Co-authored-by: Jason Yuan --- rpc/client.go | 5 ++++- rpc/client_test.go | 20 ++++++++++++++++++++ rpc/server_test.go | 2 +- rpc/testservice_test.go | 4 ++++ 4 files changed, 29 insertions(+), 2 deletions(-) diff --git a/rpc/client.go b/rpc/client.go index a509cb2e0..69ff4851e 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -345,7 +345,10 @@ func (c *Client) CallContext(ctx context.Context, result interface{}, method str case len(resp.Result) == 0: return ErrNoResult default: - return json.Unmarshal(resp.Result, &result) + if result == nil { + return nil + } + return json.Unmarshal(resp.Result, result) } } diff --git a/rpc/client_test.go b/rpc/client_test.go index 0a88ce40b..a94a54929 100644 --- a/rpc/client_test.go +++ b/rpc/client_test.go @@ -69,6 +69,26 @@ func TestClientResponseType(t *testing.T) { } } +// This test checks calling a method that returns 'null'. +func TestClientNullResponse(t *testing.T) { + server := newTestServer() + defer server.Stop() + + client := DialInProc(server) + defer client.Close() + + var result json.RawMessage + if err := client.Call(&result, "test_null"); err != nil { + t.Fatal(err) + } + if result == nil { + t.Fatal("Expected non-nil result") + } + if !reflect.DeepEqual(result, json.RawMessage("null")) { + t.Errorf("Expected null, got %s", result) + } +} + // This test checks that server-returned errors with code and data come out of Client.Call. 
func TestClientErrorData(t *testing.T) { server := newTestServer() diff --git a/rpc/server_test.go b/rpc/server_test.go index c9abe53e5..f1a9b3d5c 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -45,7 +45,7 @@ func TestServerRegisterName(t *testing.T) { t.Fatalf("Expected service calc to be registered") } - wantCallbacks := 12 + wantCallbacks := 13 if len(svc.callbacks) != wantCallbacks { t.Errorf("Expected %d callbacks for service 'service', got %d", wantCallbacks, len(svc.callbacks)) } diff --git a/rpc/testservice_test.go b/rpc/testservice_test.go index 8454a4019..eab67f1dd 100644 --- a/rpc/testservice_test.go +++ b/rpc/testservice_test.go @@ -78,6 +78,10 @@ func (o *MarshalErrObj) MarshalText() ([]byte, error) { func (s *testService) NoArgsRets() {} +func (s *testService) Null() any { + return nil +} + func (s *testService) Echo(str string, i int, args *echoArgs) echoResult { return echoResult{str, i, args} } From 2166c86041800c91b64c2b72c5b33512e529ec77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 20 Feb 2023 08:53:15 +0200 Subject: [PATCH 06/26] build: ship bootstrapper Go along with builder for PPA (#26731) --- build/checksums.txt | 3 +++ build/ci.go | 45 +++++++++++++++++++++++++++++------- build/deb/ethereum/deb.rules | 5 ++++ 3 files changed, 45 insertions(+), 8 deletions(-) diff --git a/build/checksums.txt b/build/checksums.txt index d00d9bf96..0cbf700c9 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -41,3 +41,6 @@ c4f58b7e227b9fd41f0e9310dc83f4a4e7d026598e2f6e95b78761081a6d9bd2 golangci-lint- eb57f9bcb56646f2e3d6ccaf02ec227815fb05077b2e0b1bf9e755805acdc2b9 golangci-lint-1.51.1-windows-arm64.zip bce02f7232723cb727755ee11f168a700a00896a25d37f87c4b173bce55596b4 golangci-lint-1.51.1-windows-armv6.zip cf6403f84707ce8c98664736772271bc8874f2e760c2fd0f00cf3e85963507e9 golangci-lint-1.51.1-windows-armv7.zip + +# This is the builder on PPA that will build Go itself (inception-y), don't modify! +9419cc70dc5a2523f29a77053cafff658ed21ef3561d9b6b020280ebceab28b9 go1.19.src.tar.gz diff --git a/build/ci.go b/build/ci.go index 173dc7cc0..0a6eb2b49 100644 --- a/build/ci.go +++ b/build/ci.go @@ -136,10 +136,18 @@ var ( "golang-go": "/usr/lib/go", } - // This is the version of go that will be downloaded by + // This is the version of Go that will be downloaded by // // go run ci.go install -dlgo dlgoVersion = "1.20.1" + + // This is the version of Go that will be used to bootstrap the PPA builder. + // + // This version is fine to be old and full of security holes, we just use it + // to build the latest Go. Don't change it. If it ever becomes infufficient, + // we need to switch over to a recursive builder to jumpt across supported + // versions. + gobootVersion = "1.19" ) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) @@ -655,10 +663,11 @@ func doDebianSource(cmdline []string) { gpg.Stdin = bytes.NewReader(key) build.MustRun(gpg) } - - // Download and verify the Go source package. - gobundle := downloadGoSources(*cachedir) - + // Download and verify the Go source packages. 
+ var ( + gobootbundle = downloadGoBootstrapSources(*cachedir) + gobundle = downloadGoSources(*cachedir) + ) // Download all the dependencies needed to build the sources and run the ci script srcdepfetch := tc.Go("mod", "download") srcdepfetch.Env = append(srcdepfetch.Env, "GOPATH="+filepath.Join(*workdir, "modgopath")) @@ -675,12 +684,19 @@ func doDebianSource(cmdline []string) { meta := newDebMetadata(distro, goboot, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables) pkgdir := stageDebianSource(*workdir, meta) - // Add Go source code + // Add bootstrapper Go source code + if err := build.ExtractArchive(gobootbundle, pkgdir); err != nil { + log.Fatalf("Failed to extract bootstrapper Go sources: %v", err) + } + if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".goboot")); err != nil { + log.Fatalf("Failed to rename bootstrapper Go source folder: %v", err) + } + // Add builder Go source code if err := build.ExtractArchive(gobundle, pkgdir); err != nil { - log.Fatalf("Failed to extract Go sources: %v", err) + log.Fatalf("Failed to extract builder Go sources: %v", err) } if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".go")); err != nil { - log.Fatalf("Failed to rename Go source folder: %v", err) + log.Fatalf("Failed to rename builder Go source folder: %v", err) } // Add all dependency modules in compressed form os.MkdirAll(filepath.Join(pkgdir, ".mod", "cache"), 0755) @@ -709,6 +725,19 @@ func doDebianSource(cmdline []string) { } } +// downloadGoBootstrapSources downloads the Go source tarball that will be used +// to bootstrap the builder Go. +func downloadGoBootstrapSources(cachedir string) string { + csdb := build.MustLoadChecksums("build/checksums.txt") + file := fmt.Sprintf("go%s.src.tar.gz", gobootVersion) + url := "https://dl.google.com/go/" + file + dst := filepath.Join(cachedir, file) + if err := csdb.DownloadFile(url, dst); err != nil { + log.Fatal(err) + } + return dst +} + // downloadGoSources downloads the Go source tarball. func downloadGoSources(cachedir string) string { csdb := build.MustLoadChecksums("build/checksums.txt") diff --git a/build/deb/ethereum/deb.rules b/build/deb/ethereum/deb.rules index 0677ef91e..882b45d14 100644 --- a/build/deb/ethereum/deb.rules +++ b/build/deb/ethereum/deb.rules @@ -16,6 +16,11 @@ override_dh_auto_build: # We can't download a fresh Go within Launchpad, so we're shipping and building # one on the fly. However, we can't build it inside the go-ethereum folder as # bootstrapping clashes with go modules, so build in a sibling folder. + # + # We're also shipping the bootstrapper as of Go 1.20 as it had minimum version + # requirements opposed to older versions of Go. 
+ (mv .goboot ../ && cd ../.goboot/src && ./make.bash) + (cd ../.goboot/bin && export GOROOT_BOOTSTRAP=`pwd`) (mv .go ../ && cd ../.go/src && ./make.bash) # We can't download external go modules within Launchpad, so we're shipping the From e1e2781105130b8cce6d34b33df7ea90838c1061 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 20 Feb 2023 09:56:03 +0200 Subject: [PATCH 07/26] build: fix setting env var, temp early exit --- build/ci.go | 1 + build/deb/ethereum/deb.rules | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/build/ci.go b/build/ci.go index 0a6eb2b49..9230945e9 100644 --- a/build/ci.go +++ b/build/ci.go @@ -721,6 +721,7 @@ func doDebianSource(cmdline []string) { if *upload != "" { ppaUpload(*workdir, *upload, *sshUser, []string{source, dsc, changes, buildinfo}) } + return } } } diff --git a/build/deb/ethereum/deb.rules b/build/deb/ethereum/deb.rules index 882b45d14..885444a7e 100644 --- a/build/deb/ethereum/deb.rules +++ b/build/deb/ethereum/deb.rules @@ -20,7 +20,7 @@ override_dh_auto_build: # We're also shipping the bootstrapper as of Go 1.20 as it had minimum version # requirements opposed to older versions of Go. (mv .goboot ../ && cd ../.goboot/src && ./make.bash) - (cd ../.goboot/bin && export GOROOT_BOOTSTRAP=`pwd`) + export GOROOT_BOOTSTRAP=`pwd`/../.goboot/bin (mv .go ../ && cd ../.go/src && ./make.bash) # We can't download external go modules within Launchpad, so we're shipping the From 4ec4235fc41b8bfcb5d959f2893cce79ff8cbb16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 20 Feb 2023 10:09:00 +0200 Subject: [PATCH 08/26] build: fix gobootstrap path for the PPA --- build/deb/ethereum/deb.rules | 2 +- internal/build/util.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/build/deb/ethereum/deb.rules b/build/deb/ethereum/deb.rules index 885444a7e..6dfe0f8e0 100644 --- a/build/deb/ethereum/deb.rules +++ b/build/deb/ethereum/deb.rules @@ -20,7 +20,7 @@ override_dh_auto_build: # We're also shipping the bootstrapper as of Go 1.20 as it had minimum version # requirements opposed to older versions of Go. (mv .goboot ../ && cd ../.goboot/src && ./make.bash) - export GOROOT_BOOTSTRAP=`pwd`/../.goboot/bin + export GOROOT_BOOTSTRAP=`pwd`/../.goboot (mv .go ../ && cd ../.go/src && ./make.bash) # We can't download external go modules within Launchpad, so we're shipping the diff --git a/internal/build/util.go b/internal/build/util.go index 9a721e9b8..ec25e1dac 100644 --- a/internal/build/util.go +++ b/internal/build/util.go @@ -176,6 +176,7 @@ func UploadSFTP(identityFile, host, dir string, files []string) error { time.Sleep(500 * time.Millisecond) aborted = true sftp.Process.Kill() + return } } }() From a43efceaf2f8a661ad8f4d3f07aa321a82749f99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 20 Feb 2023 10:31:35 +0200 Subject: [PATCH 09/26] build: add some PPA debug logs, sigh --- build/deb/ethereum/deb.rules | 1 + internal/build/util.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/build/deb/ethereum/deb.rules b/build/deb/ethereum/deb.rules index 6dfe0f8e0..5797da538 100644 --- a/build/deb/ethereum/deb.rules +++ b/build/deb/ethereum/deb.rules @@ -21,6 +21,7 @@ override_dh_auto_build: # requirements opposed to older versions of Go. 
(mv .goboot ../ && cd ../.goboot/src && ./make.bash) export GOROOT_BOOTSTRAP=`pwd`/../.goboot + echo $GOROOT_BOOTSTRAP (mv .go ../ && cd ../.go/src && ./make.bash) # We can't download external go modules within Launchpad, so we're shipping the diff --git a/internal/build/util.go b/internal/build/util.go index ec25e1dac..91b5d71a6 100644 --- a/internal/build/util.go +++ b/internal/build/util.go @@ -130,7 +130,7 @@ func render(tpl *template.Template, outputFile string, outputPerm os.FileMode, x // The destination host may be specified either as [user@]host[: or as a URI in // the form sftp://[user@]host[:port]. func UploadSFTP(identityFile, host, dir string, files []string) error { - sftp := exec.Command("sftp") + sftp := exec.Command("sftp", "-B", "262144") sftp.Stderr = os.Stderr if identityFile != "" { sftp.Args = append(sftp.Args, "-i", identityFile) From 165268430c773376139931d1bde1e3d07d96f203 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 20 Feb 2023 10:43:55 +0200 Subject: [PATCH 10/26] internal/build: revert raising the chunk size for PPA --- internal/build/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/build/util.go b/internal/build/util.go index 91b5d71a6..ec25e1dac 100644 --- a/internal/build/util.go +++ b/internal/build/util.go @@ -130,7 +130,7 @@ func render(tpl *template.Template, outputFile string, outputPerm os.FileMode, x // The destination host may be specified either as [user@]host[: or as a URI in // the form sftp://[user@]host[:port]. func UploadSFTP(identityFile, host, dir string, files []string) error { - sftp := exec.Command("sftp", "-B", "262144") + sftp := exec.Command("sftp") sftp.Stderr = os.Stderr if identityFile != "" { sftp.Args = append(sftp.Args, "-i", identityFile) From c02334be1e97b138a4abb478f8c86d67104a9087 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 20 Feb 2023 11:07:33 +0200 Subject: [PATCH 11/26] build: yet another weird PPA fix --- build/deb/ethereum/deb.rules | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build/deb/ethereum/deb.rules b/build/deb/ethereum/deb.rules index 5797da538..b12011ef7 100644 --- a/build/deb/ethereum/deb.rules +++ b/build/deb/ethereum/deb.rules @@ -21,8 +21,8 @@ override_dh_auto_build: # requirements opposed to older versions of Go. (mv .goboot ../ && cd ../.goboot/src && ./make.bash) export GOROOT_BOOTSTRAP=`pwd`/../.goboot - echo $GOROOT_BOOTSTRAP - (mv .go ../ && cd ../.go/src && ./make.bash) + echo $(GOROOT_BOOTSTRAP) + (mv .go ../ && cd ../.go/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot ./make.bash) # We can't download external go modules within Launchpad, so we're shipping the # entire dependency source cache with go-ethereum. From 45190548160f97408ce3a13c73dc62d0acd76ba5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 20 Feb 2023 11:31:19 +0200 Subject: [PATCH 12/26] build: fix (finaly?) 
the PPA env vars for Go bootstrapping --- build/ci.go | 1 - build/deb/ethereum/deb.rules | 2 -- 2 files changed, 3 deletions(-) diff --git a/build/ci.go b/build/ci.go index 9230945e9..0a6eb2b49 100644 --- a/build/ci.go +++ b/build/ci.go @@ -721,7 +721,6 @@ func doDebianSource(cmdline []string) { if *upload != "" { ppaUpload(*workdir, *upload, *sshUser, []string{source, dsc, changes, buildinfo}) } - return } } } diff --git a/build/deb/ethereum/deb.rules b/build/deb/ethereum/deb.rules index b12011ef7..475e628b2 100644 --- a/build/deb/ethereum/deb.rules +++ b/build/deb/ethereum/deb.rules @@ -20,8 +20,6 @@ override_dh_auto_build: # We're also shipping the bootstrapper as of Go 1.20 as it had minimum version # requirements opposed to older versions of Go. (mv .goboot ../ && cd ../.goboot/src && ./make.bash) - export GOROOT_BOOTSTRAP=`pwd`/../.goboot - echo $(GOROOT_BOOTSTRAP) (mv .go ../ && cd ../.go/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot ./make.bash) # We can't download external go modules within Launchpad, so we're shipping the From 41dee2623eccfe5bc54a4d9281aa7da71a4ed200 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 20 Feb 2023 12:36:46 +0200 Subject: [PATCH 13/26] build: fix Go 1.19.0 bootstrapper issues on 386 PPA --- build/checksums.txt | 2 +- build/ci.go | 2 +- internal/build/util.go | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/build/checksums.txt b/build/checksums.txt index 0cbf700c9..1fb85d626 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -43,4 +43,4 @@ bce02f7232723cb727755ee11f168a700a00896a25d37f87c4b173bce55596b4 golangci-lint- cf6403f84707ce8c98664736772271bc8874f2e760c2fd0f00cf3e85963507e9 golangci-lint-1.51.1-windows-armv7.zip # This is the builder on PPA that will build Go itself (inception-y), don't modify! -9419cc70dc5a2523f29a77053cafff658ed21ef3561d9b6b020280ebceab28b9 go1.19.src.tar.gz +d7f0013f82e6d7f862cc6cb5c8cdb48eef5f2e239b35baa97e2f1a7466043767 go1.19.6.src.tar.gz diff --git a/build/ci.go b/build/ci.go index 0a6eb2b49..73ec227b3 100644 --- a/build/ci.go +++ b/build/ci.go @@ -147,7 +147,7 @@ var ( // to build the latest Go. Don't change it. If it ever becomes infufficient, // we need to switch over to a recursive builder to jumpt across supported // versions. - gobootVersion = "1.19" + gobootVersion = "1.19.6" ) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) diff --git a/internal/build/util.go b/internal/build/util.go index ec25e1dac..9a721e9b8 100644 --- a/internal/build/util.go +++ b/internal/build/util.go @@ -176,7 +176,6 @@ func UploadSFTP(identityFile, host, dir string, files []string) error { time.Sleep(500 * time.Millisecond) aborted = true sftp.Process.Kill() - return } } }() From ba4267fcacdb45fe4480773011d7647316f6cd97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 20 Feb 2023 13:26:37 +0200 Subject: [PATCH 14/26] build: enable Lunar Lobster PPA builds --- build/ci.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build/ci.go b/build/ci.go index 73ec227b3..2aad2ac52 100644 --- a/build/ci.go +++ b/build/ci.go @@ -128,7 +128,7 @@ var ( "focal": "golang-go", // EOL: 04/2030 "jammy": "golang-go", // EOL: 04/2032 "kinetic": "golang-go", // EOL: 07/2023 - //"lunar": "golang-go", // EOL: 01/2024 + "lunar": "golang-go", // EOL: 01/2024 } debGoBootPaths = map[string]string{ @@ -144,7 +144,7 @@ var ( // This is the version of Go that will be used to bootstrap the PPA builder. 
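The thread running through PATCH 07 to PATCH 12 above is shell scoping: every line of a debian rules recipe runs in its own shell, so an export issued on one line, or inside a ( ... ) subshell, is gone by the time the next line executes. The change that finally sticks passes GOROOT_BOOTSTRAP inline on the very command that needs it. A minimal Go illustration of the same principle, not part of the patch and assuming a POSIX sh is on PATH:

    package main

    import (
        "fmt"
        "os"
        "os/exec"
    )

    func main() {
        // The child shell exports the variable and then exits; the parent
        // environment is untouched, just like the next recipe line in make.
        if err := exec.Command("sh", "-c", "export GOROOT_BOOTSTRAP=/tmp/goboot").Run(); err != nil {
            fmt.Println("sh not available:", err)
            return
        }
        fmt.Printf("parent sees %q\n", os.Getenv("GOROOT_BOOTSTRAP")) // prints ""
    }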
// // This version is fine to be old and full of security holes, we just use it - // to build the latest Go. Don't change it. If it ever becomes infufficient, + // to build the latest Go. Don't change it. If it ever becomes insufficient, // we need to switch over to a recursive builder to jumpt across supported // versions. gobootVersion = "1.19.6" From 13ef21d4672742e805316df9f319d51e749a129d Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Mon, 20 Feb 2023 22:54:52 +0800 Subject: [PATCH 15/26] Revert "core/trie: remove trie tracer (#26665)" (#26732) This reverts commit 7c749c947a9d5181f5f2c1b3fdb5ea6b0e401e8e. --- core/state/metrics.go | 14 +- core/state/statedb.go | 29 ++-- trie/committer.go | 29 +++- trie/database.go | 9 +- trie/nodeset.go | 120 ++++++++--------- trie/trie.go | 71 ++++++---- trie/trie_test.go | 66 ++++++++- trie/util_test.go | 304 ++++++++++++++++++++++++++++++++++++++++++ trie/utils.go | 199 +++++++++++++++++++++++++++ 9 files changed, 725 insertions(+), 116 deletions(-) create mode 100644 trie/util_test.go create mode 100644 trie/utils.go diff --git a/core/state/metrics.go b/core/state/metrics.go index 086666b7d..e702ef3a8 100644 --- a/core/state/metrics.go +++ b/core/state/metrics.go @@ -19,10 +19,12 @@ package state import "github.com/ethereum/go-ethereum/metrics" var ( - accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil) - storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil) - accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil) - storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil) - accountTrieNodesMeter = metrics.NewRegisteredMeter("state/trie/account", nil) - storageTriesNodesMeter = metrics.NewRegisteredMeter("state/trie/storage", nil) + accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil) + storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil) + accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil) + storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil) + accountTrieUpdatedMeter = metrics.NewRegisteredMeter("state/update/accountnodes", nil) + storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil) + accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil) + storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil) ) diff --git a/core/state/statedb.go b/core/state/statedb.go index 4385c1976..3f4bec239 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -970,11 +970,13 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { // Commit objects to the trie, measuring the elapsed time var ( - accountTrieNodes int - storageTrieNodes int - nodes = trie.NewMergedNodeSet() - codeWriter = s.db.DiskDB().NewBatch() + accountTrieNodesUpdated int + accountTrieNodesDeleted int + storageTrieNodesUpdated int + storageTrieNodesDeleted int + nodes = trie.NewMergedNodeSet() ) + codeWriter := s.db.DiskDB().NewBatch() for addr := range s.stateObjectsDirty { if obj := s.stateObjects[addr]; !obj.deleted { // Write any contract code associated with the state object @@ -992,9 +994,17 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { if err := nodes.Merge(set); err != nil { return common.Hash{}, err } - storageTrieNodes += set.Size() + updates, deleted := set.Size() + storageTrieNodesUpdated += updates + storageTrieNodesDeleted += deleted } 
} + // If the contract is destructed, the storage is still left in the + // database as dangling data. Theoretically it's should be wiped from + // database as well, but in hash-based-scheme it's extremely hard to + // determine that if the trie nodes are also referenced by other storage, + // and in path-based-scheme some technical challenges are still unsolved. + // Although it won't affect the correctness but please fix it TODO(rjl493456442). } if len(s.stateObjectsDirty) > 0 { s.stateObjectsDirty = make(map[common.Address]struct{}) @@ -1015,7 +1025,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { if err := nodes.Merge(set); err != nil { return common.Hash{}, err } - accountTrieNodes = set.Size() + accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size() } if metrics.EnabledExpensive { s.AccountCommits += time.Since(start) @@ -1024,9 +1034,10 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { storageUpdatedMeter.Mark(int64(s.StorageUpdated)) accountDeletedMeter.Mark(int64(s.AccountDeleted)) storageDeletedMeter.Mark(int64(s.StorageDeleted)) - accountTrieNodesMeter.Mark(int64(accountTrieNodes)) - storageTriesNodesMeter.Mark(int64(storageTrieNodes)) - + accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated)) + accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted)) + storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated)) + storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted)) s.AccountUpdated, s.AccountDeleted = 0, 0 s.StorageUpdated, s.StorageDeleted = 0, 0 } diff --git a/trie/committer.go b/trie/committer.go index 745cb7683..c4957f349 100644 --- a/trie/committer.go +++ b/trie/committer.go @@ -33,22 +33,29 @@ type leaf struct { // insertion order. type committer struct { nodes *NodeSet + tracer *tracer collectLeaf bool } // newCommitter creates a new committer or picks one from the pool. -func newCommitter(nodes *NodeSet, collectLeaf bool) *committer { +func newCommitter(owner common.Hash, tracer *tracer, collectLeaf bool) *committer { return &committer{ - nodes: nodes, + nodes: NewNodeSet(owner), + tracer: tracer, collectLeaf: collectLeaf, } } // Commit collapses a node down into a hash node and returns it along with // the modified nodeset. -func (c *committer) Commit(n node) hashNode { +func (c *committer) Commit(n node) (hashNode, *NodeSet) { h := c.commit(nil, n) - return h.(hashNode) + // Some nodes can be deleted from trie which can't be captured + // by committer itself. Iterate all deleted nodes tracked by + // tracer and marked them as deleted only if they are present + // in database previously. + c.tracer.markDeletions(c.nodes) + return h.(hashNode), c.nodes } // commit collapses a node down into a hash node and returns it. @@ -78,6 +85,12 @@ func (c *committer) commit(path []byte, n node) node { if hn, ok := hashedNode.(hashNode); ok { return hn } + // The short node now is embedded in its parent. Mark the node as + // deleted if it's present in database previously. It's equivalent + // as deletion from database's perspective. + if prev := c.tracer.getPrev(path); len(prev) != 0 { + c.nodes.markDeleted(path, prev) + } return collapsed case *fullNode: hashedKids := c.commitChildren(path, cn) @@ -88,6 +101,12 @@ func (c *committer) commit(path []byte, n node) node { if hn, ok := hashedNode.(hashNode); ok { return hn } + // The full node now is embedded in its parent. Mark the node as + // deleted if it's present in database previously. It's equivalent + // as deletion from database's perspective. 
+ if prev := c.tracer.getPrev(path); len(prev) != 0 { + c.nodes.markDeleted(path, prev) + } return collapsed case hashNode: return cn @@ -150,7 +169,7 @@ func (c *committer) store(path []byte, n node) node { } ) // Collect the dirty node to nodeset for return. - c.nodes.markUpdated(path, mnode) + c.nodes.markUpdated(path, mnode, c.tracer.getPrev(path)) // Collect the corresponding leaf node if it's required. We don't check // full node since it's impossible to store value in fullNode. The key diff --git a/trie/database.go b/trie/database.go index 5d47475e0..74247d59c 100644 --- a/trie/database.go +++ b/trie/database.go @@ -792,12 +792,13 @@ func (db *Database) Update(nodes *MergedNodeSet) error { } for _, owner := range order { subset := nodes.sets[owner] - subset.forEachWithOrder(false, func(path string, n *memoryNode) { - if n.isDeleted() { - return // ignore deletion + for _, path := range subset.updates.order { + n, ok := subset.updates.nodes[path] + if !ok { + return fmt.Errorf("missing node %x %v", owner, path) } db.insert(n.hash, int(n.size), n.node) - }) + } } // Link up the account trie and storage trie if the node points // to an account trie leaf. diff --git a/trie/nodeset.go b/trie/nodeset.go index 4251eccaf..928172350 100644 --- a/trie/nodeset.go +++ b/trie/nodeset.go @@ -19,7 +19,6 @@ package trie import ( "fmt" "reflect" - "sort" "strings" "github.com/ethereum/go-ethereum/common" @@ -41,8 +40,8 @@ var memoryNodeSize = int(reflect.TypeOf(memoryNode{}).Size()) // memorySize returns the total memory size used by this node. // nolint:unused -func (n *memoryNode) memorySize(pathlen int) int { - return int(n.size) + memoryNodeSize + pathlen +func (n *memoryNode) memorySize(key int) int { + return int(n.size) + memoryNodeSize + key } // rlp returns the raw rlp encoded blob of the cached trie node, either directly @@ -65,20 +64,14 @@ func (n *memoryNode) obj() node { return expandNode(n.hash[:], n.node) } -// isDeleted returns the indicator if the node is marked as deleted. -func (n *memoryNode) isDeleted() bool { - return n.hash == (common.Hash{}) -} - // nodeWithPrev wraps the memoryNode with the previous node value. -// nolint: unused type nodeWithPrev struct { *memoryNode prev []byte // RLP-encoded previous value, nil means it's non-existent } // unwrap returns the internal memoryNode object. -// nolint: unused +// nolint:unused func (n *nodeWithPrev) unwrap() *memoryNode { return n.memoryNode } @@ -86,69 +79,64 @@ func (n *nodeWithPrev) unwrap() *memoryNode { // memorySize returns the total memory size used by this node. It overloads // the function in memoryNode by counting the size of previous value as well. // nolint: unused -func (n *nodeWithPrev) memorySize(pathlen int) int { - return n.memoryNode.memorySize(pathlen) + len(n.prev) +func (n *nodeWithPrev) memorySize(key int) int { + return n.memoryNode.memorySize(key) + len(n.prev) +} + +// nodesWithOrder represents a collection of dirty nodes which includes +// newly-inserted and updated nodes. The modification order of all nodes +// is represented by order list. +type nodesWithOrder struct { + order []string // the path list of dirty nodes, sort by insertion order + nodes map[string]*nodeWithPrev // the map of dirty nodes, keyed by node path } // NodeSet contains all dirty nodes collected during the commit operation. // Each node is keyed by path. It's not thread-safe to use. 
type NodeSet struct { - owner common.Hash // the identifier of the trie - nodes map[string]*memoryNode // the set of dirty nodes(inserted, updated, deleted) - leaves []*leaf // the list of dirty leaves - accessList map[string][]byte // The list of accessed nodes, which records the original node value + owner common.Hash // the identifier of the trie + updates *nodesWithOrder // the set of updated nodes(newly inserted, updated) + deletes map[string][]byte // the map of deleted nodes, keyed by node + leaves []*leaf // the list of dirty leaves } // NewNodeSet initializes an empty node set to be used for tracking dirty nodes -// for a specific account or storage trie. The owner is zero for the account trie -// and the owning account address hash for storage tries. The provided accessList -// represents the original value of accessed nodes, it can be optional but would -// be beneficial for speeding up the construction of trie history. -func NewNodeSet(owner common.Hash, accessList map[string][]byte) *NodeSet { - // Don't panic for lazy users - if accessList == nil { - accessList = make(map[string][]byte) - } +// from a specific account or storage trie. The owner is zero for the account +// trie and the owning account address hash for storage tries. +func NewNodeSet(owner common.Hash) *NodeSet { return &NodeSet{ - owner: owner, - nodes: make(map[string]*memoryNode), - accessList: accessList, + owner: owner, + updates: &nodesWithOrder{ + nodes: make(map[string]*nodeWithPrev), + }, + deletes: make(map[string][]byte), } } -// forEachWithOrder iterates the dirty nodes with the specified order. -// If topToBottom is true: -// -// then the order of iteration is top to bottom, left to right. -// -// If topToBottom is false: -// -// then the order of iteration is bottom to top, right to left. -func (set *NodeSet) forEachWithOrder(topToBottom bool, callback func(path string, n *memoryNode)) { - var paths sort.StringSlice - for path := range set.nodes { - paths = append(paths, path) - } - if topToBottom { - paths.Sort() - } else { - sort.Sort(sort.Reverse(paths)) - } - for _, path := range paths { - callback(path, set.nodes[path]) +/* +// NewNodeSetWithDeletion initializes the nodeset with provided deletion set. +func NewNodeSetWithDeletion(owner common.Hash, paths [][]byte, prev [][]byte) *NodeSet { + set := NewNodeSet(owner) + for i, path := range paths { + set.markDeleted(path, prev[i]) } + return set } +*/ // markUpdated marks the node as dirty(newly-inserted or updated) with provided // node path, node object along with its previous value. -func (set *NodeSet) markUpdated(path []byte, node *memoryNode) { - set.nodes[string(path)] = node +func (set *NodeSet) markUpdated(path []byte, node *memoryNode, prev []byte) { + set.updates.order = append(set.updates.order, string(path)) + set.updates.nodes[string(path)] = &nodeWithPrev{ + memoryNode: node, + prev: prev, + } } // markDeleted marks the node as deleted with provided path and previous value. -// nolint: unused -func (set *NodeSet) markDeleted(path []byte) { - set.nodes[string(path)] = &memoryNode{} +func (set *NodeSet) markDeleted(path []byte, prev []byte) { + set.deletes[string(path)] = prev } // addLeaf collects the provided leaf node into set. @@ -156,16 +144,16 @@ func (set *NodeSet) addLeaf(node *leaf) { set.leaves = append(set.leaves, node) } -// Size returns the number of dirty nodes contained in the set. -func (set *NodeSet) Size() int { - return len(set.nodes) +// Size returns the number of updated and deleted nodes contained in the set. 
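A minimal in-package sketch (package trie) of the NodeSet API reintroduced by this revert; the paths and RLP bytes are made-up placeholders:

    func nodeSetSketch() (int, int) {
        set := NewNodeSet(common.Hash{})                                 // account trie, zero owner
        set.markUpdated([]byte{0x01}, &memoryNode{}, nil)                // newly inserted node, no previous value
        set.markUpdated([]byte{0x01, 0x02}, &memoryNode{}, []byte{0xc0}) // updated node, previous RLP recorded
        set.markDeleted([]byte{0x03}, []byte{0xc1})                      // deleted node, previous RLP recorded
        return set.Size()                                                // 2 updates, 1 deletion
    }

Because updates carries both an order slice and a map, Database.Update can later replay the dirty nodes in the same order the committer recorded them.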
+func (set *NodeSet) Size() (int, int) { + return len(set.updates.order), len(set.deletes) } // Hashes returns the hashes of all updated nodes. TODO(rjl493456442) how can // we get rid of it? func (set *NodeSet) Hashes() []common.Hash { var ret []common.Hash - for _, node := range set.nodes { + for _, node := range set.updates.nodes { ret = append(ret, node.hash) } return ret @@ -175,17 +163,19 @@ func (set *NodeSet) Hashes() []common.Hash { func (set *NodeSet) Summary() string { var out = new(strings.Builder) fmt.Fprintf(out, "nodeset owner: %v\n", set.owner) - if set.nodes != nil { - for path, n := range set.nodes { - // Deletion - if n.isDeleted() { - fmt.Fprintf(out, " [-]: %x\n", path) - continue + if set.updates != nil { + for _, key := range set.updates.order { + updated := set.updates.nodes[key] + if updated.prev != nil { + fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", key, updated.hash, updated.prev) + } else { + fmt.Fprintf(out, " [+]: %x -> %v\n", key, updated.hash) } - // Update - fmt.Fprintf(out, " [+]: %x -> %v\n", path, n.hash) } } + for k, n := range set.deletes { + fmt.Fprintf(out, " [-]: %x -> %x\n", k, n) + } for _, n := range set.leaves { fmt.Fprintf(out, "[leaf]: %v\n", n) } diff --git a/trie/trie.go b/trie/trie.go index 65f89d91f..c467ac476 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -51,11 +51,12 @@ type Trie struct { // actually unhashed nodes. unhashed int - // accessList tracks the loaded nodes from database. - accessList map[string][]byte - // reader is the handler trie can retrieve nodes from. reader *trieReader + + // tracer is the tool to track the trie changes. + // It will be reset after each commit operation. + tracer *tracer } // newFlag returns the cache flag value for a newly created node. @@ -65,16 +66,12 @@ func (t *Trie) newFlag() nodeFlag { // Copy returns a copy of Trie. func (t *Trie) Copy() *Trie { - accessList := make(map[string][]byte) - for path, blob := range t.accessList { - accessList[path] = common.CopyBytes(blob) - } return &Trie{ - root: t.root, - owner: t.owner, - unhashed: t.unhashed, - reader: t.reader, - accessList: accessList, + root: t.root, + owner: t.owner, + unhashed: t.unhashed, + reader: t.reader, + tracer: t.tracer.copy(), } } @@ -90,9 +87,9 @@ func New(id *ID, db NodeReader) (*Trie, error) { return nil, err } trie := &Trie{ - owner: id.Owner, - reader: reader, - accessList: make(map[string][]byte), + owner: id.Owner, + reader: reader, + //tracer: newTracer(), } if id.Root != (common.Hash{}) && id.Root != emptyRoot { rootnode, err := trie.resolveAndTrack(id.Root[:], nil) @@ -329,6 +326,11 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error if matchlen == 0 { return true, branch, nil } + // New branch node is created as a child of the original short node. + // Track the newly inserted node in the tracer. The node identifier + // passed is the path from the root node. + t.tracer.onInsert(append(prefix, key[:matchlen]...)) + // Replace it with a short node leading up to the branch. return true, &shortNode{key[:matchlen], branch, t.newFlag()}, nil @@ -343,6 +345,11 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error return true, n, nil case nil: + // New short node is created and track it in the tracer. The node identifier + // passed is the path from the root node. Note the valueNode won't be tracked + // since it's always embedded in its parent. 
+ t.tracer.onInsert(prefix) + return true, &shortNode{key, value, t.newFlag()}, nil case hashNode: @@ -395,6 +402,11 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { return false, n, nil // don't replace n on mismatch } if matchlen == len(key) { + // The matched short node is deleted entirely and track + // it in the deletion set. The same the valueNode doesn't + // need to be tracked at all since it's always embedded. + t.tracer.onDelete(prefix) + return true, nil, nil // remove n entirely for whole matches } // The key is longer than n.Key. Remove the remaining suffix @@ -407,6 +419,10 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { } switch child := child.(type) { case *shortNode: + // The child shortNode is merged into its parent, track + // is deleted as well. + t.tracer.onDelete(append(prefix, n.Key...)) + // Deleting from the subtrie reduced it to another // short node. Merge the nodes to avoid creating a // shortNode{..., shortNode{...}}. Use concat (which @@ -468,6 +484,11 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { return false, nil, err } if cnode, ok := cnode.(*shortNode); ok { + // Replace the entire full node with the short node. + // Mark the original short node as deleted since the + // value is embedded into the parent now. + t.tracer.onDelete(append(prefix, byte(pos))) + k := append([]byte{byte(pos)}, cnode.Key...) return true, &shortNode{k, cnode.Val, t.newFlag()}, nil } @@ -527,7 +548,7 @@ func (t *Trie) resolveAndTrack(n hashNode, prefix []byte) (node, error) { if err != nil { return nil, err } - t.accessList[string(prefix)] = blob + t.tracer.onRead(prefix, blob) return mustDecodeNode(n, blob), nil } @@ -546,15 +567,16 @@ func (t *Trie) Hash() common.Hash { // Once the trie is committed, it's not usable anymore. A new trie must // be created with new root and updated trie database for following usage func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet) { - // Reset accessList at the end of commit - defer func() { - t.accessList = make(map[string][]byte) - }() + defer t.tracer.reset() + // Trie is empty and can be classified into two types of situations: // - The trie was empty and no update happens // - The trie was non-empty and all nodes are dropped if t.root == nil { - return emptyRoot, nil + // Wrap tracked deletions as the return + set := NewNodeSet(t.owner) + t.tracer.markDeletions(set) + return emptyRoot, set } // Derive the hash for all dirty nodes first. We hold the assumption // in the following procedure that all nodes are hashed. 
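For orientation, the caller-side flow around Commit, a hedged sketch mirroring what the tests added later in this patch do:

    func commitAndReopen(tr *Trie, db *Database) (*Trie, error) {
        root, nodes := tr.Commit(false) // hash the trie and collect dirty nodes
        if nodes != nil {
            if err := db.Update(NewWithNodeSet(nodes)); err != nil {
                return nil, err
            }
        }
        return New(TrieID(root), db) // reopen at the committed root
    }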
@@ -568,9 +590,8 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet) { t.root = hashedNode return rootHash, nil } - nodes := NewNodeSet(t.owner, t.accessList) - h := newCommitter(nodes, collectLeaf) - newRoot := h.Commit(t.root) + h := newCommitter(t.owner, t.tracer, collectLeaf) + newRoot, nodes := h.Commit(t.root) t.root = newRoot return rootHash, nodes } @@ -593,5 +614,5 @@ func (t *Trie) Reset() { t.root = nil t.owner = common.Hash{} t.unhashed = 0 - t.accessList = make(map[string][]byte) + t.tracer.reset() } diff --git a/trie/trie_test.go b/trie/trie_test.go index b87b7d3cd..2fb97eebb 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -410,6 +410,7 @@ func runRandTest(rt randTest) bool { values = make(map[string]string) // tracks content of the trie origTrie = NewEmpty(triedb) ) + tr.tracer = newTracer() for i, step := range rt { // fmt.Printf("{op: %d, key: common.Hex2Bytes(\"%x\"), value: common.Hex2Bytes(\"%x\")}, // step %d\n", @@ -448,14 +449,21 @@ func runRandTest(rt randTest) bool { root, nodes := tr.Commit(true) // Validity the returned nodeset if nodes != nil { - for path := range nodes.nodes { + for path, node := range nodes.updates.nodes { blob, _, _ := origTrie.TryGetNode(hexToCompact([]byte(path))) - got := nodes.accessList[path] + got := node.prev if !bytes.Equal(blob, got) { rt[i].err = fmt.Errorf("prevalue mismatch for 0x%x, got 0x%x want 0x%x", path, got, blob) panic(rt[i].err) } } + for path, prev := range nodes.deletes { + blob, _, _ := origTrie.TryGetNode(hexToCompact([]byte(path))) + if !bytes.Equal(blob, prev) { + rt[i].err = fmt.Errorf("prevalue mismatch for 0x%x, got 0x%x want 0x%x", path, prev, blob) + return false + } + } } if nodes != nil { triedb.Update(NewWithNodeSet(nodes)) @@ -469,6 +477,7 @@ func runRandTest(rt randTest) bool { // Enable node tracing. Resolve the root node again explicitly // since it's not captured at the beginning. + tr.tracer = newTracer() tr.resolveAndTrack(root.Bytes(), nil) origTrie = tr.Copy() @@ -481,6 +490,59 @@ func runRandTest(rt randTest) bool { if tr.Hash() != checktr.Hash() { rt[i].err = fmt.Errorf("hash mismatch in opItercheckhash") } + case opNodeDiff: + var ( + inserted = tr.tracer.insertList() + deleted = tr.tracer.deleteList() + origIter = origTrie.NodeIterator(nil) + curIter = tr.NodeIterator(nil) + origSeen = make(map[string]struct{}) + curSeen = make(map[string]struct{}) + ) + for origIter.Next(true) { + if origIter.Leaf() { + continue + } + origSeen[string(origIter.Path())] = struct{}{} + } + for curIter.Next(true) { + if curIter.Leaf() { + continue + } + curSeen[string(curIter.Path())] = struct{}{} + } + var ( + insertExp = make(map[string]struct{}) + deleteExp = make(map[string]struct{}) + ) + for path := range curSeen { + _, present := origSeen[path] + if !present { + insertExp[path] = struct{}{} + } + } + for path := range origSeen { + _, present := curSeen[path] + if !present { + deleteExp[path] = struct{}{} + } + } + if len(insertExp) != len(inserted) { + rt[i].err = fmt.Errorf("insert set mismatch") + } + if len(deleteExp) != len(deleted) { + rt[i].err = fmt.Errorf("delete set mismatch") + } + for _, insert := range inserted { + if _, present := insertExp[string(insert)]; !present { + rt[i].err = fmt.Errorf("missing inserted node") + } + } + for _, del := range deleted { + if _, present := deleteExp[string(del)]; !present { + rt[i].err = fmt.Errorf("missing deleted node") + } + } } // Abort the test on error. 
if rt[i].err != nil { diff --git a/trie/util_test.go b/trie/util_test.go new file mode 100644 index 000000000..01a46553a --- /dev/null +++ b/trie/util_test.go @@ -0,0 +1,304 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "bytes" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" +) + +// Tests if the trie diffs are tracked correctly. +func TestTrieTracer(t *testing.T) { + db := NewDatabase(rawdb.NewMemoryDatabase()) + trie := NewEmpty(db) + trie.tracer = newTracer() + + // Insert a batch of entries, all the nodes should be marked as inserted + vals := []struct{ k, v string }{ + {"do", "verb"}, + {"ether", "wookiedoo"}, + {"horse", "stallion"}, + {"shaman", "horse"}, + {"doge", "coin"}, + {"dog", "puppy"}, + {"somethingveryoddindeedthis is", "myothernodedata"}, + } + for _, val := range vals { + trie.Update([]byte(val.k), []byte(val.v)) + } + trie.Hash() + + seen := make(map[string]struct{}) + it := trie.NodeIterator(nil) + for it.Next(true) { + if it.Leaf() { + continue + } + seen[string(it.Path())] = struct{}{} + } + inserted := trie.tracer.insertList() + if len(inserted) != len(seen) { + t.Fatalf("Unexpected inserted node tracked want %d got %d", len(seen), len(inserted)) + } + for _, k := range inserted { + _, ok := seen[string(k)] + if !ok { + t.Fatalf("Unexpected inserted node") + } + } + deleted := trie.tracer.deleteList() + if len(deleted) != 0 { + t.Fatalf("Unexpected deleted node tracked %d", len(deleted)) + } + + // Commit the changes and re-create with new root + root, nodes := trie.Commit(false) + if err := db.Update(NewWithNodeSet(nodes)); err != nil { + t.Fatal(err) + } + trie, _ = New(TrieID(root), db) + trie.tracer = newTracer() + + // Delete all the elements, check deletion set + for _, val := range vals { + trie.Delete([]byte(val.k)) + } + trie.Hash() + + inserted = trie.tracer.insertList() + if len(inserted) != 0 { + t.Fatalf("Unexpected inserted node tracked %d", len(inserted)) + } + deleted = trie.tracer.deleteList() + if len(deleted) != len(seen) { + t.Fatalf("Unexpected deleted node tracked want %d got %d", len(seen), len(deleted)) + } + for _, k := range deleted { + _, ok := seen[string(k)] + if !ok { + t.Fatalf("Unexpected inserted node") + } + } +} + +func TestTrieTracerNoop(t *testing.T) { + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie.tracer = newTracer() + + // Insert a batch of entries, all the nodes should be marked as inserted + vals := []struct{ k, v string }{ + {"do", "verb"}, + {"ether", "wookiedoo"}, + {"horse", "stallion"}, + {"shaman", "horse"}, + {"doge", "coin"}, + {"dog", "puppy"}, + {"somethingveryoddindeedthis is", "myothernodedata"}, + } + for _, val := range vals { + trie.Update([]byte(val.k), []byte(val.v)) + } + for _, val 
:= range vals { + trie.Delete([]byte(val.k)) + } + if len(trie.tracer.insertList()) != 0 { + t.Fatalf("Unexpected inserted node tracked %d", len(trie.tracer.insertList())) + } + if len(trie.tracer.deleteList()) != 0 { + t.Fatalf("Unexpected deleted node tracked %d", len(trie.tracer.deleteList())) + } +} + +func TestTrieTracePrevValue(t *testing.T) { + db := NewDatabase(rawdb.NewMemoryDatabase()) + trie := NewEmpty(db) + trie.tracer = newTracer() + + paths, blobs := trie.tracer.prevList() + if len(paths) != 0 || len(blobs) != 0 { + t.Fatalf("Nothing should be tracked") + } + // Insert a batch of entries, all the nodes should be marked as inserted + vals := []struct{ k, v string }{ + {"do", "verb"}, + {"ether", "wookiedoo"}, + {"horse", "stallion"}, + {"shaman", "horse"}, + {"doge", "coin"}, + {"dog", "puppy"}, + {"somethingveryoddindeedthis is", "myothernodedata"}, + } + for _, val := range vals { + trie.Update([]byte(val.k), []byte(val.v)) + } + paths, blobs = trie.tracer.prevList() + if len(paths) != 0 || len(blobs) != 0 { + t.Fatalf("Nothing should be tracked") + } + + // Commit the changes and re-create with new root + root, nodes := trie.Commit(false) + if err := db.Update(NewWithNodeSet(nodes)); err != nil { + t.Fatal(err) + } + trie, _ = New(TrieID(root), db) + trie.tracer = newTracer() + trie.resolveAndTrack(root.Bytes(), nil) + + // Load all nodes in trie + for _, val := range vals { + trie.TryGet([]byte(val.k)) + } + + // Ensure all nodes are tracked by tracer with correct prev-values + iter := trie.NodeIterator(nil) + seen := make(map[string][]byte) + for iter.Next(true) { + // Embedded nodes are ignored since they are not present in + // database. + if iter.Hash() == (common.Hash{}) { + continue + } + seen[string(iter.Path())] = common.CopyBytes(iter.NodeBlob()) + } + + paths, blobs = trie.tracer.prevList() + if len(paths) != len(seen) || len(blobs) != len(seen) { + t.Fatalf("Unexpected tracked values") + } + for i, path := range paths { + blob := blobs[i] + prev, ok := seen[string(path)] + if !ok { + t.Fatalf("Missing node %v", path) + } + if !bytes.Equal(blob, prev) { + t.Fatalf("Unexpected value path: %v, want: %v, got: %v", path, prev, blob) + } + } + + // Re-open the trie and iterate the trie, ensure nothing will be tracked. + // Iterator will not link any loaded nodes to trie. + trie, _ = New(TrieID(root), db) + trie.tracer = newTracer() + + iter = trie.NodeIterator(nil) + for iter.Next(true) { + } + paths, blobs = trie.tracer.prevList() + if len(paths) != 0 || len(blobs) != 0 { + t.Fatalf("Nothing should be tracked") + } + + // Re-open the trie and generate proof for entries, ensure nothing will + // be tracked. Prover will not link any loaded nodes to trie. + trie, _ = New(TrieID(root), db) + trie.tracer = newTracer() + for _, val := range vals { + trie.Prove([]byte(val.k), 0, rawdb.NewMemoryDatabase()) + } + paths, blobs = trie.tracer.prevList() + if len(paths) != 0 || len(blobs) != 0 { + t.Fatalf("Nothing should be tracked") + } + + // Delete entries from trie, ensure all previous values are correct. 
+ trie, _ = New(TrieID(root), db) + trie.tracer = newTracer() + trie.resolveAndTrack(root.Bytes(), nil) + + for _, val := range vals { + trie.TryDelete([]byte(val.k)) + } + paths, blobs = trie.tracer.prevList() + if len(paths) != len(seen) || len(blobs) != len(seen) { + t.Fatalf("Unexpected tracked values") + } + for i, path := range paths { + blob := blobs[i] + prev, ok := seen[string(path)] + if !ok { + t.Fatalf("Missing node %v", path) + } + if !bytes.Equal(blob, prev) { + t.Fatalf("Unexpected value path: %v, want: %v, got: %v", path, prev, blob) + } + } +} + +func TestDeleteAll(t *testing.T) { + db := NewDatabase(rawdb.NewMemoryDatabase()) + trie := NewEmpty(db) + trie.tracer = newTracer() + + // Insert a batch of entries, all the nodes should be marked as inserted + vals := []struct{ k, v string }{ + {"do", "verb"}, + {"ether", "wookiedoo"}, + {"horse", "stallion"}, + {"shaman", "horse"}, + {"doge", "coin"}, + {"dog", "puppy"}, + {"somethingveryoddindeedthis is", "myothernodedata"}, + } + for _, val := range vals { + trie.Update([]byte(val.k), []byte(val.v)) + } + root, set := trie.Commit(false) + if err := db.Update(NewWithNodeSet(set)); err != nil { + t.Fatal(err) + } + // Delete entries from trie, ensure all values are detected + trie, _ = New(TrieID(root), db) + trie.tracer = newTracer() + trie.resolveAndTrack(root.Bytes(), nil) + + // Iterate all existent nodes + var ( + it = trie.NodeIterator(nil) + nodes = make(map[string][]byte) + ) + for it.Next(true) { + if it.Hash() != (common.Hash{}) { + nodes[string(it.Path())] = common.CopyBytes(it.NodeBlob()) + } + } + + // Perform deletion to purge the entire trie + for _, val := range vals { + trie.Delete([]byte(val.k)) + } + root, set = trie.Commit(false) + if root != emptyRoot { + t.Fatalf("Invalid trie root %v", root) + } + for path, blob := range set.deletes { + prev, ok := nodes[path] + if !ok { + t.Fatalf("Extra node deleted %v", []byte(path)) + } + if !bytes.Equal(prev, blob) { + t.Fatalf("Unexpected previous value %v", []byte(path)) + } + } + if len(set.deletes) != len(nodes) { + t.Fatalf("Unexpected deletion set") + } +} diff --git a/trie/utils.go b/trie/utils.go new file mode 100644 index 000000000..5dce65cd2 --- /dev/null +++ b/trie/utils.go @@ -0,0 +1,199 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +// tracer tracks the changes of trie nodes. During the trie operations, +// some nodes can be deleted from the trie, while these deleted nodes +// won't be captured by trie.Hasher or trie.Committer. Thus, these deleted +// nodes won't be removed from the disk at all. Tracer is an auxiliary tool +// used to track all insert and delete operations of trie and capture all +// deleted nodes eventually. 
+// +// The changed nodes can be mainly divided into two categories: the leaf +// node and intermediate node. The former is inserted/deleted by callers +// while the latter is inserted/deleted in order to follow the rule of trie. +// This tool can track all of them no matter the node is embedded in its +// parent or not, but valueNode is never tracked. +// +// Besides, it's also used for recording the original value of the nodes +// when they are resolved from the disk. The pre-value of the nodes will +// be used to construct reverse-diffs in the future. +// +// Note tracer is not thread-safe, callers should be responsible for handling +// the concurrency issues by themselves. +type tracer struct { + insert map[string]struct{} + delete map[string]struct{} + origin map[string][]byte +} + +// newTracer initializes the tracer for capturing trie changes. +func newTracer() *tracer { + return &tracer{ + insert: make(map[string]struct{}), + delete: make(map[string]struct{}), + origin: make(map[string][]byte), + } +} + +// onRead tracks the newly loaded trie node and caches the rlp-encoded blob internally. +// Don't change the value outside of function since it's not deep-copied. +func (t *tracer) onRead(path []byte, val []byte) { + // Tracer isn't used right now, remove this check later. + if t == nil { + return + } + t.origin[string(path)] = val +} + +// onInsert tracks the newly inserted trie node. If it's already in the deletion set +// (resurrected node), then just wipe it from the deletion set as the "untouched". +func (t *tracer) onInsert(path []byte) { + // Tracer isn't used right now, remove this check later. + if t == nil { + return + } + if _, present := t.delete[string(path)]; present { + delete(t.delete, string(path)) + return + } + t.insert[string(path)] = struct{}{} +} + +// onDelete tracks the newly deleted trie node. If it's already +// in the addition set, then just wipe it from the addition set +// as it's untouched. +func (t *tracer) onDelete(path []byte) { + // Tracer isn't used right now, remove this check later. + if t == nil { + return + } + if _, present := t.insert[string(path)]; present { + delete(t.insert, string(path)) + return + } + t.delete[string(path)] = struct{}{} +} + +// insertList returns the tracked inserted trie nodes in list format. +func (t *tracer) insertList() [][]byte { + // Tracer isn't used right now, remove this check later. + if t == nil { + return nil + } + var ret [][]byte + for path := range t.insert { + ret = append(ret, []byte(path)) + } + return ret +} + +// deleteList returns the tracked deleted trie nodes in list format. +func (t *tracer) deleteList() [][]byte { + // Tracer isn't used right now, remove this check later. + if t == nil { + return nil + } + var ret [][]byte + for path := range t.delete { + ret = append(ret, []byte(path)) + } + return ret +} + +// prevList returns the tracked node blobs in list format. +func (t *tracer) prevList() ([][]byte, [][]byte) { + // Tracer isn't used right now, remove this check later. + if t == nil { + return nil, nil + } + var ( + paths [][]byte + blobs [][]byte + ) + for path, blob := range t.origin { + paths = append(paths, []byte(path)) + blobs = append(blobs, blob) + } + return paths, blobs +} + +// getPrev returns the cached original value of the specified node. +func (t *tracer) getPrev(path []byte) []byte { + // Tracer isn't used right now, remove this check later. + if t == nil { + return nil + } + return t.origin[string(path)] +} + +// reset clears the content tracked by tracer. 
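An in-package sketch (package trie) of the bookkeeping described above; the paths and the RLP blob are illustrative only:

    func tracerSketch() {
        t := newTracer()
        t.onInsert([]byte{0x01}) // node newly created at path 0x01
        t.onDelete([]byte{0x01}) // ... and removed again before commit: the two events cancel
        // t.insertList() and t.deleteList() are both empty at this point.

        t.onRead([]byte{0x02}, []byte{0xc2}) // node loaded from disk, original RLP cached
        t.onDelete([]byte{0x02})             // later dropped from the trie
        // markDeletions would record path 0x02 in a NodeSet, because a previous
        // on-disk value exists; deletions without one are skipped as no-ops.
    }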
+func (t *tracer) reset() { + // Tracer isn't used right now, remove this check later. + if t == nil { + return + } + t.insert = make(map[string]struct{}) + t.delete = make(map[string]struct{}) + t.origin = make(map[string][]byte) +} + +// copy returns a deep copied tracer instance. +func (t *tracer) copy() *tracer { + // Tracer isn't used right now, remove this check later. + if t == nil { + return nil + } + var ( + insert = make(map[string]struct{}) + delete = make(map[string]struct{}) + origin = make(map[string][]byte) + ) + for key := range t.insert { + insert[key] = struct{}{} + } + for key := range t.delete { + delete[key] = struct{}{} + } + for key, val := range t.origin { + origin[key] = val + } + return &tracer{ + insert: insert, + delete: delete, + origin: origin, + } +} + +// markDeletions puts all tracked deletions into the provided nodeset. +func (t *tracer) markDeletions(set *NodeSet) { + // Tracer isn't used right now, remove this check later. + if t == nil { + return + } + for _, path := range t.deleteList() { + // There are a few possibilities for this scenario(the node is deleted + // but not present in database previously), for example the node was + // embedded in the parent and now deleted from the trie. In this case + // it's noop from database's perspective. + val := t.getPrev(path) + if len(val) == 0 { + continue + } + set.markDeleted(path, val) + } +} From 7d4db6960724b9a3b8fea7144a7ccfd5493285c6 Mon Sep 17 00:00:00 2001 From: Sungwoo Kim Date: Tue, 21 Feb 2023 02:35:04 -0500 Subject: [PATCH 16/26] cmd/geth: clarify dumpconfig options (#26729) Clarifies the documentation around dumpconfi Signed-off-by: Sungwoo Kim --- cmd/geth/config.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/geth/config.go b/cmd/geth/config.go index 61b7ed5ec..0b856d1c1 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -47,10 +47,10 @@ var ( dumpConfigCommand = &cli.Command{ Action: dumpConfig, Name: "dumpconfig", - Usage: "Show configuration values", - ArgsUsage: "", + Usage: "Export configuration values in a TOML format", + ArgsUsage: "", Flags: flags.Merge(nodeFlags, rpcFlags), - Description: `The dumpconfig command shows configuration values.`, + Description: `Export configuration values in TOML format (to stdout by default).`, } configFileFlag = &cli.StringFlag{ From 90d25514af32f4e489d71fddf43c7f66c7de517b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 21 Feb 2023 12:17:34 +0200 Subject: [PATCH 17/26] core, eth: merge snap-sync chain download progress logs (#26676) --- core/blockchain.go | 2 +- core/headerchain.go | 2 +- core/rawdb/accessors_chain.go | 26 +++++++------- core/rawdb/ancient_scheme.go | 30 ++++++++-------- core/rawdb/chain_freezer.go | 10 +++--- core/rawdb/chain_iterator.go | 2 +- core/rawdb/database.go | 2 +- eth/downloader/downloader.go | 65 ++++++++++++++++++++++++++++++++++- eth/downloader/queue.go | 9 ++--- 9 files changed, 106 insertions(+), 42 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 98d2e7a77..38a129d4e 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1278,7 +1278,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ if stats.ignored > 0 { context = append(context, []interface{}{"ignored", stats.ignored}...) } - log.Info("Imported new block receipts", context...) + log.Debug("Imported new block receipts", context...) 
return 0, nil } diff --git a/core/headerchain.go b/core/headerchain.go index d40d26f72..aed3c720c 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -389,7 +389,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time, if res.ignored > 0 { context = append(context, []interface{}{"ignored", res.ignored}...) } - log.Info("Imported new block headers", context...) + log.Debug("Imported new block headers", context...) return res.status, err } diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 9b90d8f20..e4dac3407 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -37,7 +37,7 @@ import ( func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash { var data []byte db.ReadAncients(func(reader ethdb.AncientReaderOp) error { - data, _ = reader.Ancient(chainFreezerHashTable, number) + data, _ = reader.Ancient(ChainFreezerHashTable, number) if len(data) == 0 { // Get it by hash from leveldb data, _ = db.Get(headerHashKey(number)) @@ -334,7 +334,7 @@ func ReadHeaderRange(db ethdb.Reader, number uint64, count uint64) []rlp.RawValu } // read remaining from ancients max := count * 700 - data, err := db.AncientRange(chainFreezerHeaderTable, i+1-count, count, max) + data, err := db.AncientRange(ChainFreezerHeaderTable, i+1-count, count, max) if err == nil && uint64(len(data)) == count { // the data is on the order [h, h+1, .., n] -- reordering needed for i := range data { @@ -351,7 +351,7 @@ func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValu // First try to look up the data in ancient database. Extra hash // comparison is necessary since ancient database only maintains // the canonical data. - data, _ = reader.Ancient(chainFreezerHeaderTable, number) + data, _ = reader.Ancient(ChainFreezerHeaderTable, number) if len(data) > 0 && crypto.Keccak256Hash(data) == hash { return nil } @@ -427,7 +427,7 @@ func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number // isCanon is an internal utility method, to check whether the given number/hash // is part of the ancient (canon) set. 
func isCanon(reader ethdb.AncientReaderOp, number uint64, hash common.Hash) bool { - h, err := reader.Ancient(chainFreezerHashTable, number) + h, err := reader.Ancient(ChainFreezerHashTable, number) if err != nil { return false } @@ -443,7 +443,7 @@ func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue db.ReadAncients(func(reader ethdb.AncientReaderOp) error { // Check if the data is in ancients if isCanon(reader, number, hash) { - data, _ = reader.Ancient(chainFreezerBodiesTable, number) + data, _ = reader.Ancient(ChainFreezerBodiesTable, number) return nil } // If not, try reading from leveldb @@ -458,7 +458,7 @@ func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue { var data []byte db.ReadAncients(func(reader ethdb.AncientReaderOp) error { - data, _ = reader.Ancient(chainFreezerBodiesTable, number) + data, _ = reader.Ancient(ChainFreezerBodiesTable, number) if len(data) > 0 { return nil } @@ -526,7 +526,7 @@ func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { db.ReadAncients(func(reader ethdb.AncientReaderOp) error { // Check if the data is in ancients if isCanon(reader, number, hash) { - data, _ = reader.Ancient(chainFreezerDifficultyTable, number) + data, _ = reader.Ancient(ChainFreezerDifficultyTable, number) return nil } // If not, try reading from leveldb @@ -586,7 +586,7 @@ func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawVa db.ReadAncients(func(reader ethdb.AncientReaderOp) error { // Check if the data is in ancients if isCanon(reader, number, hash) { - data, _ = reader.Ancient(chainFreezerReceiptTable, number) + data, _ = reader.Ancient(ChainFreezerReceiptTable, number) return nil } // If not, try reading from leveldb @@ -787,19 +787,19 @@ func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, td *big.Int) error { num := block.NumberU64() - if err := op.AppendRaw(chainFreezerHashTable, num, block.Hash().Bytes()); err != nil { + if err := op.AppendRaw(ChainFreezerHashTable, num, block.Hash().Bytes()); err != nil { return fmt.Errorf("can't add block %d hash: %v", num, err) } - if err := op.Append(chainFreezerHeaderTable, num, header); err != nil { + if err := op.Append(ChainFreezerHeaderTable, num, header); err != nil { return fmt.Errorf("can't append block header %d: %v", num, err) } - if err := op.Append(chainFreezerBodiesTable, num, block.Body()); err != nil { + if err := op.Append(ChainFreezerBodiesTable, num, block.Body()); err != nil { return fmt.Errorf("can't append block body %d: %v", num, err) } - if err := op.Append(chainFreezerReceiptTable, num, receipts); err != nil { + if err := op.Append(ChainFreezerReceiptTable, num, receipts); err != nil { return fmt.Errorf("can't append block %d receipts: %v", num, err) } - if err := op.Append(chainFreezerDifficultyTable, num, td); err != nil { + if err := op.Append(ChainFreezerDifficultyTable, num, td); err != nil { return fmt.Errorf("can't append block %d total difficulty: %v", num, err) } return nil diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go index 047b504a2..b0428c5f5 100644 --- a/core/rawdb/ancient_scheme.go +++ b/core/rawdb/ancient_scheme.go @@ -18,30 +18,30 @@ package rawdb // The list of table names of chain freezer. 
const ( - // chainFreezerHeaderTable indicates the name of the freezer header table. - chainFreezerHeaderTable = "headers" + // ChainFreezerHeaderTable indicates the name of the freezer header table. + ChainFreezerHeaderTable = "headers" - // chainFreezerHashTable indicates the name of the freezer canonical hash table. - chainFreezerHashTable = "hashes" + // ChainFreezerHashTable indicates the name of the freezer canonical hash table. + ChainFreezerHashTable = "hashes" - // chainFreezerBodiesTable indicates the name of the freezer block body table. - chainFreezerBodiesTable = "bodies" + // ChainFreezerBodiesTable indicates the name of the freezer block body table. + ChainFreezerBodiesTable = "bodies" - // chainFreezerReceiptTable indicates the name of the freezer receipts table. - chainFreezerReceiptTable = "receipts" + // ChainFreezerReceiptTable indicates the name of the freezer receipts table. + ChainFreezerReceiptTable = "receipts" - // chainFreezerDifficultyTable indicates the name of the freezer total difficulty table. - chainFreezerDifficultyTable = "diffs" + // ChainFreezerDifficultyTable indicates the name of the freezer total difficulty table. + ChainFreezerDifficultyTable = "diffs" ) // chainFreezerNoSnappy configures whether compression is disabled for the ancient-tables. // Hashes and difficulties don't compress well. var chainFreezerNoSnappy = map[string]bool{ - chainFreezerHeaderTable: false, - chainFreezerHashTable: true, - chainFreezerBodiesTable: false, - chainFreezerReceiptTable: false, - chainFreezerDifficultyTable: true, + ChainFreezerHeaderTable: false, + ChainFreezerHashTable: true, + ChainFreezerBodiesTable: false, + ChainFreezerReceiptTable: false, + ChainFreezerDifficultyTable: true, } // The list of identifiers of ancient stores. diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go index 738295cfb..920a0ab59 100644 --- a/core/rawdb/chain_freezer.go +++ b/core/rawdb/chain_freezer.go @@ -280,19 +280,19 @@ func (f *chainFreezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hash } // Write to the batch. 
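Exporting these table names is what allows packages outside core/rawdb to ask the freezer about individual tables; later in this patch the downloader does exactly that for its merged progress line. A hedged sketch of the call pattern, with db standing in for any freezer-backed ethdb.Database:

    func ancientChainSize(db ethdb.Database) common.StorageSize {
        headerBytes, _ := db.AncientSize(rawdb.ChainFreezerHeaderTable)
        bodyBytes, _ := db.AncientSize(rawdb.ChainFreezerBodiesTable)
        receiptBytes, _ := db.AncientSize(rawdb.ChainFreezerReceiptTable)
        return common.StorageSize(headerBytes + bodyBytes + receiptBytes)
    }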
- if err := op.AppendRaw(chainFreezerHashTable, number, hash[:]); err != nil { + if err := op.AppendRaw(ChainFreezerHashTable, number, hash[:]); err != nil { return fmt.Errorf("can't write hash to Freezer: %v", err) } - if err := op.AppendRaw(chainFreezerHeaderTable, number, header); err != nil { + if err := op.AppendRaw(ChainFreezerHeaderTable, number, header); err != nil { return fmt.Errorf("can't write header to Freezer: %v", err) } - if err := op.AppendRaw(chainFreezerBodiesTable, number, body); err != nil { + if err := op.AppendRaw(ChainFreezerBodiesTable, number, body); err != nil { return fmt.Errorf("can't write body to Freezer: %v", err) } - if err := op.AppendRaw(chainFreezerReceiptTable, number, receipts); err != nil { + if err := op.AppendRaw(ChainFreezerReceiptTable, number, receipts); err != nil { return fmt.Errorf("can't write receipts to Freezer: %v", err) } - if err := op.AppendRaw(chainFreezerDifficultyTable, number, td); err != nil { + if err := op.AppendRaw(ChainFreezerDifficultyTable, number, td); err != nil { return fmt.Errorf("can't write td to Freezer: %v", err) } diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go index 85ad88e29..102943516 100644 --- a/core/rawdb/chain_iterator.go +++ b/core/rawdb/chain_iterator.go @@ -50,7 +50,7 @@ func InitDatabaseFromFreezer(db ethdb.Database) { if i+count > frozen { count = frozen - i } - data, err := db.AncientRange(chainFreezerHashTable, i, count, 32*count) + data, err := db.AncientRange(ChainFreezerHashTable, i, count, 32*count) if err != nil { log.Crit("Failed to init database from freezer", "err", err) } diff --git a/core/rawdb/database.go b/core/rawdb/database.go index ef80c251a..a27b45e4d 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -231,7 +231,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st // If the freezer already contains something, ensure that the genesis blocks // match, otherwise we might mix up freezers across chains and destroy both // the freezer and the key-value store. - frgenesis, err := frdb.Ancient(chainFreezerHashTable, 0) + frgenesis, err := frdb.Ancient(ChainFreezerHashTable, 0) if err != nil { return nil, fmt.Errorf("failed to retrieve genesis from ancient %v", err) } else if !bytes.Equal(kvgenesis, frgenesis) { diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index f7790b2d8..bb74efe75 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -154,6 +154,11 @@ type Downloader struct { bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations) + + // Progress reporting metrics + syncStartBlock uint64 // Head snap block when Geth was started + syncStartTime time.Time // Time instance when chain sync started + syncLogTime time.Time // Time instance when status was last reported } // LightChain encapsulates functions required to synchronise a light chain. 
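The three new Downloader fields above feed reportSnapSyncProgress further down: syncStartBlock and syncStartTime anchor the measurement, while syncLogTime throttles how often the status line is printed. The ETA is a plain linear extrapolation over blocks; a self-contained illustration with invented figures:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Same arithmetic as the eta calculation in reportSnapSyncProgress:
        // 1,000,000 blocks imported in 2h, 3,000,000 still missing to the head.
        elapsed := 2 * time.Hour
        synced := time.Duration(1_000_000)
        left := time.Duration(3_000_000)
        fmt.Println(elapsed / synced * left) // 6h0m0s
    }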
@@ -231,7 +236,9 @@ func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain Bl quitCh: make(chan struct{}), SnapSyncer: snap.NewSyncer(stateDb, chain.TrieDB().Scheme()), stateSyncStart: make(chan *stateSync), + syncStartBlock: chain.CurrentFastBlock().NumberU64(), } + // Create the post-merge skeleton syncer and start the process dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success)) go dl.stateFetcher() @@ -1614,6 +1621,7 @@ func (d *Downloader) processSnapSyncContent() error { if len(results) == 0 { // If pivot sync is done, stop if oldPivot == nil { + d.reportSnapSyncProgress(true) return sync.Cancel() } // If sync failed, stop @@ -1627,6 +1635,8 @@ func (d *Downloader) processSnapSyncContent() error { if d.chainInsertHook != nil { d.chainInsertHook(results) } + d.reportSnapSyncProgress(false) + // If we haven't downloaded the pivot block yet, check pivot staleness // notifications from the header downloader d.pivotLock.RLock() @@ -1739,7 +1749,7 @@ func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *state } default: } - // Retrieve the a batch of results to import + // Retrieve the batch of results to import first, last := results[0].Header, results[len(results)-1].Header log.Debug("Inserting snap-sync blocks", "items", len(results), "firstnum", first.Number, "firsthash", first.Hash(), @@ -1820,3 +1830,56 @@ func (d *Downloader) readHeaderRange(last *types.Header, count int) []*types.Hea } return headers } + +// reportSnapSyncProgress calculates various status reports and provides it to the user. +func (d *Downloader) reportSnapSyncProgress(force bool) { + // Initialize the sync start time if it's the first time we're reporting + if d.syncStartTime.IsZero() { + d.syncStartTime = time.Now().Add(-time.Millisecond) // -1ms offset to avoid division by zero + } + // Don't report all the events, just occasionally + if !force && time.Since(d.syncLogTime) < 8*time.Second { + return + } + // Don't report anything until we have a meaningful progress + var ( + headerBytes, _ = d.stateDB.AncientSize(rawdb.ChainFreezerHeaderTable) + bodyBytes, _ = d.stateDB.AncientSize(rawdb.ChainFreezerBodiesTable) + receiptBytes, _ = d.stateDB.AncientSize(rawdb.ChainFreezerReceiptTable) + ) + syncedBytes := common.StorageSize(headerBytes + bodyBytes + receiptBytes) + if syncedBytes == 0 { + return + } + var ( + header = d.blockchain.CurrentHeader() + block = d.blockchain.CurrentFastBlock() + ) + syncedBlocks := block.NumberU64() - d.syncStartBlock + if syncedBlocks == 0 { + return + } + // Retrieve the current chain head and calculate the ETA + latest, _, err := d.skeleton.Bounds() + if err != nil { + // We're going to cheat for non-merged networks, but that's fine + latest = d.pivotHeader + } + if latest == nil { + // This should really never happen, but add some defensive code for now. + // TODO(karalabe): Remove it eventually if we don't see it blow. 
+ log.Error("Nil latest block in sync progress report") + return + } + var ( + left = latest.Number.Uint64() - block.NumberU64() + eta = time.Since(d.syncStartTime) / time.Duration(syncedBlocks) * time.Duration(left) + + progress = fmt.Sprintf("%.2f%%", float64(block.NumberU64())*100/float64(latest.Number.Uint64())) + headers = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(header.Number.Uint64()), common.StorageSize(headerBytes).TerminalString()) + bodies = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.NumberU64()), common.StorageSize(bodyBytes).TerminalString()) + receipts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.NumberU64()), common.StorageSize(receiptBytes).TerminalString()) + ) + log.Info("Syncing: chain download in progress", "synced", progress, "chain", syncedBytes, "headers", headers, "bodies", bodies, "receipts", receipts, "eta", common.PrettyDuration(eta)) + d.syncLogTime = time.Now() +} diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 1f676e655..5af5068c9 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -144,7 +144,7 @@ type queue struct { active *sync.Cond closed bool - lastStatLog time.Time + logTime time.Time // Time instance when status was last reported } // newQueue creates a new download queue for scheduling block retrieval. @@ -390,11 +390,12 @@ func (q *queue) Results(block bool) []*fetchResult { } } // Log some info at certain times - if time.Since(q.lastStatLog) > 60*time.Second { - q.lastStatLog = time.Now() + if time.Since(q.logTime) >= 60*time.Second { + q.logTime = time.Now() + info := q.Stats() info = append(info, "throttle", throttleThreshold) - log.Info("Downloader queue stats", info...) + log.Debug("Downloader queue stats", info...) } return results } From 6d2d12610093774828e2964661767321e1c9c254 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Tue, 21 Feb 2023 05:18:33 -0500 Subject: [PATCH 18/26] core: fix accessor mismatch for genesis state (#26747) --- core/genesis.go | 14 +++++++------- core/rawdb/accessors_metadata.go | 10 +++++----- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/core/genesis.go b/core/genesis.go index 5a6c409e0..8b4e336ed 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -138,7 +138,7 @@ func (ga *GenesisAlloc) deriveHash() (common.Hash, error) { // flush is very similar with deriveHash, but the main difference is // all the generated states will be persisted into the given database. // Also, the genesis state specification will be flushed as well. -func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database) error { +func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error { statedb, err := state.New(common.Hash{}, state.NewDatabaseWithNodeDB(db, triedb), nil) if err != nil { return err @@ -166,15 +166,15 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database) error { if err != nil { return err } - rawdb.WriteGenesisStateSpec(db, root, blob) + rawdb.WriteGenesisStateSpec(db, blockhash, blob) return nil } // CommitGenesisState loads the stored genesis state with the given block // hash and commits it into the provided trie database. 
-func CommitGenesisState(db ethdb.Database, triedb *trie.Database, hash common.Hash) error { +func CommitGenesisState(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error { var alloc GenesisAlloc - blob := rawdb.ReadGenesisStateSpec(db, hash) + blob := rawdb.ReadGenesisStateSpec(db, blockhash) if len(blob) != 0 { if err := alloc.UnmarshalJSON(blob); err != nil { return err @@ -186,7 +186,7 @@ func CommitGenesisState(db ethdb.Database, triedb *trie.Database, hash common.Ha // - supported networks(mainnet, testnets), recover with defined allocations // - private network, can't recover var genesis *Genesis - switch hash { + switch blockhash { case params.MainnetGenesisHash: genesis = DefaultGenesisBlock() case params.RinkebyGenesisHash: @@ -202,7 +202,7 @@ func CommitGenesisState(db ethdb.Database, triedb *trie.Database, hash common.Ha return errors.New("not found") } } - return alloc.flush(db, triedb) + return alloc.flush(db, triedb, blockhash) } // GenesisAccount is an account in the state of the genesis block. @@ -493,7 +493,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block // All the checks has passed, flush the states derived from the genesis // specification as well as the specification itself into the provided // database. - if err := g.Alloc.flush(db, triedb); err != nil { + if err := g.Alloc.flush(db, triedb, block.Hash()); err != nil { return nil, err } rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty()) diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go index 7a9e6442f..2ff29d1ad 100644 --- a/core/rawdb/accessors_metadata.go +++ b/core/rawdb/accessors_metadata.go @@ -82,15 +82,15 @@ func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.Cha } // ReadGenesisStateSpec retrieves the genesis state specification based on the -// given genesis hash. -func ReadGenesisStateSpec(db ethdb.KeyValueReader, hash common.Hash) []byte { - data, _ := db.Get(genesisStateSpecKey(hash)) +// given genesis (block-)hash. +func ReadGenesisStateSpec(db ethdb.KeyValueReader, blockhash common.Hash) []byte { + data, _ := db.Get(genesisStateSpecKey(blockhash)) return data } // WriteGenesisStateSpec writes the genesis state specification into the disk. -func WriteGenesisStateSpec(db ethdb.KeyValueWriter, hash common.Hash, data []byte) { - if err := db.Put(genesisStateSpecKey(hash), data); err != nil { +func WriteGenesisStateSpec(db ethdb.KeyValueWriter, blockhash common.Hash, data []byte) { + if err := db.Put(genesisStateSpecKey(blockhash), data); err != nil { log.Crit("Failed to store genesis state", "err", err) } } From 2f20fd31ee2e7c7c544120e4a5eca8600fe8eebb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 21 Feb 2023 13:10:01 +0200 Subject: [PATCH 19/26] core/rawdb: expose chain freezer constructor without internals (#26748) --- core/rawdb/chain_freezer.go | 4 ++-- core/rawdb/database.go | 2 +- core/rawdb/freezer.go | 6 ++++++ 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go index 920a0ab59..167afc388 100644 --- a/core/rawdb/chain_freezer.go +++ b/core/rawdb/chain_freezer.go @@ -55,8 +55,8 @@ type chainFreezer struct { } // newChainFreezer initializes the freezer for ancient chain data. 
-func newChainFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*chainFreezer, error) { - freezer, err := NewFreezer(datadir, namespace, readonly, maxTableSize, tables) +func newChainFreezer(datadir string, namespace string, readonly bool) (*chainFreezer, error) { + freezer, err := NewChainFreezer(datadir, namespace, readonly) if err != nil { return nil, err } diff --git a/core/rawdb/database.go b/core/rawdb/database.go index a27b45e4d..5b7299f38 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -200,7 +200,7 @@ func resolveChainFreezerDir(ancient string) string { // where the chain freezer can be opened. func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly bool) (ethdb.Database, error) { // Create the idle freezer instance - frdb, err := newChainFreezer(resolveChainFreezerDir(ancient), namespace, readonly, freezerTableSize, chainFreezerNoSnappy) + frdb, err := newChainFreezer(resolveChainFreezerDir(ancient), namespace, readonly) if err != nil { return nil, err } diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index 7bae0a2ea..04c326c4f 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -79,6 +79,12 @@ type Freezer struct { closeOnce sync.Once } +// NewChainFreezer is a small utility method around NewFreezer that sets the +// default parameters for the chain storage. +func NewChainFreezer(datadir string, namespace string, readonly bool) (*Freezer, error) { + return NewFreezer(datadir, namespace, readonly, freezerTableSize, chainFreezerNoSnappy) +} + // NewFreezer creates a freezer instance for maintaining immutable ordered // data according to the given parameters. // From fe01a2f63b8591d8226742726d8d6aaad4cd981e Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Tue, 21 Feb 2023 19:12:27 +0800 Subject: [PATCH 20/26] all: use unified emptyRootHash and emptyCodeHash (#26718) The EmptyRootHash and EmptyCodeHash are defined everywhere in the codebase, this PR replaces all of them with unified one defined in core/types package, and also defines constants for TxRoot, WithdrawalsRoot and UncleRoot --- cmd/devp2p/internal/ethtest/snap.go | 16 ++-- cmd/evm/internal/t8ntool/block.go | 4 +- cmd/geth/snapshot.go | 18 ++--- consensus/ethash/algorithm_test.go | 6 +- core/bench_test.go | 4 +- core/genesis.go | 2 +- core/rawdb/accessors_chain_test.go | 28 +++---- core/state/iterator.go | 2 +- core/state/pruner/pruner.go | 13 +--- core/state/snapshot/account.go | 9 ++- core/state/snapshot/conversion.go | 3 +- core/state/snapshot/generate.go | 14 +--- core/state/snapshot/generate_test.go | 108 +++++++++++++-------------- core/state/snapshot/snapshot_test.go | 3 +- core/state/state_object.go | 16 ++-- core/state/statedb.go | 13 +--- core/state/sync_test.go | 5 +- core/state_processor_test.go | 2 +- core/types/block.go | 17 ++--- core/types/hashes.go | 42 +++++++++++ eth/fetcher/block_fetcher.go | 2 +- eth/protocols/snap/handler.go | 3 +- eth/protocols/snap/sync.go | 12 +-- eth/protocols/snap/sync_test.go | 14 ++-- ethclient/ethclient.go | 4 +- les/fetcher/block_fetcher.go | 2 +- les/server_requests.go | 2 +- light/lightchain_test.go | 4 +- trie/database.go | 2 +- trie/iterator.go | 5 +- trie/stacktrie.go | 3 +- trie/sync.go | 5 +- trie/sync_test.go | 3 +- trie/trie.go | 16 +--- trie/trie_test.go | 6 +- trie/util_test.go | 3 +- 36 files changed, 202 insertions(+), 209 deletions(-) create mode 100644 core/types/hashes.go diff --git a/cmd/devp2p/internal/ethtest/snap.go 
b/cmd/devp2p/internal/ethtest/snap.go index 754d7850d..f947e4bc9 100644 --- a/cmd/devp2p/internal/ethtest/snap.go +++ b/cmd/devp2p/internal/ethtest/snap.go @@ -23,6 +23,7 @@ import ( "math/rand" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/internal/utesting" @@ -210,13 +211,6 @@ type byteCodesTest struct { expHashes int } -var ( - // emptyRoot is the known root hash of an empty trie. - emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - // emptyCode is the known hash of the empty EVM bytecode. - emptyCode = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") -) - // TestSnapGetByteCodes various forms of GetByteCodes requests. func (s *Suite) TestSnapGetByteCodes(t *utesting.T) { // The halfchain import should yield these bytecodes @@ -263,15 +257,15 @@ func (s *Suite) TestSnapGetByteCodes(t *utesting.T) { }, // Empties { - nBytes: 10000, hashes: []common.Hash{emptyRoot}, + nBytes: 10000, hashes: []common.Hash{types.EmptyRootHash}, expHashes: 0, }, { - nBytes: 10000, hashes: []common.Hash{emptyCode}, + nBytes: 10000, hashes: []common.Hash{types.EmptyCodeHash}, expHashes: 1, }, { - nBytes: 10000, hashes: []common.Hash{emptyCode, emptyCode, emptyCode}, + nBytes: 10000, hashes: []common.Hash{types.EmptyCodeHash, types.EmptyCodeHash, types.EmptyCodeHash}, expHashes: 3, }, // The existing bytecodes @@ -363,7 +357,7 @@ func (s *Suite) TestSnapTrieNodes(t *utesting.T) { for i := 1; i <= 65; i++ { accPaths = append(accPaths, pathTo(i)) } - empty := emptyCode + empty := types.EmptyCodeHash for i, tc := range []trieNodesTest{ { root: s.chain.RootAt(999), diff --git a/cmd/evm/internal/t8ntool/block.go b/cmd/evm/internal/t8ntool/block.go index 1140daa2c..7a9c4b610 100644 --- a/cmd/evm/internal/t8ntool/block.go +++ b/cmd/evm/internal/t8ntool/block.go @@ -120,8 +120,8 @@ func (i *bbInput) ToBlock() *types.Block { UncleHash: types.EmptyUncleHash, Coinbase: common.Address{}, Root: i.Header.Root, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, Bloom: i.Header.Bloom, Difficulty: common.Big0, Number: i.Header.Number, diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index 7175bb953..0759341fd 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -38,14 +38,6 @@ import ( cli "github.com/urfave/cli/v2" ) -var ( - // emptyRoot is the known root hash of an empty trie. - emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - - // emptyCode is the known hash of the empty EVM bytecode. 
- emptyCode = crypto.Keccak256(nil) -) - var ( snapshotCommand = &cli.Command{ Name: "snapshot", @@ -308,7 +300,7 @@ func traverseState(ctx *cli.Context) error { log.Error("Invalid account encountered during traversal", "err", err) return err } - if acc.Root != emptyRoot { + if acc.Root != types.EmptyRootHash { id := trie.StorageTrieID(root, common.BytesToHash(accIter.Key), acc.Root) storageTrie, err := trie.NewStateTrie(id, triedb) if err != nil { @@ -324,7 +316,7 @@ func traverseState(ctx *cli.Context) error { return storageIter.Err } } - if !bytes.Equal(acc.CodeHash, emptyCode) { + if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash.Bytes()) { if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) { log.Error("Code is missing", "hash", common.BytesToHash(acc.CodeHash)) return errors.New("missing code") @@ -423,7 +415,7 @@ func traverseRawState(ctx *cli.Context) error { log.Error("Invalid account encountered during traversal", "err", err) return errors.New("invalid account") } - if acc.Root != emptyRoot { + if acc.Root != types.EmptyRootHash { id := trie.StorageTrieID(root, common.BytesToHash(accIter.LeafKey()), acc.Root) storageTrie, err := trie.NewStateTrie(id, triedb) if err != nil { @@ -461,7 +453,7 @@ func traverseRawState(ctx *cli.Context) error { return storageIter.Error() } } - if !bytes.Equal(acc.CodeHash, emptyCode) { + if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash.Bytes()) { if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) { log.Error("Code is missing", "account", common.BytesToHash(accIter.LeafKey())) return errors.New("missing code") @@ -536,7 +528,7 @@ func dumpState(ctx *cli.Context) error { CodeHash: account.CodeHash, SecureKey: accIt.Hash().Bytes(), } - if !conf.SkipCode && !bytes.Equal(account.CodeHash, emptyCode) { + if !conf.SkipCode && !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) { da.Code = rawdb.ReadCode(db, common.BytesToHash(account.CodeHash)) } if !conf.SkipStorage { diff --git a/consensus/ethash/algorithm_test.go b/consensus/ethash/algorithm_test.go index 88769d277..3ecd73068 100644 --- a/consensus/ethash/algorithm_test.go +++ b/consensus/ethash/algorithm_test.go @@ -709,11 +709,11 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) { block := types.NewBlockWithHeader(&types.Header{ Number: big.NewInt(3311058), ParentHash: common.HexToHash("0xd783efa4d392943503f28438ad5830b2d5964696ffc285f338585e9fe0a37a05"), - UncleHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), + UncleHash: types.EmptyUncleHash, Coinbase: common.HexToAddress("0xc0ea08a2d404d3172d2add29a45be56da40e2949"), Root: common.HexToHash("0x77d14e10470b5850332524f8cd6f69ad21f070ce92dca33ab2858300242ef2f1"), - TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), - ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, Difficulty: big.NewInt(167925187834220), GasLimit: 4015682, GasUsed: 0, diff --git a/core/bench_test.go b/core/bench_test.go index 7fc8a760f..494998437 100644 --- a/core/bench_test.go +++ b/core/bench_test.go @@ -252,8 +252,8 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) { ParentHash: hash, Difficulty: big.NewInt(1), UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, } hash = header.Hash() diff --git 
a/core/genesis.go b/core/genesis.go index 8b4e336ed..269c1486d 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -467,7 +467,7 @@ func (g *Genesis) ToBlock() *types.Block { } var withdrawals []*types.Withdrawal if g.Config != nil && g.Config.IsShanghai(g.Timestamp) { - head.WithdrawalsHash = &types.EmptyRootHash + head.WithdrawalsHash = &types.EmptyWithdrawalsHash withdrawals = make([]*types.Withdrawal, 0) } return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil)).WithWithdrawals(withdrawals) diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index f7f8d0b08..84eae1d90 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -113,8 +113,8 @@ func TestBlockStorage(t *testing.T) { block := types.NewBlockWithHeader(&types.Header{ Extra: []byte("test block"), UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, }) if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil { t.Fatalf("Non existent block returned: %v", entry) @@ -161,8 +161,8 @@ func TestPartialBlockStorage(t *testing.T) { block := types.NewBlockWithHeader(&types.Header{ Extra: []byte("test block"), UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, }) // Store a header and check that it's not recognized as a block WriteHeader(db, block.Header()) @@ -198,8 +198,8 @@ func TestBadBlockStorage(t *testing.T) { Number: big.NewInt(1), Extra: []byte("bad block"), UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, }) if entry := ReadBadBlock(db, block.Hash()); entry != nil { t.Fatalf("Non existent block returned: %v", entry) @@ -216,8 +216,8 @@ func TestBadBlockStorage(t *testing.T) { Number: big.NewInt(2), Extra: []byte("bad block two"), UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, }) WriteBadBlock(db, blockTwo) @@ -235,8 +235,8 @@ func TestBadBlockStorage(t *testing.T) { Number: big.NewInt(int64(n)), Extra: []byte("bad block"), UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, }) WriteBadBlock(db, block) } @@ -446,8 +446,8 @@ func TestAncientStorage(t *testing.T) { Number: big.NewInt(0), Extra: []byte("test block"), UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, }) // Ensure nothing non-existent will be read hash, number := block.Hash(), block.NumberU64() @@ -889,8 +889,8 @@ func TestHeadersRLPStorage(t *testing.T) { Number: big.NewInt(int64(i)), Extra: []byte("test block"), UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, ParentHash: pHash, }) chain = append(chain, block) diff --git a/core/state/iterator.go b/core/state/iterator.go index ba7efd465..29c4abfc2 100644 --- a/core/state/iterator.go +++ b/core/state/iterator.go @@ -117,7 +117,7 @@ func (it *NodeIterator) step() error { if !it.dataIt.Next(true) { 
it.dataIt = nil } - if !bytes.Equal(account.CodeHash, emptyCodeHash) { + if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) { it.codeHash = common.BytesToHash(account.CodeHash) addrHash := common.BytesToHash(it.stateIt.LeafKey()) it.code, err = it.state.db.ContractCode(addrHash, common.BytesToHash(account.CodeHash)) diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index d1ffc4f94..2bd5658e0 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -31,7 +31,6 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -55,14 +54,6 @@ const ( rangeCompactionThreshold = 100000 ) -var ( - // emptyRoot is the known root hash of an empty trie. - emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - - // emptyCode is the known hash of the empty EVM bytecode. - emptyCode = crypto.Keccak256(nil) -) - // Config includes all the configurations for pruning. type Config struct { Datadir string // The directory of the state database @@ -446,7 +437,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error { if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil { return err } - if acc.Root != emptyRoot { + if acc.Root != types.EmptyRootHash { id := trie.StorageTrieID(genesis.Root(), common.BytesToHash(accIter.LeafKey()), acc.Root) storageTrie, err := trie.NewStateTrie(id, trie.NewDatabase(db)) if err != nil { @@ -463,7 +454,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error { return storageIter.Error() } } - if !bytes.Equal(acc.CodeHash, emptyCode) { + if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash.Bytes()) { stateBloom.Put(acc.CodeHash, nil) } } diff --git a/core/state/snapshot/account.go b/core/state/snapshot/account.go index b92e94295..b5634972a 100644 --- a/core/state/snapshot/account.go +++ b/core/state/snapshot/account.go @@ -21,6 +21,7 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" ) @@ -41,10 +42,10 @@ func SlimAccount(nonce uint64, balance *big.Int, root common.Hash, codehash []by Nonce: nonce, Balance: balance, } - if root != emptyRoot { + if root != types.EmptyRootHash { slim.Root = root[:] } - if !bytes.Equal(codehash, emptyCode[:]) { + if !bytes.Equal(codehash, types.EmptyCodeHash[:]) { slim.CodeHash = codehash } return slim @@ -68,10 +69,10 @@ func FullAccount(data []byte) (Account, error) { return Account{}, err } if len(account.Root) == 0 { - account.Root = emptyRoot[:] + account.Root = types.EmptyRootHash[:] } if len(account.CodeHash) == 0 { - account.CodeHash = emptyCode[:] + account.CodeHash = types.EmptyCodeHash[:] } return account, nil } diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go index ebad28fc7..ed7cb963a 100644 --- a/core/state/snapshot/conversion.go +++ b/core/state/snapshot/conversion.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -74,7 +75,7 @@ func GenerateTrie(snaptree *Tree, root common.Hash, src 
ethdb.Database, dst ethd scheme := snaptree.triedb.Scheme() got, err := generateTrieRoot(dst, scheme, acctIt, common.Hash{}, stackTrieGenerate, func(dst ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { // Migrate the code first, commit the contract code into the tmp db. - if codeHash != emptyCode { + if codeHash != types.EmptyCodeHash { code := rawdb.ReadCode(src, codeHash) if len(code) == 0 { return common.Hash{}, errors.New("failed to read contract code") diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go index 46f41cdbe..d46705d31 100644 --- a/core/state/snapshot/generate.go +++ b/core/state/snapshot/generate.go @@ -27,7 +27,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -35,12 +35,6 @@ import ( ) var ( - // emptyRoot is the known root hash of an empty trie. - emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - - // emptyCode is the known hash of the empty EVM bytecode. - emptyCode = crypto.Keccak256Hash(nil) - // accountCheckRange is the upper limit of the number of accounts involved in // each range check. This is a value estimated based on experience. If this // range is too large, the failure rate of range proof will increase. Otherwise, @@ -591,10 +585,10 @@ func generateAccounts(ctx *generatorContext, dl *diskLayer, accMarker []byte) er if accMarker == nil || !bytes.Equal(account[:], accMarker) { dataLen := len(val) // Approximate size, saves us a round of RLP-encoding if !write { - if bytes.Equal(acc.CodeHash, emptyCode[:]) { + if bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) { dataLen -= 32 } - if acc.Root == emptyRoot { + if acc.Root == types.EmptyRootHash { dataLen -= 32 } snapRecoveredAccountMeter.Mark(1) @@ -621,7 +615,7 @@ func generateAccounts(ctx *generatorContext, dl *diskLayer, accMarker []byte) er // If the iterated account is the contract, create a further loop to // verify or regenerate the contract storage. 
- if acc.Root == emptyRoot { + if acc.Root == types.EmptyRootHash { ctx.removeStorageAt(account) } else { var storeMarker []byte diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go index 1bc3421e9..1bac4fd56 100644 --- a/core/state/snapshot/generate_test.go +++ b/core/state/snapshot/generate_test.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -49,9 +50,9 @@ func TestGeneration(t *testing.T) { var helper = newHelper() stRoot := helper.makeStorageTrie(common.Hash{}, common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false) - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) @@ -83,16 +84,16 @@ func TestGenerateExistentState(t *testing.T) { var helper = newHelper() stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) - helper.addSnapAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) - helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) - helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: 
emptyCode.Bytes()}) + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) root, snap := helper.CommitAndGenerate() @@ -235,28 +236,28 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) { helper := newHelper() // Account one, empty root but non-empty database - helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) // Account two, non empty root but empty database stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Miss slots { // Account three, non empty root but misses slots in the beginning helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-3", []string{"key-2", "key-3"}, []string{"val-2", "val-3"}) // Account four, non empty root but misses slots in the middle helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-4", []string{"key-1", "key-3"}, []string{"val-1", "val-3"}) // Account five, non empty root but misses slots in the end helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-5")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-5", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-5", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-5", []string{"key-1", "key-2"}, []string{"val-1", "val-2"}) } @@ -264,22 +265,22 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) { { // Account six, non empty root but wrong slots in the beginning helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-6", []string{"key-1", "key-2", "key-3"}, []string{"badval-1", "val-2", "val-3"}) // 
Account seven, non empty root but wrong slots in the middle helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-7")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-7", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-7", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-7", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "badval-2", "val-3"}) // Account eight, non empty root but wrong slots in the end helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-8")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-8", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-8", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-8", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "badval-3"}) // Account 9, non empty root but rotated slots helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-9")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-9", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-9", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-9", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-3", "val-2"}) } @@ -287,17 +288,17 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) { { // Account 10, non empty root but extra slots in the beginning helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-10")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-10", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-10", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-10", []string{"key-0", "key-1", "key-2", "key-3"}, []string{"val-0", "val-1", "val-2", "val-3"}) // Account 11, non empty root but extra slots in the middle helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-11")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-11", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-11", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-11", []string{"key-1", "key-2", "key-2-1", "key-3"}, []string{"val-1", "val-2", "val-2-1", "val-3"}) // Account 12, non empty root but extra slots in the end helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-12")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-12", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-12", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-12", []string{"key-1", "key-2", "key-3", "key-4"}, []string{"val-1", "val-2", "val-3", "val-4"}) } @@ -337,25 +338,25 @@ func TestGenerateExistentStateWithWrongAccounts(t *testing.T) { // Missing accounts, only in the trie { - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, 
CodeHash: emptyCode.Bytes()}) // Beginning - helper.addTrieAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // Middle - helper.addTrieAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // End + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Beginning + helper.addTrieAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Middle + helper.addTrieAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // End } // Wrong accounts { - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")}) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) - helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) } // Extra accounts, only in the snap { - helper.addSnapAccount("acc-0", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyRoot.Bytes()}) // before the beginning - helper.addSnapAccount("acc-5", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: common.Hex2Bytes("0x1234")}) // Middle - helper.addSnapAccount("acc-7", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyRoot.Bytes()}) // after the end + helper.addSnapAccount("acc-0", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // before the beginning + helper.addSnapAccount("acc-5", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: common.Hex2Bytes("0x1234")}) // Middle + helper.addSnapAccount("acc-7", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // after the end } root, snap := helper.CommitAndGenerate() @@ -384,9 +385,9 @@ func TestGenerateCorruptAccountTrie(t *testing.T) { // without any storage slots to keep the test smaller. 
helper := newHelper() - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074 - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4 + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074 + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4 root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978 @@ -419,10 +420,10 @@ func TestGenerateMissingStorageTrie(t *testing.T) { helper := newHelper() stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2 + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2 root := helper.Commit() @@ -453,10 +454,10 @@ func TestGenerateCorruptStorageTrie(t *testing.T) { helper := newHelper() stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: 
emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2 + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2 root := helper.Commit() @@ -488,7 +489,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) { []string{"val-1", "val-2", "val-3", "val-4", "val-5"}, true, ) - acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) helper.accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e @@ -508,7 +509,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) { []string{"val-1", "val-2", "val-3", "val-4", "val-5"}, true, ) - acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) key := hashData([]byte("acc-2")) rawdb.WriteAccountSnapshot(helper.diskdb, key, val) @@ -559,7 +560,7 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) { []string{"val-1", "val-2", "val-3"}, true, ) - acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) helper.accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e @@ -573,8 +574,7 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) { { // 100 accounts exist only in snapshot for i := 0; i < 1000; i++ { - //acc := &Account{Balance: big.NewInt(int64(i)), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()} - acc := &Account{Balance: big.NewInt(int64(i)), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()} + acc := &Account{Balance: big.NewInt(int64(i)), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) key := hashData([]byte(fmt.Sprintf("acc-%d", i))) rawdb.WriteAccountSnapshot(helper.diskdb, key, val) @@ -611,7 +611,7 @@ func TestGenerateWithExtraBeforeAndAfter(t *testing.T) { } helper := newHelper() { - acc := &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val) helper.accTrie.Update(common.HexToHash("0x07").Bytes(), val) @@ -648,7 +648,7 @@ func TestGenerateWithMalformedSnapdata(t 
*testing.T) { } helper := newHelper() { - acc := &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val) @@ -687,7 +687,7 @@ func TestGenerateFromEmptySnap(t *testing.T) { for i := 0; i < 400; i++ { stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) helper.addTrieAccount(fmt.Sprintf("acc-%d", i), - &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) } root, snap := helper.CommitAndGenerate() t.Logf("Root: %#x\n", root) // Root: 0x6f7af6d2e1a1bf2b84a3beb3f8b64388465fbc1e274ca5d5d3fc787ca78f59e4 @@ -724,7 +724,7 @@ func TestGenerateWithIncompleteStorage(t *testing.T) { for i := 0; i < 8; i++ { accKey := fmt.Sprintf("acc-%d", i) stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte(accKey)), stKeys, stVals, true) - helper.addAccount(accKey, &Account{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount(accKey, &Account{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) var moddedKeys []string var moddedVals []string for ii := 0; ii < 8; ii++ { @@ -816,11 +816,11 @@ func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) { var helper = newHelper() stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) - helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) @@ -851,11 +851,11 @@ func TestGenerateBrokenSnapshotWithDanglingStorage(t *testing.T) { var helper = newHelper() stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) - helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: 
types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) populateDangling(helper.diskdb) diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go index 82833873c..6893f6001 100644 --- a/core/state/snapshot/snapshot_test.go +++ b/core/state/snapshot/snapshot_test.go @@ -28,6 +28,7 @@ import ( "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" ) @@ -47,7 +48,7 @@ func randomAccount() []byte { Balance: big.NewInt(rand.Int63()), Nonce: rand.Uint64(), Root: root[:], - CodeHash: emptyCode[:], + CodeHash: types.EmptyCodeHash[:], } data, _ := rlp.EncodeToBytes(a) return data diff --git a/core/state/state_object.go b/core/state/state_object.go index 1550926d3..5dfd3c1b6 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -31,8 +31,6 @@ import ( "github.com/ethereum/go-ethereum/trie" ) -var emptyCodeHash = crypto.Keccak256(nil) - type Code []byte func (c Code) String() string { @@ -95,7 +93,7 @@ type stateObject struct { // empty returns whether the account is considered empty. func (s *stateObject) empty() bool { - return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash) + return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, types.EmptyCodeHash.Bytes()) } // newObject creates a state object. 
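As a minimal sketch of the effect of this unification (not part of the patch; the standalone helper is an assumption), the EIP-161 emptiness check in the hunk above can now be expressed against the shared constants in core/types instead of a package-local emptyCodeHash:

    import (
        "bytes"

        "github.com/ethereum/go-ethereum/core/types"
    )

    // isEmptyAccount mirrors stateObject.empty(): zero nonce, zero balance
    // and no deployed code, using the unified constant from core/types.
    func isEmptyAccount(data types.StateAccount) bool {
        return data.Nonce == 0 &&
            data.Balance.Sign() == 0 &&
            bytes.Equal(data.CodeHash, types.EmptyCodeHash.Bytes())
    }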
@@ -104,10 +102,10 @@ func newObject(db *StateDB, address common.Address, data types.StateAccount) *st data.Balance = new(big.Int) } if data.CodeHash == nil { - data.CodeHash = emptyCodeHash + data.CodeHash = types.EmptyCodeHash.Bytes() } if data.Root == (common.Hash{}) { - data.Root = emptyRoot + data.Root = types.EmptyRootHash } return &stateObject{ db: db, @@ -154,7 +152,7 @@ func (s *stateObject) getTrie(db Database) (Trie, error) { if s.trie == nil { // Try fetching from prefetcher first // We don't prefetch empty tries - if s.data.Root != emptyRoot && s.db.prefetcher != nil { + if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil { // When the miner is creating the pending state, there is no // prefetcher s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root) @@ -270,7 +268,7 @@ func (s *stateObject) finalise(prefetch bool) { slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure } } - if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != emptyRoot { + if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != types.EmptyRootHash { s.db.prefetcher.prefetch(s.addrHash, s.data.Root, slotsToPrefetch) } if len(s.dirtyStorage) > 0 { @@ -454,7 +452,7 @@ func (s *stateObject) Code(db Database) []byte { if s.code != nil { return s.code } - if bytes.Equal(s.CodeHash(), emptyCodeHash) { + if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) { return nil } code, err := db.ContractCode(s.addrHash, common.BytesToHash(s.CodeHash())) @@ -472,7 +470,7 @@ func (s *stateObject) CodeSize(db Database) int { if s.code != nil { return len(s.code) } - if bytes.Equal(s.CodeHash(), emptyCodeHash) { + if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) { return 0 } size, err := db.ContractCodeSize(s.addrHash, common.BytesToHash(s.CodeHash())) diff --git a/core/state/statedb.go b/core/state/statedb.go index 3f4bec239..3d8fd15bb 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -41,11 +41,6 @@ type revision struct { journalIndex int } -var ( - // emptyRoot is the known root hash of an empty trie. - emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") -) - type proofList [][]byte func (n *proofList) Put(key []byte, value []byte) error { @@ -580,10 +575,10 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { Root: common.BytesToHash(acc.Root), } if len(data.CodeHash) == 0 { - data.CodeHash = emptyCodeHash + data.CodeHash = types.EmptyCodeHash.Bytes() } if data.Root == (common.Hash{}) { - data.Root = emptyRoot + data.Root = types.EmptyRootHash } } } @@ -1066,11 +1061,11 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { s.stateObjectsDestruct = make(map[common.Address]struct{}) } if root == (common.Hash{}) { - root = emptyRoot + root = types.EmptyRootHash } origin := s.originalRoot if origin == (common.Hash{}) { - origin = emptyRoot + origin = types.EmptyRootHash } if root != origin { start := time.Now() diff --git a/core/state/sync_test.go b/core/state/sync_test.go index 84b7bf84e..aff91268a 100644 --- a/core/state/sync_test.go +++ b/core/state/sync_test.go @@ -134,8 +134,7 @@ func checkStateConsistency(db ethdb.Database, root common.Hash) error { // Tests that an empty state is not scheduled for syncing. 
func TestEmptyStateSync(t *testing.T) { db := trie.NewDatabase(rawdb.NewMemoryDatabase()) - empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - sync := NewStateSync(empty, rawdb.NewMemoryDatabase(), nil, db.Scheme()) + sync := NewStateSync(types.EmptyRootHash, rawdb.NewMemoryDatabase(), nil, db.Scheme()) if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 { t.Errorf("content requested for empty state: %v, %v, %v", nodes, paths, codes) } @@ -555,7 +554,7 @@ func TestIncompleteStateSync(t *testing.T) { isCode[crypto.Keccak256Hash(acc.code)] = struct{}{} } } - isCode[common.BytesToHash(emptyCodeHash)] = struct{}{} + isCode[types.EmptyCodeHash] = struct{}{} checkTrieConsistency(db, srcRoot) // Create a destination state and sync with the scheduler diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 821b85e9b..59391fa86 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -404,7 +404,7 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr header.BaseFee = misc.CalcBaseFee(config, parent.Header()) } if config.IsShanghai(header.Time) { - header.WithdrawalsHash = &types.EmptyRootHash + header.WithdrawalsHash = &types.EmptyWithdrawalsHash } var receipts []*types.Receipt // The post-state result doesn't need to be correct (this is a bad block), but we do need something there diff --git a/core/types/block.go b/core/types/block.go index ac8c031ac..82ad3ce99 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -31,11 +31,6 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) -var ( - EmptyRootHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - EmptyUncleHash = rlpHash([]*Header(nil)) -) - // A BlockNonce is a 64-bit hash which proves (combined with the // mix-hash) that a sufficient amount of computation has been carried // out on a block. @@ -155,14 +150,14 @@ func (h *Header) SanityCheck() error { // that is: no transactions, no uncles and no withdrawals. func (h *Header) EmptyBody() bool { if h.WithdrawalsHash == nil { - return h.TxHash == EmptyRootHash && h.UncleHash == EmptyUncleHash + return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash } - return h.TxHash == EmptyRootHash && h.UncleHash == EmptyUncleHash && *h.WithdrawalsHash == EmptyRootHash + return h.TxHash == EmptyTxsHash && h.UncleHash == EmptyUncleHash && *h.WithdrawalsHash == EmptyWithdrawalsHash } // EmptyReceipts returns true if there are no receipts for this header/block. 
func (h *Header) EmptyReceipts() bool { - return h.ReceiptHash == EmptyRootHash + return h.ReceiptHash == EmptyReceiptsHash } // Body is a simple (mutable, non-safe) data container for storing and moving @@ -210,7 +205,7 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []* // TODO: panic if len(txs) != len(receipts) if len(txs) == 0 { - b.header.TxHash = EmptyRootHash + b.header.TxHash = EmptyTxsHash } else { b.header.TxHash = DeriveSha(Transactions(txs), hasher) b.transactions = make(Transactions, len(txs)) @@ -218,7 +213,7 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []* } if len(receipts) == 0 { - b.header.ReceiptHash = EmptyRootHash + b.header.ReceiptHash = EmptyReceiptsHash } else { b.header.ReceiptHash = DeriveSha(Receipts(receipts), hasher) b.header.Bloom = CreateBloom(receipts) @@ -250,7 +245,7 @@ func NewBlockWithWithdrawals(header *Header, txs []*Transaction, uncles []*Heade if withdrawals == nil { b.header.WithdrawalsHash = nil } else if len(withdrawals) == 0 { - b.header.WithdrawalsHash = &EmptyRootHash + b.header.WithdrawalsHash = &EmptyWithdrawalsHash } else { h := DeriveSha(Withdrawals(withdrawals), hasher) b.header.WithdrawalsHash = &h diff --git a/core/types/hashes.go b/core/types/hashes.go new file mode 100644 index 000000000..3bad430be --- /dev/null +++ b/core/types/hashes.go @@ -0,0 +1,42 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package types + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +var ( + // EmptyRootHash is the known root hash of an empty trie. + EmptyRootHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + + // EmptyUncleHash is the known hash of the empty uncle set. + EmptyUncleHash = rlpHash([]*Header(nil)) // 1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347 + + // EmptyCodeHash is the known hash of the empty EVM bytecode. + EmptyCodeHash = crypto.Keccak256Hash(nil) // c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470 + + // EmptyTxsHash is the known hash of the empty transaction set. + EmptyTxsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + + // EmptyReceiptsHash is the known hash of the empty receipt set. + EmptyReceiptsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + + // EmptyWithdrawalsHash is the known hash of the empty withdrawal set. 
+ EmptyWithdrawalsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") +) diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go index 50081d2e5..35608031d 100644 --- a/eth/fetcher/block_fetcher.go +++ b/eth/fetcher/block_fetcher.go @@ -599,7 +599,7 @@ func (f *BlockFetcher) loop() { announce.time = task.time // If the block is empty (header only), short circuit into the final import queue - if header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash { + if header.TxHash == types.EmptyTxsHash && header.UncleHash == types.EmptyUncleHash { log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash()) block := types.NewBlockWithHeader(header) diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go index 6941d522a..d7c940044 100644 --- a/eth/protocols/snap/handler.go +++ b/eth/protocols/snap/handler.go @@ -23,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -464,7 +465,7 @@ func ServiceGetByteCodesQuery(chain *core.BlockChain, req *GetByteCodesPacket) [ bytes uint64 ) for _, hash := range req.Hashes { - if hash == emptyCode { + if hash == types.EmptyCodeHash { // Peers should not request the empty code, but if they do, at // least sent them back a correct response without db lookups codes = append(codes, []byte{}) diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index 052d8eaca..13279fd96 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -46,14 +46,6 @@ import ( "golang.org/x/crypto/sha3" ) -var ( - // emptyRoot is the known root hash of an empty trie. - emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - - // emptyCode is the known hash of the empty EVM bytecode. - emptyCode = crypto.Keccak256Hash(nil) -) - const ( // minRequestSize is the minimum number of bytes to request from a remote peer. // This number is used as the low cap for account and storage range requests. @@ -1833,7 +1825,7 @@ func (s *Syncer) processAccountResponse(res *accountResponse) { res.task.pend = 0 for i, account := range res.accounts { // Check if the account is a contract with an unknown code - if !bytes.Equal(account.CodeHash, emptyCode[:]) { + if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) { if !rawdb.HasCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)) { res.task.codeTasks[common.BytesToHash(account.CodeHash)] = struct{}{} res.task.needCode[i] = true @@ -1841,7 +1833,7 @@ func (s *Syncer) processAccountResponse(res *accountResponse) { } } // Check if the account is a contract with an unknown storage trie - if account.Root != emptyRoot { + if account.Root != types.EmptyRootHash { if !rawdb.HasTrieNode(s.db, res.hashes[i], nil, account.Root, s.scheme) { // If there was a previous large state retrieval in progress, // don't restart it from scratch. 
This happens if a sync cycle diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index 971605d8c..0a6117972 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -1354,7 +1354,7 @@ func getCodeHash(i uint64) []byte { // getCodeByHash convenience function to lookup the code from the code hash func getCodeByHash(hash common.Hash) []byte { - if hash == emptyCode { + if hash == types.EmptyCodeHash { return nil } for i, h := range codehashes { @@ -1376,7 +1376,7 @@ func makeAccountTrieNoStorage(n int) (string, *trie.Trie, entrySlice) { value, _ := rlp.EncodeToBytes(&types.StateAccount{ Nonce: i, Balance: big.NewInt(int64(i)), - Root: emptyRoot, + Root: types.EmptyRootHash, CodeHash: getCodeHash(i), }) key := key32(i) @@ -1427,7 +1427,7 @@ func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) { value, _ := rlp.EncodeToBytes(&types.StateAccount{ Nonce: uint64(0), Balance: big.NewInt(int64(i)), - Root: emptyRoot, + Root: types.EmptyRootHash, CodeHash: getCodeHash(uint64(i)), }) elem := &kv{boundaries[i].Bytes(), value} @@ -1439,7 +1439,7 @@ func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) { value, _ := rlp.EncodeToBytes(&types.StateAccount{ Nonce: i, Balance: big.NewInt(int64(i)), - Root: emptyRoot, + Root: types.EmptyRootHash, CodeHash: getCodeHash(i), }) elem := &kv{key32(i), value} @@ -1472,7 +1472,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) // Create n accounts in the trie for i := uint64(1); i <= uint64(accounts); i++ { key := key32(i) - codehash := emptyCode[:] + codehash := types.EmptyCodeHash.Bytes() if code { codehash = getCodeHash(i) } @@ -1527,7 +1527,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin // Create n accounts in the trie for i := uint64(1); i <= uint64(accounts); i++ { key := key32(i) - codehash := emptyCode[:] + codehash := types.EmptyCodeHash.Bytes() if code { codehash = getCodeHash(i) } @@ -1678,7 +1678,7 @@ func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) { log.Crit("Invalid account encountered during snapshot creation", "err", err) } accounts++ - if acc.Root != emptyRoot { + if acc.Root != types.EmptyRootHash { id := trie.StorageTrieID(root, common.BytesToHash(accIt.Key), acc.Root) storeTrie, err := trie.NewStateTrie(id, triedb) if err != nil { diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 1d2df5466..460035f36 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -131,10 +131,10 @@ func (ec *Client) getBlock(ctx context.Context, method string, args ...interface if head.UncleHash != types.EmptyUncleHash && len(body.UncleHashes) == 0 { return nil, fmt.Errorf("server returned empty uncle list but block header indicates uncles") } - if head.TxHash == types.EmptyRootHash && len(body.Transactions) > 0 { + if head.TxHash == types.EmptyTxsHash && len(body.Transactions) > 0 { return nil, fmt.Errorf("server returned non-empty transaction list but block header indicates no transactions") } - if head.TxHash != types.EmptyRootHash && len(body.Transactions) == 0 { + if head.TxHash != types.EmptyTxsHash && len(body.Transactions) == 0 { return nil, fmt.Errorf("server returned empty transaction list but block header indicates transactions") } // Load uncles because they are not included in the block response. 
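For illustration only (not part of the patch series): with the package-local emptyRoot/emptyCode variables folded into the exported constants in core/types/hashes.go, code outside core/types can test for "empty" headers without redeclaring the empty-trie root. A minimal sketch, assuming only the constants introduced above; headerLooksEmpty is a hypothetical helper, not a function added by these patches:

package example

import "github.com/ethereum/go-ethereum/core/types"

// headerLooksEmpty reports whether a header commits to no transactions,
// no uncles and no receipts, using the shared types.Empty* constants
// instead of package-local copies of the empty trie root hash.
func headerLooksEmpty(h *types.Header) bool {
	return h.TxHash == types.EmptyTxsHash &&
		h.UncleHash == types.EmptyUncleHash &&
		h.ReceiptHash == types.EmptyReceiptsHash
}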
diff --git a/les/fetcher/block_fetcher.go b/les/fetcher/block_fetcher.go index c76f20ced..085ecb2d6 100644 --- a/les/fetcher/block_fetcher.go +++ b/les/fetcher/block_fetcher.go @@ -548,7 +548,7 @@ func (f *BlockFetcher) loop() { announce.time = task.time // If the block is empty (header only), short circuit into the final import queue - if header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash { + if header.TxHash == types.EmptyTxsHash && header.UncleHash == types.EmptyUncleHash { log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash()) block := types.NewBlockWithHeader(header) diff --git a/les/server_requests.go b/les/server_requests.go index 3563bf93c..033b11d79 100644 --- a/les/server_requests.go +++ b/les/server_requests.go @@ -348,7 +348,7 @@ func handleGetReceipts(msg Decoder) (serveRequestFn, uint64, uint64, error) { // Retrieve the requested block's receipts, skipping if unknown to us results := bc.GetReceiptsByHash(hash) if results == nil { - if header := bc.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash { + if header := bc.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyReceiptsHash { p.bumpInvalid() continue } diff --git a/light/lightchain_test.go b/light/lightchain_test.go index 8600e5634..d19713dc2 100644 --- a/light/lightchain_test.go +++ b/light/lightchain_test.go @@ -253,8 +253,8 @@ func makeHeaderChainWithDiff(genesis *types.Block, d []int, seed byte) []*types. Number: big.NewInt(int64(i + 1)), Difficulty: big.NewInt(int64(difficulty)), UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, + TxHash: types.EmptyTxsHash, + ReceiptHash: types.EmptyReceiptsHash, } if i == 0 { header.ParentHash = genesis.Hash() diff --git a/trie/database.go b/trie/database.go index 74247d59c..895ffdf89 100644 --- a/trie/database.go +++ b/trie/database.go @@ -808,7 +808,7 @@ func (db *Database) Update(nodes *MergedNodeSet) error { if err := rlp.DecodeBytes(n.blob, &account); err != nil { return err } - if account.Root != emptyRoot { + if account.Root != types.EmptyRootHash { db.reference(account.Root, n.parent) } } diff --git a/trie/iterator.go b/trie/iterator.go index aa621cd54..f42beec4a 100644 --- a/trie/iterator.go +++ b/trie/iterator.go @@ -22,6 +22,7 @@ import ( "errors" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" ) // NodeResolver is used for looking up trie nodes before reaching into the real @@ -160,7 +161,7 @@ func (e seekError) Error() string { } func newNodeIterator(trie *Trie, start []byte) NodeIterator { - if trie.Hash() == emptyRoot { + if trie.Hash() == types.EmptyRootHash { return &nodeIterator{ trie: trie, err: errIteratorEnd, @@ -302,7 +303,7 @@ func (it *nodeIterator) seek(prefix []byte) error { func (it *nodeIterator) init() (*nodeIteratorState, error) { root := it.trie.Hash() state := &nodeIteratorState{node: it.trie.root, index: -1} - if root != emptyRoot { + if root != types.EmptyRootHash { state.hash = root } return state, state.resolve(it, nil) diff --git a/trie/stacktrie.go b/trie/stacktrie.go index fb8cc0d76..e7b3171af 100644 --- a/trie/stacktrie.go +++ b/trie/stacktrie.go @@ -25,6 +25,7 @@ import ( "sync" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" ) @@ -407,7 +408,7 @@ func (st *StackTrie) hashRec(hasher *hasher, path []byte) { return case 
emptyNode: - st.val = emptyRoot.Bytes() + st.val = types.EmptyRootHash.Bytes() st.key = st.key[:0] st.nodeType = hashedNode return diff --git a/trie/sync.go b/trie/sync.go index 4bf735c02..4f5584599 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -24,6 +24,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -184,7 +185,7 @@ func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallb // hex format and contain all the parent path if it's layered trie node. func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, parentPath []byte, callback LeafCallback) { // Short circuit if the trie is empty or already known - if root == emptyRoot { + if root == types.EmptyRootHash { return } if s.membatch.hasNode(path) { @@ -217,7 +218,7 @@ func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, par // as is. func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash, parentPath []byte) { // Short circuit if the entry is empty or already known - if hash == emptyState { + if hash == types.EmptyCodeHash { return } if s.membatch.hasCode(hash) { diff --git a/trie/sync_test.go b/trie/sync_test.go index 709f22949..fc871a22c 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -23,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb/memorydb" ) @@ -104,7 +105,7 @@ func TestEmptySync(t *testing.T) { dbA := NewDatabase(rawdb.NewMemoryDatabase()) dbB := NewDatabase(rawdb.NewMemoryDatabase()) emptyA, _ := New(TrieID(common.Hash{}), dbA) - emptyB, _ := New(TrieID(emptyRoot), dbB) + emptyB, _ := New(TrieID(types.EmptyRootHash), dbB) for i, trie := range []*Trie{emptyA, emptyB} { sync := NewSync(trie.Hash(), memorydb.New(), nil, []*Database{dbA, dbB}[i].Scheme()) diff --git a/trie/trie.go b/trie/trie.go index c467ac476..cf9108f10 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -23,18 +23,10 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" ) -var ( - // emptyRoot is the known root hash of an empty trie. - emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - - // emptyState is the known hash of an empty state trie entry. - emptyState = crypto.Keccak256Hash(nil) -) - // Trie is a Merkle Patricia Trie. Use New to create a trie that sits on // top of a database. Whenever trie performs a commit operation, the generated // nodes will be gathered and returned in a set. 
Once the trie is committed, @@ -91,7 +83,7 @@ func New(id *ID, db NodeReader) (*Trie, error) { reader: reader, //tracer: newTracer(), } - if id.Root != (common.Hash{}) && id.Root != emptyRoot { + if id.Root != (common.Hash{}) && id.Root != types.EmptyRootHash { rootnode, err := trie.resolveAndTrack(id.Root[:], nil) if err != nil { return nil, err @@ -576,7 +568,7 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet) { // Wrap tracked deletions as the return set := NewNodeSet(t.owner) t.tracer.markDeletions(set) - return emptyRoot, set + return types.EmptyRootHash, set } // Derive the hash for all dirty nodes first. We hold the assumption // in the following procedure that all nodes are hashed. @@ -599,7 +591,7 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet) { // hashRoot calculates the root hash of the given trie func (t *Trie) hashRoot() (node, node, error) { if t.root == nil { - return hashNode(emptyRoot.Bytes()), nil, nil + return hashNode(types.EmptyRootHash.Bytes()), nil, nil } // If the number of changes is below 100, we let one thread handle it h := newHasher(t.unhashed >= 100) diff --git a/trie/trie_test.go b/trie/trie_test.go index 2fb97eebb..2f56c89cd 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -47,7 +47,7 @@ func init() { func TestEmptyTrie(t *testing.T) { trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) res := trie.Hash() - exp := emptyRoot + exp := types.EmptyRootHash if res != exp { t.Errorf("expected %x got %x", exp, res) } @@ -431,7 +431,7 @@ func runRandTest(rt randTest) bool { } case opProve: hash := tr.Hash() - if hash == emptyRoot { + if hash == types.EmptyRootHash { continue } proofDb := rawdb.NewMemoryDatabase() @@ -718,7 +718,7 @@ func makeAccounts(size int) (addresses [][20]byte, accounts [][]byte) { for i := 0; i < len(accounts); i++ { var ( nonce = uint64(random.Int63()) - root = emptyRoot + root = types.EmptyRootHash code = crypto.Keccak256(nil) ) // The big.Rand function is not deterministic with regards to 64 vs 32 bit systems, diff --git a/trie/util_test.go b/trie/util_test.go index 01a46553a..8d925a16a 100644 --- a/trie/util_test.go +++ b/trie/util_test.go @@ -22,6 +22,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" ) // Tests if the trie diffs are tracked correctly. @@ -286,7 +287,7 @@ func TestDeleteAll(t *testing.T) { trie.Delete([]byte(val.k)) } root, set = trie.Commit(false) - if root != emptyRoot { + if root != types.EmptyRootHash { t.Fatalf("Invalid trie root %v", root) } for path, blob := range set.deletes { From 78e00bf68d5bee5da4ca031d0f716b7c2c17b8b3 Mon Sep 17 00:00:00 2001 From: philip-morlier Date: Tue, 21 Feb 2023 10:29:25 -0800 Subject: [PATCH 21/26] Manual touches to preserve compatability of other networks with utils as well as fix a failing test in core/rawdb. 
--- core/plugin_hooks.go | 3 +- core/rawdb/freezer_test.go | 6 + eth/tracers/plugin_hooks.go | 4 +- go.mod | 4 +- go.sum | 5 +- plugins/packages/blockupdates/main.go | 306 -------------------------- plugins/wrappers/wrappers.go | 57 ++--- 7 files changed, 43 insertions(+), 342 deletions(-) delete mode 100644 plugins/packages/blockupdates/main.go diff --git a/core/plugin_hooks.go b/core/plugin_hooks.go index 14120903f..2324f4ae2 100644 --- a/core/plugin_hooks.go +++ b/core/plugin_hooks.go @@ -249,9 +249,10 @@ func (mt *metaTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, sc tracer.CaptureFault(pc, core.OpCode(op), gas, cost, wrappers.NewWrappedScopeContext(scope), depth, err) } } +// passing zero as a dummy value is foundation PluGeth only, it is being done to preserve compatability with other networks func (mt *metaTracer) CaptureEnd(output []byte, gasUsed uint64, err error) { for _, tracer := range mt.tracers { - tracer.CaptureEnd(output, gasUsed, err) + tracer.CaptureEnd(output, gasUsed, 0, err) } } diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go index 5896e43ce..5998ffe4d 100644 --- a/core/rawdb/freezer_test.go +++ b/core/rawdb/freezer_test.go @@ -26,12 +26,18 @@ import ( "path" "sync" "testing" + "time" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/require" ) +// seeding random below, as well as the time import are PluGeth injections to enable this test to pass as it fails in geth v1.11.1 +func init() { + rand.Seed(time.Now().Unix()) +} + var freezerTestTableDef = map[string]bool{"test": true} func TestFreezerModify(t *testing.T) { diff --git a/eth/tracers/plugin_hooks.go b/eth/tracers/plugin_hooks.go index 68948df3b..a297bb553 100644 --- a/eth/tracers/plugin_hooks.go +++ b/eth/tracers/plugin_hooks.go @@ -2,6 +2,7 @@ package tracers import ( "reflect" + "math/big" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" @@ -37,7 +38,8 @@ func GetPluginTracer(pl *plugins.PluginLoader, name string) (func(*state.StateDB Coinbase: core.Address(vmctx.Coinbase), GasLimit: vmctx.GasLimit, BlockNumber: vmctx.BlockNumber, - Time: vmctx.Time, + // casting int64 zero as bigInt is foundation PluGeth only, it is being done to preserve compatability with other networks + Time: new(big.Int).SetInt64(int64(vmctx.Time)), Difficulty: vmctx.Difficulty, BaseFee: vmctx.BaseFee, })) diff --git a/go.mod b/go.mod index 59c646648..4596d45c0 100644 --- a/go.mod +++ b/go.mod @@ -50,7 +50,7 @@ require ( github.com/mattn/go-isatty v0.0.16 github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 github.com/olekukonko/tablewriter v0.0.5 - github.com/openrelayxyz/plugeth-utils v0.0.22 + github.com/openrelayxyz/plugeth-utils v0.0.23 github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 github.com/prometheus/tsdb v0.7.1 github.com/rs/cors v1.7.0 @@ -71,8 +71,6 @@ require ( gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce ) -require github.com/hashicorp/golang-lru v0.5.1 - require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 // indirect diff --git a/go.sum b/go.sum index 2cf7ebdc7..e405bc759 100644 --- a/go.sum +++ b/go.sum @@ -292,7 +292,6 @@ github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpx github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= github.com/hashicorp/go-version v1.2.0/go.mod 
h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw= @@ -452,8 +451,8 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/openrelayxyz/plugeth-utils v0.0.22 h1:0sO1gA+m48uIvpys5wyv5ZqByqdaBZZdRzkujq0gZik= -github.com/openrelayxyz/plugeth-utils v0.0.22/go.mod h1:zGm3/elx1rCcrYAFUQ5/He7AG/n039/3xYg0/Q8nqcQ= +github.com/openrelayxyz/plugeth-utils v0.0.23 h1:TKNGnRPWZ3mXNUHfjr1iRjsto2r6ngNZb95dJLj2j5w= +github.com/openrelayxyz/plugeth-utils v0.0.23/go.mod h1:zGm3/elx1rCcrYAFUQ5/He7AG/n039/3xYg0/Q8nqcQ= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= diff --git a/plugins/packages/blockupdates/main.go b/plugins/packages/blockupdates/main.go deleted file mode 100644 index 27798f82c..000000000 --- a/plugins/packages/blockupdates/main.go +++ /dev/null @@ -1,306 +0,0 @@ -package main - -import ( - "bytes" - "fmt" - "context" - "encoding/json" - lru "github.com/hashicorp/golang-lru" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/plugins" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/plugins/interfaces" - "github.com/ethereum/go-ethereum/cmd/utils" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/internal/ethapi" - "github.com/urfave/cli/v2" - "io" -) - - -var ( - pl *plugins.PluginLoader - backend interfaces.Backend - lastBlock common.Hash - cache *lru.Cache - blockEvents event.Feed -) - - -// stateUpdate will be used to track state updates -type stateUpdate struct { - Destructs map[common.Hash]struct{} - Accounts map[common.Hash][]byte - Storage map[common.Hash]map[common.Hash][]byte -} - - -// kvpair is used for RLP encoding of maps, as maps cannot be RLP encoded directly -type kvpair struct { - Key common.Hash - Value []byte -} - -// storage is used for RLP encoding two layers of maps, as maps cannot be RLP encoded directly -type storage struct { - Account common.Hash - Data []kvpair -} - -// storedStateUpdate is an RLP encodable version of stateUpdate -type storedStateUpdate struct { - Destructs []common.Hash - Accounts []kvpair - Storage []storage -} - - -// MarshalJSON represents the stateUpdate as JSON for RPC calls -func (su *stateUpdate) MarshalJSON() ([]byte, error) { - result := make(map[string]interface{}) - destructs := make([]common.Hash, 0, len(su.Destructs)) - for k := 
range su.Destructs { - destructs = append(destructs, k) - } - result["destructs"] = destructs - accounts := make(map[common.Hash]hexutil.Bytes) - for k, v := range su.Accounts { - accounts[k] = hexutil.Bytes(v) - } - result["accounts"] = accounts - storage := make(map[common.Hash]map[common.Hash]hexutil.Bytes) - for m, s := range su.Storage { - storage[m] = make(map[common.Hash]hexutil.Bytes) - for k, v := range s { - storage[m][k] = hexutil.Bytes(v) - } - } - result["storage"] = storage - return json.Marshal(result) -} - -// EncodeRLP converts the stateUpdate to a storedStateUpdate, and RLP encodes the result for storage -func (su *stateUpdate) EncodeRLP(w io.Writer) error { - destructs := make([]common.Hash, 0, len(su.Destructs)) - for k := range su.Destructs { - destructs = append(destructs, k) - } - accounts := make([]kvpair, 0, len(su.Accounts)) - for k, v := range su.Accounts { - accounts = append(accounts, kvpair{k, v}) - } - s := make([]storage, 0, len(su.Storage)) - for a, m := range su.Storage { - accountStorage := storage{a, make([]kvpair, 0, len(m))} - for k, v := range m { - accountStorage.Data = append(accountStorage.Data, kvpair{k, v}) - } - s = append(s, accountStorage) - } - return rlp.Encode(w, storedStateUpdate{destructs, accounts, s}) -} - -// DecodeRLP takes a byte stream, decodes it to a storedStateUpdate, the n converts that into a stateUpdate object -func (su *stateUpdate) DecodeRLP(s *rlp.Stream) error { - ssu := storedStateUpdate{} - if err := s.Decode(&ssu); err != nil { return err } - su.Destructs = make(map[common.Hash]struct{}) - for _, s := range ssu.Destructs { - su.Destructs[s] = struct{}{} - } - su.Accounts = make(map[common.Hash][]byte) - for _, kv := range ssu.Accounts { - su.Accounts[kv.Key] = kv.Value - } - su.Storage = make(map[common.Hash]map[common.Hash][]byte) - for _, s := range ssu.Storage { - su.Storage[s.Account] = make(map[common.Hash][]byte) - for _, kv := range s.Data { - su.Storage[s.Account][kv.Key] = kv.Value - } - } - return nil -} - - -// Initialize does initial setup of variables as the plugin is loaded. -func Initialize(ctx *cli.Context, loader *plugins.PluginLoader) { - pl = loader - cache, _ = lru.New(128) // TODO: Make size configurable - if !ctx.GlobalBool(utils.SnapshotFlag.Name) { - log.Warn("Snapshots are required for StateUpdate plugins, but are currently disabled. State Updates will be unavailable") - } - log.Info("Loaded block updater plugin") -} - - -// InitializeNode is invoked by the plugin loader when the node and Backend are -// ready. We will track the backend to provide access to blocks and other -// useful information. -func InitializeNode(stack *node.Node, b interfaces.Backend) { - backend = b -} - - -// StateUpdate gives us updates about state changes made in each block. We -// cache them for short term use, and write them to disk for the longer term. -func StateUpdate(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) { - su := &stateUpdate{ - Destructs: destructs, - Accounts: accounts, - Storage: storage, - } - cache.Add(blockRoot, su) - data, _ := rlp.EncodeToBytes(su) - backend.ChainDb().Put(append([]byte("su"), blockRoot.Bytes()...), data) -} - -// AppendAncient removes our state update records from leveldb as the -// corresponding blocks are moved from leveldb to the ancients database. 
At -// some point in the future, we may want to look at a way to move the state -// updates to an ancients table of their own for longer term retention. -func AppendAncient(number uint64, hash, headerBytes, body, receipts, td []byte) { - header := new(types.Header) - if err := rlp.Decode(bytes.NewReader(headerBytes), header); err != nil { - log.Warn("Could not decode ancient header", "block", number) - return - } - backend.ChainDb().Delete(append([]byte("su"), header.Root.Bytes()...)) -} - - -// NewHead is invoked when a new block becomes the latest recognized block. We -// use this to notify the blockEvents channel of new blocks, as well as invoke -// the BlockUpdates hook on downstream plugins. -// TODO: We're not necessarily handling reorgs properly, which may result in -// some blocks not being emitted through this hook. -func NewHead(block *types.Block, hash common.Hash, logs []*types.Log) { - if pl == nil { - log.Warn("Attempting to emit NewHead, but default PluginLoader has not been initialized") - return - } - result, err := blockUpdates(context.Background(), block) - if err != nil { - log.Error("Could not serialize block", "err", err, "hash", block.Hash()) - return - } - blockEvents.Send(result) - - receipts, err := backend.GetReceipts(context.Background(), block.Hash()) - var su *stateUpdate - if v, ok := cache.Get(block.Root()); ok { - su = v.(*stateUpdate) - } else { - data, err := backend.ChainDb().Get(append([]byte("su"), block.Root().Bytes()...)) - if err != nil { - log.Error("StateUpdate unavailable for block", "hash", block.Hash()) - return - } - su = &stateUpdate{} - if err := rlp.DecodeBytes(data, su); err != nil { - log.Error("StateUpdate unavailable for block", "hash", block.Hash()) - return - } - } - fnList := pl.Lookup("BlockUpdates", func(item interface{}) bool { - _, ok := item.(func(*types.Block, []*types.Log, types.Receipts, map[common.Hash]struct{}, map[common.Hash][]byte, map[common.Hash]map[common.Hash][]byte)) - return ok - }) - for _, fni := range fnList { - if fn, ok := fni.(func(*types.Block, []*types.Log, types.Receipts, map[common.Hash]struct{}, map[common.Hash][]byte, map[common.Hash]map[common.Hash][]byte)); ok { - fn(block, logs, receipts, su.Destructs, su.Accounts, su.Storage) - } - } -} - - -// BlockUpdates is a service that lets clients query for block updates for a -// given block by hash or number, or subscribe to new block upates. -type BlockUpdates struct{ - backend interfaces.Backend -} - -// blockUpdate handles the serialization of a block -func blockUpdates(ctx context.Context, block *types.Block) (map[string]interface{}, error) { - result, err := ethapi.RPCMarshalBlock(block, true, true) - if err != nil { return nil, err } - result["receipts"], err = backend.GetReceipts(ctx, block.Hash()) - if err != nil { return nil, err } - if v, ok := cache.Get(block.Root()); ok { - result["stateUpdates"] = v - return result, nil - } - data, err := backend.ChainDb().Get(append([]byte("su"), block.Root().Bytes()...)) - if err != nil { return nil, fmt.Errorf("State Updates unavailable for block %#x", block.Hash())} - su := &stateUpdate{} - if err := rlp.DecodeBytes(data, su); err != nil { return nil, fmt.Errorf("State updates unavailable for block %#x", block.Hash()) } - result["stateUpdates"] = su - return result, nil -} - -// BlockUpdatesByNumber retrieves a block by number, gets receipts and state -// updates, and serializes the response. 
-func (b *BlockUpdates) BlockUpdatesByNumber(ctx context.Context, number rpc.BlockNumber) (map[string]interface{}, error) { - block, err := b.backend.BlockByNumber(ctx, number) - if err != nil { return nil, err } - return blockUpdates(ctx, block) -} - -// BlockUpdatesByHash retrieves a block by hash, gets receipts and state -// updates, and serializes the response. -func (b *BlockUpdates) BlockUpdatesByHash(ctx context.Context, hash common.Hash) (map[string]interface{}, error) { - block, err := b.backend.BlockByHash(ctx, hash) - if err != nil { return nil, err } - return blockUpdates(ctx, block) -} - -// BlockUpdates allows clients to subscribe to notifications of new blocks -// along with receipts and state updates. -func (b *BlockUpdates) BlockUpdates(ctx context.Context) (*rpc.Subscription, error) { - notifier, supported := rpc.NotifierFromContext(ctx) - if !supported { - return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported - } - - var ( - rpcSub = notifier.CreateSubscription() - blockDataChan = make(chan map[string]interface{}, 1000) - ) - - sub := blockEvents.Subscribe(blockDataChan) - go func() { - - for { - select { - case b := <-blockDataChan: - notifier.Notify(rpcSub.ID, b) - case <-rpcSub.Err(): // client send an unsubscribe request - sub.Unsubscribe() - return - case <-notifier.Closed(): // connection dropped - sub.Unsubscribe() - return - } - } - }() - - return rpcSub, nil -} - - -// GetAPIs exposes the BlockUpdates service under the cardinal namespace. -func GetAPIs(stack *node.Node, backend interfaces.Backend) []rpc.API { - return []rpc.API{ - { - Namespace: "cardinal", - Version: "1.0", - Service: &BlockUpdates{backend}, - Public: true, - }, - } -} diff --git a/plugins/wrappers/wrappers.go b/plugins/wrappers/wrappers.go index b636ed25a..5458cd4a5 100644 --- a/plugins/wrappers/wrappers.go +++ b/plugins/wrappers/wrappers.go @@ -100,8 +100,9 @@ func (w WrappedTracer) CaptureExit(output []byte, gasUsed uint64, err error) { func (w WrappedTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { w.r.CaptureFault(pc, core.OpCode(op), gas, cost, &WrappedScopeContext{scope}, depth, err) } +// passing zero as a dummy value is foundation PluGeth only, it is being done to preserve compatability with other networks func (w WrappedTracer) CaptureEnd(output []byte, gasUsed uint64, err error) { - w.r.CaptureEnd(output, gasUsed, err) + w.r.CaptureEnd(output, gasUsed, 0, err) } func (w WrappedTracer) GetResult() (json.RawMessage, error) { data, err := w.r.Result() @@ -230,30 +231,30 @@ func (n *Node) Close() error { return n.n.Close() } -// type WrappedBlockContext struct { -// b vm.BlockContext -// } -// -// //type WrappedBlockContext vm.BlockContext -// -// func NewWrappedBlockContext(c vm.BlockContext) *WrappedBlockContext { -// return &WrappedBlockContext{c} -// } -// func (w *WrappedBlockContext) Coinbase() core.Address { -// return core.Address(w.b.Coinbase) -// } -// func (w *WrappedBlockContext) GasLimit() uint64 { -// return w.b.GasLimit -// } -// func (w *WrappedBlockContext) BlockNumber() *big.Int { -// return w.b.BlockNumber -// } -// func (w *WrappedBlockContext) Time() *big.Int { -// return w.b.Time -// } -// func (w *WrappedBlockContext) Difficulty() *big.Int { -// return w.b.Difficulty -// } -// func (w *WrappedBlockContext) BaseFee() *big.Int { -// return w.b.BaseFee -// } +type WrappedBlockContext struct { + b vm.BlockContext +} + +// type WrappedBlockContext vm.BlockContext + +func NewWrappedBlockContext(c 
vm.BlockContext) *WrappedBlockContext { + return &WrappedBlockContext{c} +} +func (w *WrappedBlockContext) Coinbase() core.Address { + return core.Address(w.b.Coinbase) +} +func (w *WrappedBlockContext) GasLimit() uint64 { + return w.b.GasLimit +} +func (w *WrappedBlockContext) BlockNumber() *big.Int { + return w.b.BlockNumber +} +func (w *WrappedBlockContext) Time() *big.Int { + return new(big.Int).SetInt64(int64(w.b.Time)) +} +func (w *WrappedBlockContext) Difficulty() *big.Int { + return w.b.Difficulty +} +func (w *WrappedBlockContext) BaseFee() *big.Int { + return w.b.BaseFee +} From 4034c675be4cf95e1c51baec5f1babe809e45a3c Mon Sep 17 00:00:00 2001 From: Yier <90763233+yierx@users.noreply.github.com> Date: Wed, 22 Feb 2023 19:06:43 +0800 Subject: [PATCH 22/26] eth/filters: fix a breaking change and return rpctransaction (#26757) * eth/filters: fix a breaking change and return rpctransaction * eth/filters: fix test cases --------- Co-authored-by: Catror --- eth/filters/api.go | 26 ++++++++-- eth/filters/filter_system_test.go | 79 +++++++++++++++++++++++++++---- 2 files changed, 90 insertions(+), 15 deletions(-) diff --git a/eth/filters/api.go b/eth/filters/api.go index a30eb28be..f9ae70eba 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -39,6 +39,7 @@ type filter struct { typ Type deadline *time.Timer // filter is inactive when deadline triggers hashes []common.Hash + fullTx bool txs []*types.Transaction crit FilterCriteria logs []*types.Log @@ -103,14 +104,14 @@ func (api *FilterAPI) timeoutLoop(timeout time.Duration) { // // It is part of the filter package because this filter can be used through the // `eth_getFilterChanges` polling method that is also used for log filters. -func (api *FilterAPI) NewPendingTransactionFilter() rpc.ID { +func (api *FilterAPI) NewPendingTransactionFilter(fullTx *bool) rpc.ID { var ( pendingTxs = make(chan []*types.Transaction) pendingTxSub = api.events.SubscribePendingTxs(pendingTxs) ) api.filtersMu.Lock() - api.filters[pendingTxSub.ID] = &filter{typ: PendingTransactionsSubscription, deadline: time.NewTimer(api.timeout), txs: make([]*types.Transaction, 0), s: pendingTxSub} + api.filters[pendingTxSub.ID] = &filter{typ: PendingTransactionsSubscription, fullTx: fullTx != nil && *fullTx, deadline: time.NewTimer(api.timeout), txs: make([]*types.Transaction, 0), s: pendingTxSub} api.filtersMu.Unlock() go func() { @@ -412,6 +413,9 @@ func (api *FilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) { api.filtersMu.Lock() defer api.filtersMu.Unlock() + chainConfig := api.sys.backend.ChainConfig() + latest := api.sys.backend.CurrentHeader() + if f, found := api.filters[id]; found { if !f.deadline.Stop() { // timer expired but filter is not yet removed in timeout loop @@ -426,9 +430,21 @@ func (api *FilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) { f.hashes = nil return returnHashes(hashes), nil case PendingTransactionsSubscription: - txs := f.txs - f.txs = nil - return txs, nil + if f.fullTx { + txs := make([]*ethapi.RPCTransaction, 0, len(f.txs)) + for _, tx := range f.txs { + txs = append(txs, ethapi.NewRPCPendingTransaction(tx, latest, chainConfig)) + } + f.txs = nil + return txs, nil + } else { + hashes := make([]common.Hash, 0, len(f.txs)) + for _, tx := range f.txs { + hashes = append(hashes, tx.Hash()) + } + f.txs = nil + return hashes, nil + } case LogsSubscription, MinedAndPendingLogsSubscription: logs := f.logs f.logs = nil diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go index 
974483b68..b70b0158a 100644 --- a/eth/filters/filter_system_test.go +++ b/eth/filters/filter_system_test.go @@ -37,6 +37,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" ) @@ -52,11 +53,12 @@ type testBackend struct { } func (b *testBackend) ChainConfig() *params.ChainConfig { - panic("implement me") + return params.TestChainConfig } func (b *testBackend) CurrentHeader() *types.Header { - panic("implement me") + hdr, _ := b.HeaderByNumber(context.TODO(), rpc.LatestBlockNumber) + return hdr } func (b *testBackend) ChainDb() ethdb.Database { @@ -256,10 +258,10 @@ func TestPendingTxFilter(t *testing.T) { types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), } - txs []*types.Transaction + hashes []common.Hash ) - fid0 := api.NewPendingTransactionFilter() + fid0 := api.NewPendingTransactionFilter(nil) time.Sleep(1 * time.Second) backend.txFeed.Send(core.NewTxsEvent{Txs: transactions}) @@ -271,7 +273,64 @@ func TestPendingTxFilter(t *testing.T) { t.Fatalf("Unable to retrieve logs: %v", err) } - tx := results.([]*types.Transaction) + h := results.([]common.Hash) + hashes = append(hashes, h...) + if len(hashes) >= len(transactions) { + break + } + // check timeout + if time.Now().After(timeout) { + break + } + + time.Sleep(100 * time.Millisecond) + } + + if len(hashes) != len(transactions) { + t.Errorf("invalid number of transactions, want %d transactions(s), got %d", len(transactions), len(hashes)) + return + } + for i := range hashes { + if hashes[i] != transactions[i].Hash() { + t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i]) + } + } +} + +// TestPendingTxFilterFullTx tests whether pending tx filters retrieve all pending transactions that are posted to the event mux. +func TestPendingTxFilterFullTx(t *testing.T) { + t.Parallel() + + var ( + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) + + transactions = []*types.Transaction{ + types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), + types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), + types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), + types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), + types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), + } + + txs []*ethapi.RPCTransaction + ) + + fullTx := true + fid0 := api.NewPendingTransactionFilter(&fullTx) + + time.Sleep(1 * time.Second) + backend.txFeed.Send(core.NewTxsEvent{Txs: transactions}) + + timeout := time.Now().Add(1 * time.Second) + for { + results, err := api.GetFilterChanges(fid0) + if err != nil { + t.Fatalf("Unable to retrieve logs: %v", err) + } + + tx := results.([]*ethapi.RPCTransaction) txs = append(txs, tx...) 
if len(txs) >= len(transactions) { break @@ -289,8 +348,8 @@ func TestPendingTxFilter(t *testing.T) { return } for i := range txs { - if txs[i].Hash() != transactions[i].Hash() { - t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), txs[i].Hash()) + if txs[i].Hash != transactions[i].Hash() { + t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), txs[i].Hash) } } } @@ -854,15 +913,15 @@ func TestPendingTxFilterDeadlock(t *testing.T) { // timeout either in 100ms or 200ms fids := make([]rpc.ID, 20) for i := 0; i < len(fids); i++ { - fid := api.NewPendingTransactionFilter() + fid := api.NewPendingTransactionFilter(nil) fids[i] = fid // Wait for at least one tx to arrive in filter for { - txs, err := api.GetFilterChanges(fid) + hashes, err := api.GetFilterChanges(fid) if err != nil { t.Fatalf("Filter should exist: %v\n", err) } - if len(txs.([]*types.Transaction)) > 0 { + if len(hashes.([]common.Hash)) > 0 { break } runtime.Gosched() From f86f04864603d4d1398e134fae319531453b67e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 22 Feb 2023 13:55:09 +0200 Subject: [PATCH 23/26] common/math: allow HexOrDecimal to accept unquoted decimals too (#26758) --- common/math/big.go | 11 +++++++++++ common/math/integer.go | 11 +++++++++++ 2 files changed, 22 insertions(+) diff --git a/common/math/big.go b/common/math/big.go index 48427810e..1c2afa749 100644 --- a/common/math/big.go +++ b/common/math/big.go @@ -49,6 +49,17 @@ func NewHexOrDecimal256(x int64) *HexOrDecimal256 { return &h } +// UnmarshalJSON implements json.Unmarshaler. +// +// It is similar to UnmarshalText, but allows parsing real decimals too, not just +// quoted decimal strings. +func (i *HexOrDecimal256) UnmarshalJSON(input []byte) error { + if len(input) > 0 && input[0] == '"' { + input = input[1 : len(input)-1] + } + return i.UnmarshalText(input) +} + // UnmarshalText implements encoding.TextUnmarshaler. func (i *HexOrDecimal256) UnmarshalText(input []byte) error { bigint, ok := ParseBig256(string(input)) diff --git a/common/math/integer.go b/common/math/integer.go index 50d3eba1f..da01c0a08 100644 --- a/common/math/integer.go +++ b/common/math/integer.go @@ -41,6 +41,17 @@ const ( // HexOrDecimal64 marshals uint64 as hex or decimal. type HexOrDecimal64 uint64 +// UnmarshalJSON implements json.Unmarshaler. +// +// It is similar to UnmarshalText, but allows parsing real decimals too, not just +// quoted decimal strings. +func (i *HexOrDecimal64) UnmarshalJSON(input []byte) error { + if len(input) > 0 && input[0] == '"' { + input = input[1 : len(input)-1] + } + return i.UnmarshalText(input) +} + // UnmarshalText implements encoding.TextUnmarshaler. 
func (i *HexOrDecimal64) UnmarshalText(input []byte) error { int, ok := ParseUint64(string(input)) From 73b01f40ceb6bcb6f9f44c2a3d6f963b40452b47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 22 Feb 2023 14:23:51 +0200 Subject: [PATCH 24/26] params: release Geth v1.11.2 --- params/version.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/params/version.go b/params/version.go index 9be8a26a5..8bbf0b99b 100644 --- a/params/version.go +++ b/params/version.go @@ -21,10 +21,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 11 // Minor version component of the current release - VersionPatch = 2 // Patch version component of the current release - VersionMeta = "unstable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 11 // Minor version component of the current release + VersionPatch = 2 // Patch version component of the current release + VersionMeta = "stable" // Version metadata to append to the version string ) // Version holds the textual version string. From 7bf0c614f6bd1af4eee6f8c485d4276a8d656ba8 Mon Sep 17 00:00:00 2001 From: philip-morlier Date: Wed, 22 Feb 2023 10:01:43 -0800 Subject: [PATCH 25/26] Modified chain freezer map keys to Public to match v1.11.2 --- core/rawdb/plugin_hooks.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/core/rawdb/plugin_hooks.go b/core/rawdb/plugin_hooks.go index 05a0b36d3..98fd27a2e 100644 --- a/core/rawdb/plugin_hooks.go +++ b/core/rawdb/plugin_hooks.go @@ -70,7 +70,7 @@ func PluginCommitUpdate(pl *plugins.PluginLoader, num uint64) { receipts []byte td []byte ) - if hashi, ok := update[chainFreezerHashTable]; ok { + if hashi, ok := update[ChainFreezerHashTable]; ok { switch v := hashi.(type) { case []byte: hash = v @@ -78,7 +78,7 @@ func PluginCommitUpdate(pl *plugins.PluginLoader, num uint64) { hash, _ = rlp.EncodeToBytes(v) } } - if headeri, ok := update[chainFreezerHeaderTable]; ok { + if headeri, ok := update[ChainFreezerHeaderTable]; ok { switch v := headeri.(type) { case []byte: header = v @@ -86,7 +86,7 @@ func PluginCommitUpdate(pl *plugins.PluginLoader, num uint64) { header, _ = rlp.EncodeToBytes(v) } } - if bodyi, ok := update[chainFreezerBodiesTable]; ok { + if bodyi, ok := update[ChainFreezerBodiesTable]; ok { switch v := bodyi.(type) { case []byte: body = v @@ -94,7 +94,7 @@ func PluginCommitUpdate(pl *plugins.PluginLoader, num uint64) { body, _ = rlp.EncodeToBytes(v) } } - if receiptsi, ok := update[chainFreezerReceiptTable]; ok { + if receiptsi, ok := update[ChainFreezerReceiptTable]; ok { switch v := receiptsi.(type) { case []byte: receipts = v @@ -102,7 +102,7 @@ func PluginCommitUpdate(pl *plugins.PluginLoader, num uint64) { receipts, _ = rlp.EncodeToBytes(v) } } - if tdi, ok := update[chainFreezerDifficultyTable]; ok { + if tdi, ok := update[ChainFreezerDifficultyTable]; ok { switch v := tdi.(type) { case []byte: td = v From 89bc9e0ee6aa1b2efca56d7c1311cffcaa648f44 Mon Sep 17 00:00:00 2001 From: philip-morlier Date: Thu, 23 Feb 2023 09:44:05 -0800 Subject: [PATCH 26/26] updtated utils to v0.0.24 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4596d45c0..494506567 100644 --- a/go.mod +++ b/go.mod @@ -50,7 +50,7 @@ require ( github.com/mattn/go-isatty v0.0.16 github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 
github.com/olekukonko/tablewriter v0.0.5 - github.com/openrelayxyz/plugeth-utils v0.0.23 + github.com/openrelayxyz/plugeth-utils v0.0.24 github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 github.com/prometheus/tsdb v0.7.1 github.com/rs/cors v1.7.0 diff --git a/go.sum b/go.sum index e405bc759..cb1d14cc3 100644 --- a/go.sum +++ b/go.sum @@ -451,8 +451,8 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/openrelayxyz/plugeth-utils v0.0.23 h1:TKNGnRPWZ3mXNUHfjr1iRjsto2r6ngNZb95dJLj2j5w= -github.com/openrelayxyz/plugeth-utils v0.0.23/go.mod h1:zGm3/elx1rCcrYAFUQ5/He7AG/n039/3xYg0/Q8nqcQ= +github.com/openrelayxyz/plugeth-utils v0.0.24 h1:Q2BR3SlwHovNFLbEtGrFFqgOCnL+ONyrwTC9wKKAej4= +github.com/openrelayxyz/plugeth-utils v0.0.24/go.mod h1:W1Hwwhv04MCuNCcI6a62/kL6eWbH7OO+KvpBEyQTgIs= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=