* Write state diff to CSV (#2)
* port statediff from 9b7fd9af80/statediff/statediff.go; minor fixes
* integrating state diff extracting, building, and persisting into geth processes
* work towards persisting created statediffs in ipfs; based off github.com/vulcanize/eth-block-extractor
* Add a state diff service
* Remove diff extractor from blockchain
* Update imports
* Move statediff on/off check to geth cmd config
* Update starting state diff service
* Add debugging logs for creating diff
* Add statediff extractor and builder tests and small refactoring
* Start to write statediff to a CSV
* Restructure statediff directory
* Pull CSV publishing methods into their own file
* Reformatting due to go fmt
* Add gomega to vendor dir
* Remove testing focuses
* Update statediff tests to use golang test pkg instead of ginkgo
  - builder_test
  - extractor_test
  - publisher_test
* Use hexutil.Encode instead of deprecated common.ToHex
* Remove OldValue from DiffBigInt and DiffUint64 fields
* Update builder test
* Remove old storage value from updated accounts
* Remove old values from created/deleted accounts
* Update publisher to account for only storing current account values
* Update service loop and fetching previous block
* Update testing
  - remove statediff ginkgo test suite file
  - move mocks to their own dir
* Updates per go fmt
* Updates to tests
* Pass statediff mode and path in through cli
* Return filename from publisher
* Remove some duplication in builder
* Remove code field from state diff output; this is the contract byte code, and it can still be obtained by querying the db by the codeHash
* Consolidate acct diff structs for updated & updated/deleted accts
* Include block number in csv filename
* Clean up error logging
* Cleanup formatting, spelling, etc
* Address PR comments
* Add contract address and storage value to csv
* Refactor accumulating account row in csv publisher
* Add DiffStorage struct
* Add storage key to csv
* Address PR comments
* Fix publisher to include rows for accounts that don't have store updates
* Update builder test after merging in release/1.8
* Update test contract to include storage on contract initialization, so that we're able to test that storage diffing works for created and deleted accounts (not just updated accounts).
* Factor out a common trie iterator method in builder
* Apply goimports to statediff
* Apply gosimple changes to statediff
* Gracefully exit geth command (#4)
* Statediff for full node (#6)
* Open a trie from the in-memory database
* Use a node's LeafKey as an identifier instead of the address. It was proving difficult to look the address up from a given path with a full node (sometimes the value wouldn't exist in the disk db). So, instead, for now we are using the node's LeafKey, which is a Keccak256 hash of the address, so if we know the address we can figure out which LeafKey it matches up to.
* Make sure that statediff has been processed before pruning
* Use blockchain stateCache.OpenTrie for storage diffs
* Clean up log lines and remove unnecessary fields from builder
* Apply go fmt changes
* Add a sleep to the blockchain test
* refactoring/reorganizing packages
* refactoring statediff builder and types and adjusted to relay proofs and paths (still need to make this optional)
* refactoring state diff service and adding api which allows for streaming state diff payloads over an rpc websocket subscription
* make proofs and paths optional + compress service loop into single for loop (may be missing something here)
* option to process intermediate nodes
* make state diff rlp serializable
* cli parameter to limit statediffing to select account addresses + test
* review fixes and fixes for issues run into during integration
* review fixes; proper method signature for api; adjust service so that statediff processing is halted/paused until there is at least one subscriber listening for the results
* adjust buffering to improve stability; doc.go; fix notifier err handling
* relay receipts with the rest of the data + review fixes/changes
* rpc method to get statediff at specific block; requires archival node or the block be within the pruning range
* fix linter issues
* include total difficulty in the payload
* fix state diff builder: emit actual leaf nodes instead of value nodes; diff on the leaf not on the value; emit correct path for intermediate nodes
* adjust statediff builder tests to changes and extend to test intermediate nodes; golint
* add genesis block to test; handle block 0 in StateDiffAt
* rlp files for mainnet blocks 0-3, for tests
* builder test on mainnet blocks
* common.BytesToHash(path) => crypto.Keccak256(hash) in builder; BytesToHash produces the same hash for e.g. []byte{} and []byte{\x00} - prefix \x00 steps are inconsequential to the hash result (see the sketch after this list)
* complete tests for early mainnet blocks
* diff type for representing deleted accounts
* fix builder so that we handle account deletions properly and properly diff storage when an account is moved to a new path; update params
* remove cli params; moving them to subscriber defined
* remove unneeded bc methods
* update service and api; statediffing params are now defined by user through api rather than by service provider by cli
* update top level tests
* add ability to watch specific storage slots (leaf keys) only
* comments; explain logic
* update mainnet blocks test
* update api_test.go
* storage leafkey filter test
* cleanup chain maker
* adjust chain maker for tests to add an empty account in block1 and switch to EIP-158 afterwards (now we just need to generate enough accounts until one causes the empty account to be touched and removed post-EIP-158 so we can simulate and test that process...); also added 2 new blocks where more contract storage is set and old slots are set to zero so they are removed so we can test that
* found an account whose creation causes the empty account to be moved to a new path; this should count as 'touching' the empty account and cause it to be removed according to eip-158... but it doesn't
* use new contract in unit tests that has self-destruct ability, so we can test eip-158 since simply moving an account to a new path doesn't count as 'touching' it
* handle storage deletions
* tests for eip-158 account removal and storage value deletions; there is one edge case left to test where we remove 1 account when only two exist such that the remaining account is moved up and replaces the root branch node
* finish testing known edge cases
* add endpoint to fetch all state and storage nodes at a given blockheight; useful for generating a recent state cache/snapshot that we can diff forward from rather than needing to collect all diffs from genesis
* test for state trie builder
* if statediffing is on, lock tries in triedb until the statediffing service signals they are done using them
* fix mock blockchain; golint; bump patch
* increase maxRequestContentLength; bump patch
* log the sizes of the state objects we are sending
* CI build (#20)
* CI: run build on PR and on push to master
* CI: debug building geth
* CI: fix copying file
* CI: fix copying file v2
* CI: temporarily upload file to release asset
* CI: get release upload_url by tag, upload asset to current release
* CI: fix tag name
* fix ci build on statediff_at_anyblock-1.9.11 branch
* fix publishing assets in release
* use context deadline for timeout in eth_call
* collect and emit codehash=>code mappings for state objects
* subscription endpoint for retrieving all the codehash=>code mappings that exist at provided height
* Implement WriteStateDiffAt
* Writes state diffs directly to postgres
* Adds CLI flags to configure PG
* Refactors builder output with callbacks
* Copies refactored postgres handling code from ipld-eth-indexer
* rename PostgresCIDWriter.{index->upsert}*
* rm unused
* output code & codehash iteratively
* had to rf some types for this
* prometheus metrics output
* duplicate recent eth-indexer changes
* migrations and metrics...
* [wip] prom.Init() here? another CLI flag?
* tidy & DRY
* statediff WriteLoop service + CLI flag
* [wip] update test mocks
* todo - do something meaningful to test write loop
* logging
* use geth log
* port tests to go testing
* drop ginkgo/gomega
* fix and cleanup tests
* fail before defer statement
* delete vendor/ dir
* fixes after rebase onto 1.9.23
* fix API registration
* use golang 1.15.5 version (#34)
* bump version meta; add 0.0.11 branch to actions
* bump version meta; update github actions workflows
* statediff: refactor metrics
* Remove redundant statediff/indexer/prom tooling and use existing prometheus integration.
* "indexer" namespace for metrics
* add reporting loop for db metrics
* doc
* metrics for statediff stats
* metrics namespace/subsystem = statediff/{indexer,service}
* statediff: use a worker pool (for direct writes)
* fix test
* fix chain event subscription
* log tweaks
* func name
* unused import
* intermediate chain event channel for metrics
* update github actions; linting
* add poststate and status to receipt ipld indexes
* stateDiffFor endpoints for fetching or writing statediff object by blockhash; bump statediff version
* fixes after rebase onto v1.10.1
* update github actions and version meta; go fmt
* add leaf key to removed 'nodes'
* include Postgres migrations and schema
* service documentation
* touching up
* update github actions after rebase
* fix connection leak (misplaced defer) and perform proper rollback on errs
* improve error logging; handle PushBlock internal err
* build docker image and publish it to Docker Hub on release
* add access list tx to unit tests
* MarshalBinary and UnmarshalBinary methods for receipt
* fix error caused by 2718 by using MarshalBinary instead of EncodeRLP methods
* ipld encoding/decoding tests
* update TxModel; add AccessListElementModel
* index tx type and access lists
* add access list metrics
* unit tests for tx_type and access list table
* unit tests for receipt marshal/unmarshal binary methods
* improve documentation of the encoding methods
* fix issue identified in linting
* update github actions and version meta after rebase
* unit test that fails non-deterministically on eip2930 txs, giving same error we are seeing in prod
* Include genesis block state diff.
* documentation on versioning, rebasing, releasing; bump version meta
* Add geth and statediff unit test to CI.
* Set pgpassword in env.
* Added comments.
* Add new major branch to github action.
* Add support for dynamic txn (EIP-1559).
* Update version meta to 0.0.24
* Verify block base fee in test.
* Fix base_fee type and add backward compatible test.
* Remove type definition for AccessListElementModel
* Change basefee to int64/bigint.
* block and uncle reward in PoA network = 0 (#87)
* in PoA networks there are no block and uncle rewards
* bump meta version
* (cherry picked from commit b64ca14689)
* Use Ropsten to test block reward.
* Add Makefile target to build static linux binaries.
* Strip symbol tables from static binaries.
* Fix block_fee to support NULL values.
* bump version meta.
* Add new major branch to github action.
* rename doc.go to README.md
* Create a separate table for storing logs
* Bump statediff version to 0.0.26.
* add btree index to state/storage_cids.node_type; updated schema
* Dedup receipt data.
* Fix linter errors.
* Address comments.
* Bump statediff version to 0.0.27.
* new cli flag for initializing db the first time the service is run
* only write Removed node ipld block (on db init) and reuse constant cid and mhkey
* test new handling of Removed nodes; don't require init flag
* log metrics
* Add new major branch to github action.
* Fix build.
* Update golang version in CI.
* Use ipld-eth-db in testing.
* Remove migration from repo.
* Add new major branch to github action.
* Use `GetTd` instead of `GetTdByHash` 6289137827
* Add new major branch to github action.
* Report DB metrics
* batch inserts to public.blocks
* v2 => v3 major refactor
* fixes and cli integration for new options
* update example command in readme
* ashwin's fix for failing pgx unit test
* update to use new schema; fix pgx driver
* indexer that writes sql stmts out to a file
* cli integration
* fix unit tests
* use node_id as PK/FK
* misc fixes/adjustments
* update README
* cleanup; more unit tests
* basefee is big.Int, it won't always fit in int64
* adjust for schema updates
* finish unit tests
* test harness for arbitrary mainnet blocks and receipts
* cache problematic block locally for quicker testing/easier CI testing
* fix issue with log/logTrie processing
* remove some unnecessary hashing operations
* handle edge case
* add more 'bad blocks' to mainnet_tests
* increase file write buffer size
* increase buffer further
* fix rct trie multicodec type
* extend testing
* log trie fk fix
* bump statediff meta version; use db v0.3.0 in compose
* skip file writing tests in CI, for now
* prevent parallel execution of tests in different pkgs (suspect this is what causes our deadlock to show up only in CI test env); adjust write buffering
* fix rct unit tests
* fix README formatting
* port retry on deadlock detection feature
* new workflow on-'master' targets
* update version meta
* improve test coverage for logs
* fix possible race condition
* fix CI
* check tx pool state at end of unit tests
* better logging of rollbacks and deadlock retries
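One item above (the common.BytesToHash(path) => crypto.Keccak256 change in the builder) turns on the fact that BytesToHash left-pads its input to 32 bytes, so paths that differ only by leading zero bytes collapse to the same value, while a Keccak256 digest keeps them distinct. The following is a minimal standalone sketch of that difference, assuming only the stock go-ethereum `common` and `crypto` packages; the file and function here are illustrative and not part of the statediff code.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// common.BytesToHash left-pads its input to 32 bytes, so an empty path and
	// a path consisting of a single zero byte collapse to the same hash value.
	a := common.BytesToHash([]byte{})
	b := common.BytesToHash([]byte{0x00})
	fmt.Println(a == b) // true

	// Keccak256 digests the raw bytes, so the two inputs remain distinct.
	fmt.Println(common.BytesToHash(crypto.Keccak256([]byte{})) ==
		common.BytesToHash(crypto.Keccak256([]byte{0x00}))) // false
}
```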
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// ErrNotRequested is returned by the trie sync when it's requested to process a
// node it did not request.
var ErrNotRequested = errors.New("not requested")

// ErrAlreadyProcessed is returned by the trie sync when it's requested to process a
// node it already processed previously.
var ErrAlreadyProcessed = errors.New("already processed")

// maxFetchesPerDepth is the maximum number of pending trie nodes per depth. The
// role of this value is to limit the number of trie nodes that get expanded in
// memory if the node was configured with a significant number of peers.
const maxFetchesPerDepth = 16384

// request represents a scheduled or already in-flight state retrieval request.
type request struct {
	path []byte      // Merkle path leading to this node for prioritization
	hash common.Hash // Hash of the node data content to retrieve
	data []byte      // Data content of the node, cached until all subtrees complete
	code bool        // Whether this is a code entry

	parents []*request // Parent state nodes referencing this entry (notify all upon completion)
	deps    int        // Number of dependencies before allowed to commit this node

	callback LeafCallback // Callback to invoke if a leaf node is reached on this branch
}

// SyncPath is a path tuple identifying a particular trie node either in a single
// trie (account) or a layered trie (account -> storage).
//
// Content wise the tuple either has 1 element if it addresses a node in a single
// trie or 2 elements if it addresses a node in a stacked trie.
//
// To support aiming arbitrary trie nodes, the path needs to support odd nibble
// lengths. To avoid transferring expanded hex form over the network, the last
// part of the tuple (which needs to index into the middle of a trie) is compact
// encoded. In case of a 2-tuple, the first item is always 32 bytes so that is
// simple binary encoded.
//
// Examples:
// - Path 0x9 -> {0x19}
// - Path 0x99 -> {0x0099}
// - Path 0x01234567890123456789012345678901012345678901234567890123456789019 -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x19}
// - Path 0x012345678901234567890123456789010123456789012345678901234567890199 -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x0099}
type SyncPath [][]byte

// newSyncPath converts an expanded trie path from nibble form into a compact
// version that can be sent over the network.
func newSyncPath(path []byte) SyncPath {
	// If the hash is from the account trie, append a single item, if it
	// is from a storage trie, append a tuple. Note, the length 64 is
	// clashing between account leaf and storage root. It's fine though
	// because having a trie node at 64 depth means a hash collision was
	// found and we're long dead.
	if len(path) < 64 {
		return SyncPath{hexToCompact(path)}
	}
	return SyncPath{hexToKeyBytes(path[:64]), hexToCompact(path[64:])}
}

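// NOTE (added, not part of the original file): the "compact" form used above is
// the hex-prefix encoding used elsewhere in this package, whose first nibble
// flags an odd-length path. That is what the examples in the SyncPath comment
// are showing: the single-nibble path 0x9 becomes {0x19} (odd-length flag 1
// followed by the nibble), while the two-nibble path 0x99 becomes {0x0099}
// (even-length prefix byte 0x00 followed by the packed nibbles). For a storage
// slot, the first 64 nibbles (the hashed account key) are packed verbatim via
// hexToKeyBytes and only the remainder inside the storage trie is compact
// encoded, which is why the long examples split into a 32-byte element plus a
// short compact tail.
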
// SyncResult is a response with requested data along with its hash.
type SyncResult struct {
	Hash common.Hash // Hash of the originally unknown trie node
	Data []byte      // Data content of the retrieved node
}

// syncMemBatch is an in-memory buffer of successfully downloaded but not yet
// persisted data items.
type syncMemBatch struct {
	nodes map[common.Hash][]byte // In-memory membatch of recently completed nodes
	codes map[common.Hash][]byte // In-memory membatch of recently completed codes
}

// newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes.
func newSyncMemBatch() *syncMemBatch {
	return &syncMemBatch{
		nodes: make(map[common.Hash][]byte),
		codes: make(map[common.Hash][]byte),
	}
}

// hasNode reports whether the trie node with the given hash is already cached.
func (batch *syncMemBatch) hasNode(hash common.Hash) bool {
	_, ok := batch.nodes[hash]
	return ok
}

// hasCode reports whether the contract code with the given hash is already cached.
func (batch *syncMemBatch) hasCode(hash common.Hash) bool {
	_, ok := batch.codes[hash]
	return ok
}

// Sync is the main state trie synchronisation scheduler, which provides yet
// unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie step by step until all is done.
type Sync struct {
	database ethdb.KeyValueReader     // Persistent database to check for existing entries
	membatch *syncMemBatch            // Memory buffer to avoid frequent database writes
	nodeReqs map[common.Hash]*request // Pending requests pertaining to a trie node hash
	codeReqs map[common.Hash]*request // Pending requests pertaining to a code hash
	queue    *prque.Prque             // Priority queue with the pending requests
	fetches  map[int]int              // Number of active fetches per trie node depth
}

// NewSync creates a new trie data download scheduler.
func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback) *Sync {
	ts := &Sync{
		database: database,
		membatch: newSyncMemBatch(),
		nodeReqs: make(map[common.Hash]*request),
		codeReqs: make(map[common.Hash]*request),
		queue:    prque.New(nil),
		fetches:  make(map[int]int),
	}
	ts.AddSubTrie(root, nil, common.Hash{}, callback)
	return ts
}

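// syncStateSketch is an added, illustrative sketch (not part of the original
// file) showing how the scheduler above is meant to be driven: ask Missing for
// outstanding hashes, fetch them (over the network in practice; the fetch
// function here is supplied by the caller and is hypothetical), feed the blobs
// back in via Process, and periodically flush the membatch to disk with Commit.
// A real state sync would also pass a LeafCallback to NewSync that registers
// storage tries and code entries via AddSubTrie/AddCodeEntry as account leaves
// arrive; this minimal loop only walks the single trie rooted at root.
func syncStateSketch(root common.Hash, db ethdb.Database, fetch func(common.Hash) ([]byte, error)) error {
	sched := NewSync(root, db, nil) // nil callback: only the trie itself is synced
	for sched.Pending() > 0 {
		// Grab a batch of missing trie node and code hashes from the queue.
		nodes, _, codes := sched.Missing(256)
		for _, hash := range append(nodes, codes...) {
			data, err := fetch(hash) // retrieve the raw blob for this hash
			if err != nil {
				return err
			}
			if err := sched.Process(SyncResult{Hash: hash, Data: data}); err != nil {
				return err
			}
		}
		// Persist everything completed so far in a single database batch.
		batch := db.NewBatch()
		if err := sched.Commit(batch); err != nil {
			return err
		}
		if err := batch.Write(); err != nil {
			return err
		}
	}
	return nil
}
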
// AddSubTrie registers a new trie to the sync code, rooted at the designated parent.
func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, callback LeafCallback) {
	// Short circuit if the trie is empty or already known
	if root == emptyRoot {
		return
	}
	if s.membatch.hasNode(root) {
		return
	}
	// If database says this is a duplicate, then at least the trie node is
	// present, and we hold the assumption that it's NOT legacy contract code.
	if rawdb.HasTrieNode(s.database, root) {
		return
	}
	// Assemble the new sub-trie sync request
	req := &request{
		path:     path,
		hash:     root,
		callback: callback,
	}
	// If this sub-trie has a designated parent, link them together
	if parent != (common.Hash{}) {
		ancestor := s.nodeReqs[parent]
		if ancestor == nil {
			panic(fmt.Sprintf("sub-trie ancestor not found: %x", parent))
		}
		ancestor.deps++
		req.parents = append(req.parents, ancestor)
	}
	s.schedule(req)
}

// AddCodeEntry schedules the direct retrieval of a contract code that should not
// be interpreted as a trie node, but rather accepted and stored into the database
// as is.
func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash) {
	// Short circuit if the entry is empty or already known
	if hash == emptyState {
		return
	}
	if s.membatch.hasCode(hash) {
		return
	}
	// If database says duplicate, the blob is present for sure.
	// Note we only check the existence with the new code scheme; fast
	// sync is expected to run with a fresh new node. Even if the code
	// exists in the legacy format, fetch and store it with the new
	// scheme anyway.
	if rawdb.HasCodeWithPrefix(s.database, hash) {
		return
	}
	// Assemble the new sub-trie sync request
	req := &request{
		path: path,
		hash: hash,
		code: true,
	}
	// If this sub-trie has a designated parent, link them together
	if parent != (common.Hash{}) {
		ancestor := s.nodeReqs[parent] // the parent of codereq can ONLY be nodereq
		if ancestor == nil {
			panic(fmt.Sprintf("raw-entry ancestor not found: %x", parent))
		}
		ancestor.deps++
		req.parents = append(req.parents, ancestor)
	}
	s.schedule(req)
}

// Missing retrieves the known missing nodes from the trie for retrieval. To aid
// both eth/6x style fast sync and snap/1x style state sync, the paths of trie
// nodes are returned too, as well as a separate hash list for codes.
func (s *Sync) Missing(max int) (nodes []common.Hash, paths []SyncPath, codes []common.Hash) {
	var (
		nodeHashes []common.Hash
		nodePaths  []SyncPath
		codeHashes []common.Hash
	)
	for !s.queue.Empty() && (max == 0 || len(nodeHashes)+len(codeHashes) < max) {
		// Retrieve the next item in line
		item, prio := s.queue.Peek()

		// If we have too many already-pending tasks for this depth, throttle
		depth := int(prio >> 56)
		if s.fetches[depth] > maxFetchesPerDepth {
			break
		}
		// Item is allowed to be scheduled, add it to the task list
		s.queue.Pop()
		s.fetches[depth]++

		hash := item.(common.Hash)
		if req, ok := s.nodeReqs[hash]; ok {
			nodeHashes = append(nodeHashes, hash)
			nodePaths = append(nodePaths, newSyncPath(req.path))
		} else {
			codeHashes = append(codeHashes, hash)
		}
	}
	return nodeHashes, nodePaths, codeHashes
}

// Process injects the received data for a requested item. Note it can
// happen that a single response commits two pending requests (e.g.
// there are two requests, one for code and one for a node, but the hash
// is the same). In this case the second response for the same hash will
// be treated as a "non-requested" item or an "already-processed" item,
// but there is no downside.
func (s *Sync) Process(result SyncResult) error {
	// If the item was not requested either for code or node, bail out
	if s.nodeReqs[result.Hash] == nil && s.codeReqs[result.Hash] == nil {
		return ErrNotRequested
	}
	// There is a pending code request for this data, commit directly
	var filled bool
	if req := s.codeReqs[result.Hash]; req != nil && req.data == nil {
		filled = true
		req.data = result.Data
		s.commit(req)
	}
	// There is a pending node request for this data, fill it.
	if req := s.nodeReqs[result.Hash]; req != nil && req.data == nil {
		filled = true
		// Decode the node data content and update the request
		node, err := decodeNode(result.Hash[:], result.Data)
		if err != nil {
			return err
		}
		req.data = result.Data

		// Create and schedule a request for all the children nodes
		requests, err := s.children(req, node)
		if err != nil {
			return err
		}
		if len(requests) == 0 && req.deps == 0 {
			s.commit(req)
		} else {
			req.deps += len(requests)
			for _, child := range requests {
				s.schedule(child)
			}
		}
	}
	if !filled {
		return ErrAlreadyProcessed
	}
	return nil
}

// Commit flushes the data stored in the internal membatch out to persistent
// storage, returning any error that occurred.
func (s *Sync) Commit(dbw ethdb.Batch) error {
	// Dump the membatch into the database batch dbw
	for key, value := range s.membatch.nodes {
		rawdb.WriteTrieNode(dbw, key, value)
	}
	for key, value := range s.membatch.codes {
		rawdb.WriteCode(dbw, key, value)
	}
	// Drop the membatch data and return
	s.membatch = newSyncMemBatch()
	return nil
}

// Pending returns the number of state entries currently pending for download.
func (s *Sync) Pending() int {
	return len(s.nodeReqs) + len(s.codeReqs)
}

// schedule inserts a new state retrieval request into the fetch queue. If there
// is already a pending request for this node, the new request will be discarded
// and only a parent reference added to the old one.
func (s *Sync) schedule(req *request) {
	var reqset = s.nodeReqs
	if req.code {
		reqset = s.codeReqs
	}
	// If we're already requesting this node, add a new reference and stop
	if old, ok := reqset[req.hash]; ok {
		old.parents = append(old.parents, req.parents...)
		return
	}
	reqset[req.hash] = req

	// Schedule the request for future retrieval. This queue is shared
	// by both node requests and code requests. It can happen that a
	// trie node and code have the same hash. In that case two elements
	// with the same hash and the same or a different depth will be pushed,
	// but that's OK: the worst case is that the second response is treated
	// as a duplicate.
	prio := int64(len(req.path)) << 56 // depth >= 128 will never happen, storage leaves will be included in their parents
	for i := 0; i < 14 && i < len(req.path); i++ {
		prio |= int64(15-req.path[i]) << (52 - i*4) // 15-nibble => lexicographic order
	}
	s.queue.Push(req.hash, prio)
}

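// NOTE (added, not part of the original file): worked example of the priority
// layout produced by schedule. For a request at path {0x3, 0xa}:
//
//	prio = 2<<56 | (15-0x3)<<52 | (15-0xa)<<48 = 0x02c5000000000000
//
// The depth sits in the top byte, which is why Missing recovers it with
// int(prio >> 56) for per-depth throttling, while the inverted nibbles mean
// that, at equal depth, lexicographically smaller paths carry higher priority
// and are popped from the queue first.
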
// children retrieves all the missing children of a state trie entry for future
// retrieval scheduling.
func (s *Sync) children(req *request, object node) ([]*request, error) {
	// Gather all the children of the node, irrelevant whether known or not
	type child struct {
		path []byte
		node node
	}
	var children []child

	switch node := (object).(type) {
	case *shortNode:
		key := node.Key
		if hasTerm(key) {
			key = key[:len(key)-1]
		}
		children = []child{{
			node: node.Val,
			path: append(append([]byte(nil), req.path...), key...),
		}}
	case *fullNode:
		for i := 0; i < 17; i++ {
			if node.Children[i] != nil {
				children = append(children, child{
					node: node.Children[i],
					path: append(append([]byte(nil), req.path...), byte(i)),
				})
			}
		}
	default:
		panic(fmt.Sprintf("unknown node: %+v", node))
	}
	// Iterate over the children, and request all unknown ones
	requests := make([]*request, 0, len(children))
	for _, child := range children {
		// Notify any external watcher of a new key/value node
		if req.callback != nil {
			if node, ok := (child.node).(valueNode); ok {
				var paths [][]byte
				if len(child.path) == 2*common.HashLength {
					paths = append(paths, hexToKeyBytes(child.path))
				} else if len(child.path) == 4*common.HashLength {
					paths = append(paths, hexToKeyBytes(child.path[:2*common.HashLength]))
					paths = append(paths, hexToKeyBytes(child.path[2*common.HashLength:]))
				}
				if err := req.callback(paths, child.path, node, req.hash); err != nil {
					return nil, err
				}
			}
		}
		// If the child references another node, resolve or schedule
		if node, ok := (child.node).(hashNode); ok {
			// Try to resolve the node from the local database
			hash := common.BytesToHash(node)
			if s.membatch.hasNode(hash) {
				continue
			}
			// If database says duplicate, then at least the trie node is present
			// and we hold the assumption that it's NOT legacy contract code.
			if rawdb.HasTrieNode(s.database, hash) {
				continue
			}
			// Locally unknown node, schedule for retrieval
			requests = append(requests, &request{
				path:     child.path,
				hash:     hash,
				parents:  []*request{req},
				callback: req.callback,
			})
		}
	}
	return requests, nil
}

// commit finalizes a retrieval request and stores it into the membatch. If any
// of the referencing parent requests complete due to this commit, they are also
// committed themselves.
func (s *Sync) commit(req *request) (err error) {
	// Write the node content to the membatch
	if req.code {
		s.membatch.codes[req.hash] = req.data
		delete(s.codeReqs, req.hash)
		s.fetches[len(req.path)]--
	} else {
		s.membatch.nodes[req.hash] = req.data
		delete(s.nodeReqs, req.hash)
		s.fetches[len(req.path)]--
	}
	// Check all parents for completion
	for _, parent := range req.parents {
		parent.deps--
		if parent.deps == 0 {
			if err := s.commit(parent); err != nil {
				return err
			}
		}
	}
	return nil
}