// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the state diff builder, which computes the difference between two
// state tries and emits the corresponding state and storage node records.

package statediff

import (
	"bytes"
	"context"
	"fmt"
	"sync"
	"time"

	iterutils "github.com/cerc-io/eth-iterator-utils"
	"github.com/cerc-io/eth-iterator-utils/tracker"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"golang.org/x/sync/errgroup"

	"github.com/cerc-io/plugeth-statediff/adapt"
	"github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
	"github.com/cerc-io/plugeth-statediff/indexer/ipld"
	"github.com/cerc-io/plugeth-statediff/indexer/shared"
	sdtypes "github.com/cerc-io/plugeth-statediff/types"
	"github.com/cerc-io/plugeth-statediff/utils"
	"github.com/cerc-io/plugeth-statediff/utils/log"
)

var (
	emptyNode, _      = rlp.EncodeToBytes(&[]byte{})
	emptyContractRoot = crypto.Keccak256Hash(emptyNode)
	nullCodeHash      = crypto.Keccak256([]byte{})
	zeroHash          common.Hash

	defaultSubtrieWorkers uint = 8
)

// Builder interface exposes the methods for building a state diff between two blocks.
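//
// A minimal usage sketch (illustrative only: sv, args, and params stand in for
// an adapt.StateView and the block Args/Params prepared by the caller):
//
//	b := NewBuilder(sv)
//	obj, err := b.BuildStateDiffObject(args, params)
//	if err != nil {
//		// handle the error
//	}
//	// obj.Nodes and obj.IPLDs now hold the diff records for the block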
type Builder interface {
	BuildStateDiffObject(Args, Params) (sdtypes.StateObject, error)
	WriteStateDiff(Args, Params, sdtypes.StateNodeSink, sdtypes.IPLDSink) error
}

type builder struct {
	// state cache is safe for concurrent reads
	stateCache     adapt.StateView
	subtrieWorkers uint
}
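
// accountUpdate pairs the new state of an account with its previous storage
// root, for accounts that appear in both the old and new tries.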
type accountUpdate struct {
	new     sdtypes.AccountWrapper
	oldRoot common.Hash
}

type accountUpdateMap map[string]*accountUpdate
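
// appender returns a sink function that appends each received item to the
// given slice. It is not safe for concurrent use.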
func appender[T any](to *[]T) func(T) error {
	return func(a T) error {
		*to = append(*to, a)
		return nil
	}
}
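
// syncedAppender returns a sink function that appends each received item to
// the given slice while holding a mutex, making it safe for concurrent use.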
func syncedAppender[T any](to *[]T) func(T) error {
	var mtx sync.Mutex
	return func(a T) error {
		mtx.Lock()
		*to = append(*to, a)
		mtx.Unlock()
		return nil
	}
}

// NewBuilder is used to create a statediff builder
func NewBuilder(stateCache adapt.StateView) Builder {
	return &builder{
		stateCache:     stateCache,
		subtrieWorkers: defaultSubtrieWorkers,
	}
}
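
// SetSubtrieWorkers sets the number of subtries into which the state trie is
// partitioned for concurrent processing.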
func (sdb *builder) SetSubtrieWorkers(n uint) {
	sdb.subtrieWorkers = n
}

// BuildStateDiffObject builds a statediff object from two blocks and the provided parameters
func (sdb *builder) BuildStateDiffObject(args Args, params Params) (sdtypes.StateObject, error) {
	defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.BuildStateDiffObjectTimer)
	var stateNodes []sdtypes.StateLeafNode
	var iplds []sdtypes.IPLD
	err := sdb.WriteStateDiff(args, params, syncedAppender(&stateNodes), syncedAppender(&iplds))
	if err != nil {
		return sdtypes.StateObject{}, err
	}
	return sdtypes.StateObject{
		BlockHash:   args.BlockHash,
		BlockNumber: args.BlockNumber,
		Nodes:       stateNodes,
		IPLDs:       iplds,
	}, nil
}

// WriteStateDiff writes a statediff object to output sinks
func (sdb *builder) WriteStateDiff(
	args Args, params Params,
	nodeSink sdtypes.StateNodeSink,
	ipldSink sdtypes.IPLDSink,
) error {
	defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.WriteStateDiffTimer)
	// Load tries for old and new states
	triea, err := sdb.stateCache.OpenTrie(args.OldStateRoot)
	if err != nil {
		return fmt.Errorf("error opening old state trie: %w", err)
	}
	trieb, err := sdb.stateCache.OpenTrie(args.NewStateRoot)
	if err != nil {
		return fmt.Errorf("error opening new state trie: %w", err)
	}
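	// Split both tries into the same number of subtries; the i-th subtrie of A is
	// paired with the i-th subtrie of B and walked with a symmetric difference
	// iterator in its own goroutine.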
	subitersA := iterutils.SubtrieIterators(triea.NodeIterator, uint(sdb.subtrieWorkers))
	subitersB := iterutils.SubtrieIterators(trieb.NodeIterator, uint(sdb.subtrieWorkers))

	logger := log.New("hash", args.BlockHash, "number", args.BlockNumber)
	// errgroup will cancel if any group fails
	g, ctx := errgroup.WithContext(context.Background())
	for i := uint(0); i < sdb.subtrieWorkers; i++ {
		func(subdiv uint) {
			g.Go(func() error {
				a, b := subitersA[subdiv], subitersB[subdiv]
				it, aux := utils.NewSymmetricDifferenceIterator(a, b)
				return sdb.processAccounts(ctx,
					it, aux,
					params.watchedAddressesLeafPaths,
					nodeSink, ipldSink, logger,
				)
			})
		}(i)
	}
	return g.Wait()
}

// WriteStateDiffTracked writes a statediff object to output sinks, using the
// given tracker to checkpoint iterator positions so the operation can be resumed.
func (sdb *builder) WriteStateDiffTracked(
	args Args, params Params,
	nodeSink sdtypes.StateNodeSink,
	ipldSink sdtypes.IPLDSink,
	tracker *tracker.Tracker,
) error {
	defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.WriteStateDiffTimer)
	// Load tries for old and new states
	triea, err := sdb.stateCache.OpenTrie(args.OldStateRoot)
	if err != nil {
		return fmt.Errorf("error opening old state trie: %w", err)
	}
	trieb, err := sdb.stateCache.OpenTrie(args.NewStateRoot)
	if err != nil {
		return fmt.Errorf("error opening new state trie: %w", err)
	}

	var subiters []trie.NodeIterator
	var auxes []*utils.SymmDiffAux
	// Constructor for difference iterator at a specific (recovered) path
	makeIterator := func(key []byte) trie.NodeIterator {
		a := triea.NodeIterator(key)
		b := trieb.NodeIterator(key)
		diffit, aux := utils.NewSymmetricDifferenceIterator(a, b)
		// iterators are constructed in-order, so these will align
		auxes = append(auxes, aux)
		return diffit
	}
	subiters, err = tracker.Restore(makeIterator)
	if err != nil {
		return fmt.Errorf("error restoring iterators: %w", err)
	}
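
	// If the tracker had saved positions, the restored iterators are used as-is;
	// otherwise fresh subtrie iterators are constructed and wrapped for tracking.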
	if subiters != nil {
		if len(subiters) != int(sdb.subtrieWorkers) {
			return fmt.Errorf("expected to restore %d iterators, got %d", sdb.subtrieWorkers, len(subiters))
		}
	} else {
		subitersA := iterutils.SubtrieIterators(triea.NodeIterator, uint(sdb.subtrieWorkers))
		subitersB := iterutils.SubtrieIterators(trieb.NodeIterator, uint(sdb.subtrieWorkers))
		for i := 0; i < int(sdb.subtrieWorkers); i++ {
			it, aux := utils.NewSymmetricDifferenceIterator(subitersA[i], subitersB[i])
			it = tracker.Tracked(it)
			subiters = append(subiters, it)
			auxes = append(auxes, aux)
		}
	}
	logger := log.New("hash", args.BlockHash, "number", args.BlockNumber)
	// errgroup will cancel if any group fails
	g, ctx := errgroup.WithContext(context.Background())
	for i := uint(0); i < sdb.subtrieWorkers; i++ {
		func(subdiv uint) {
			g.Go(func() error {
				return sdb.processAccounts(ctx,
					subiters[subdiv], auxes[subdiv],
					params.watchedAddressesLeafPaths,
					nodeSink, ipldSink, logger,
				)
			})
		}(i)
	}
	return g.Wait()
}

// processAccounts processes account creations, deletions, and updates between
// the old and new state tries, emitting records to the given sinks.
func (sdb *builder) processAccounts(
	ctx context.Context,
	it trie.NodeIterator, aux *utils.SymmDiffAux,
	watchedAddressesLeafPaths [][]byte,
	nodeSink sdtypes.StateNodeSink, ipldSink sdtypes.IPLDSink,
	logger log.Logger,
) error {
	logger.Trace("statediff/processAccounts BEGIN")
	defer metrics.ReportAndUpdateDuration("statediff/processAccounts END",
		time.Now(), logger, metrics.IndexerMetrics.ProcessAccountsTimer)

	updates := make(accountUpdateMap)
	// Cache the RLP of the previous node. When we hit a value node this will be the parent blob.
	var prevBlob []byte
	prevBlob = it.NodeBlob()
	for it.Next(true) {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		// ignore node if it is not along paths of interest
		if !isWatchedPathPrefix(watchedAddressesLeafPaths, it.Path()) {
			continue
		}
		if aux.FromA() { // Node exists in the old trie
			if it.Leaf() {
				var account types.StateAccount
				if err := rlp.DecodeBytes(it.LeafBlob(), &account); err != nil {
					return err
				}
				leafKey := make([]byte, len(it.LeafKey()))
				copy(leafKey, it.LeafKey())

				if aux.CommonPath() {
					// If B also contains this leaf node, this is the old state of an updated account.
					if update, ok := updates[string(leafKey)]; ok {
						update.oldRoot = account.Root
					} else {
						updates[string(leafKey)] = &accountUpdate{oldRoot: account.Root}
					}
				} else {
					// This node was removed, meaning the account was deleted. Emit empty
					// "removed" records for the state node and all storage slots.
					err := sdb.processAccountDeletion(leafKey, account, nodeSink)
					if err != nil {
						return err
					}
				}
			}
			continue
		}
		// Node exists in the new trie
		if it.Leaf() {
			accountW, err := sdb.decodeStateLeaf(it, prevBlob)
			if err != nil {
				return err
			}

			if aux.CommonPath() {
				// If A also contains this leaf node, this is the new state of an updated account.
				if update, ok := updates[string(accountW.LeafKey)]; ok {
					update.new = *accountW
				} else {
					updates[string(accountW.LeafKey)] = &accountUpdate{new: *accountW}
				}
			} else { // account was created
				err := sdb.processAccountCreation(accountW, ipldSink, nodeSink)
				if err != nil {
					return err
				}
			}
		} else {
			// New trie nodes will be written to blockstore only.
			// Reminder: this includes leaf nodes, since the geth iterator.Leaf() actually
			// signifies a "value" node.
			if it.Hash() == zeroHash {
				continue
			}
			// TODO - this can be handled when the value node is created
			nodeVal := make([]byte, len(it.NodeBlob()))
			copy(nodeVal, it.NodeBlob())
			// if doing a selective diff, we need to ensure this is a watched path
			if len(watchedAddressesLeafPaths) > 0 {
				var elements []interface{}
				if err := rlp.DecodeBytes(nodeVal, &elements); err != nil {
					return err
				}
				ok, err := isLeaf(elements)
				if err != nil {
					return err
				}
				if ok {
					partialPath := utils.CompactToHex(elements[0].([]byte))
					valueNodePath := append(it.Path(), partialPath...)
					if !isWatchedPath(watchedAddressesLeafPaths, valueNodePath) {
						continue
					}
				}
			}
			if err := ipldSink(sdtypes.IPLD{
				CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, it.Hash().Bytes()).String(),
				Content: nodeVal,
			}); err != nil {
				return err
			}
			prevBlob = nodeVal
		}
	}
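
	// For each account present in both tries, diff its storage between the old
	// and new storage roots and emit the combined state leaf node.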
	for key, update := range updates {
		var storageDiff []sdtypes.StorageLeafNode
		err := sdb.processStorageUpdates(
			update.oldRoot, update.new.Account.Root,
			appender(&storageDiff), ipldSink,
		)
		if err != nil {
			return fmt.Errorf("error processing incremental storage diffs for account with leafkey %x\r\nerror: %w", key, err)
		}

		if err = nodeSink(sdtypes.StateLeafNode{
			AccountWrapper: update.new,
			StorageDiff:    storageDiff,
		}); err != nil {
			return err
		}
	}

	metrics.IndexerMetrics.DifferenceIteratorCounter.Inc(int64(aux.Count()))
	return it.Error()
}
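
// processAccountDeletion emits a "removed" record for a deleted account, along
// with "removed" records for each of its storage slots.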
func (sdb *builder) processAccountDeletion(
	leafKey []byte, account types.StateAccount, nodeSink sdtypes.StateNodeSink,
) error {
	diff := sdtypes.StateLeafNode{
		AccountWrapper: sdtypes.AccountWrapper{
			LeafKey: leafKey,
			CID:     shared.RemovedNodeStateCID,
		},
		Removed: true,
	}
	err := sdb.processRemovedAccountStorage(account.Root, appender(&diff.StorageDiff))
	if err != nil {
		return fmt.Errorf("failed building storage diffs for removed state account with key %x\r\nerror: %w", leafKey, err)
	}
	return nodeSink(diff)
}
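
// processAccountCreation emits records for a newly created account: its state
// leaf node, any storage nodes it already holds, and, for contracts, the
// codehash-to-code mapping.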
func (sdb *builder) processAccountCreation(
	accountW *sdtypes.AccountWrapper, ipldSink sdtypes.IPLDSink, nodeSink sdtypes.StateNodeSink,
) error {
	diff := sdtypes.StateLeafNode{
		AccountWrapper: *accountW,
	}
	if !bytes.Equal(accountW.Account.CodeHash, nullCodeHash) {
		// For contract creations, any storage node contained is a diff
		err := sdb.processStorageCreations(accountW.Account.Root, appender(&diff.StorageDiff), ipldSink)
		if err != nil {
			return fmt.Errorf("failed building eventual storage diffs for node with leaf key %x\r\nerror: %w", accountW.LeafKey, err)
		}
		// emit codehash => code mappings for contract
		codeHash := common.BytesToHash(accountW.Account.CodeHash)
		code, err := sdb.stateCache.ContractCode(codeHash)
		if err != nil {
			return fmt.Errorf("failed to retrieve code for codehash %s\r\n error: %w", codeHash, err)
		}
		if err := ipldSink(sdtypes.IPLD{
			CID:     ipld.Keccak256ToCid(ipld.RawBinary, codeHash.Bytes()).String(),
			Content: code,
		}); err != nil {
			return err
		}
	}
	return nodeSink(diff)
}

// decodeStateLeaf decodes the account at the current leaf and encodes the RLP data to a CID.
// Reminder: it.Leaf() == true when the iterator is positioned at a "value node" (which is not something
// that actually exists in an MMPT), therefore we pass the parent node blob as the leaf RLP.
func (sdb *builder) decodeStateLeaf(it trie.NodeIterator, parentBlob []byte) (*sdtypes.AccountWrapper, error) {
	var account types.StateAccount
	if err := rlp.DecodeBytes(it.LeafBlob(), &account); err != nil {
		return nil, fmt.Errorf("error decoding account at leaf key %x: %w", it.LeafKey(), err)
	}

	leafKey := make([]byte, len(it.LeafKey()))
	copy(leafKey, it.LeafKey())
	return &sdtypes.AccountWrapper{
		LeafKey: leafKey,
		Account: &account,
		CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(parentBlob)).String(),
	}, nil
}

// processStorageCreations processes the storage node records for a newly created account,
// i.e. it returns all the storage nodes at this state, since there is no previous state.
func (sdb *builder) processStorageCreations(
	sr common.Hash, storageSink sdtypes.StorageNodeSink, ipldSink sdtypes.IPLDSink,
) error {
	defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.ProcessStorageCreationsTimer)
	if sr == emptyContractRoot {
		return nil
	}
	log.Debug("Storage root for eventual diff", "root", sr)
	sTrie, err := sdb.stateCache.OpenTrie(sr)
	if err != nil {
		log.Info("error in build storage diff eventual", "error", err)
		return err
	}

	var prevBlob []byte
	it := sTrie.NodeIterator(make([]byte, 0))
	for it.Next(true) {
		if it.Leaf() {
			storageLeafNode := sdb.decodeStorageLeaf(it, prevBlob)
			if err := storageSink(storageLeafNode); err != nil {
				return err
			}
		} else {
			nodeVal := make([]byte, len(it.NodeBlob()))
			copy(nodeVal, it.NodeBlob())
			if err := ipldSink(sdtypes.IPLD{
				CID:     ipld.Keccak256ToCid(ipld.MEthStorageTrie, it.Hash().Bytes()).String(),
				Content: nodeVal,
			}); err != nil {
				return err
			}
			prevBlob = nodeVal
		}
	}
	return it.Error()
}

// processStorageUpdates builds the storage diff node objects for all nodes that exist in a different state at B than A
func (sdb *builder) processStorageUpdates(
	oldroot common.Hash, newroot common.Hash,
	storageSink sdtypes.StorageNodeSink,
	ipldSink sdtypes.IPLDSink,
) error {
	defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.ProcessStorageUpdatesTimer)
	if newroot == oldroot {
		return nil
	}
	log.Trace("Storage roots for incremental diff", "old", oldroot, "new", newroot)
	oldTrie, err := sdb.stateCache.OpenTrie(oldroot)
	if err != nil {
		return err
	}
	newTrie, err := sdb.stateCache.OpenTrie(newroot)
	if err != nil {
		return err
	}

	var prevBlob []byte
	a, b := oldTrie.NodeIterator(nil), newTrie.NodeIterator(nil)
	it, aux := utils.NewSymmetricDifferenceIterator(a, b)
	for it.Next(true) {
		if aux.FromA() {
			if it.Leaf() && !aux.CommonPath() {
				// If this node's leaf key is absent from B, the storage slot was vacated.
				// In that case, emit an empty "removed" storage node record.
				if err := storageSink(sdtypes.StorageLeafNode{
					CID:     shared.RemovedNodeStorageCID,
					Removed: true,
					LeafKey: []byte(it.LeafKey()),
					Value:   []byte{},
				}); err != nil {
					return err
				}
			}
			continue
		}
		if it.Leaf() {
			storageLeafNode := sdb.decodeStorageLeaf(it, prevBlob)
			if err := storageSink(storageLeafNode); err != nil {
				return err
			}
		} else {
			if it.Hash() == zeroHash {
				continue
			}
			nodeVal := make([]byte, len(it.NodeBlob()))
			copy(nodeVal, it.NodeBlob())
			if err := ipldSink(sdtypes.IPLD{
				CID:     ipld.Keccak256ToCid(ipld.MEthStorageTrie, it.Hash().Bytes()).String(),
				Content: nodeVal,
			}); err != nil {
				return err
			}
			prevBlob = nodeVal
		}
	}
	return it.Error()
}

// processRemovedAccountStorage builds the "removed" diffs for all the storage nodes for a destroyed account
func (sdb *builder) processRemovedAccountStorage(
	sr common.Hash, storageSink sdtypes.StorageNodeSink,
) error {
	defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.ProcessRemovedAccountStorageTimer)
	if sr == emptyContractRoot {
		return nil
	}
	log.Debug("Storage root for removed diffs", "root", sr)
	sTrie, err := sdb.stateCache.OpenTrie(sr)
	if err != nil {
		log.Info("error in build removed account storage diffs", "error", err)
		return err
	}
	it := sTrie.NodeIterator(nil)
	for it.Next(true) {
		if it.Leaf() { // only leaf values are indexed, don't need to demarcate removed intermediate nodes
			leafKey := make([]byte, len(it.LeafKey()))
			copy(leafKey, it.LeafKey())
			if err := storageSink(sdtypes.StorageLeafNode{
				CID:     shared.RemovedNodeStorageCID,
				Removed: true,
				LeafKey: leafKey,
				Value:   []byte{},
			}); err != nil {
				return err
			}
		}
	}
	return it.Error()
}

// decodeStorageLeaf decodes the slot at the current leaf and encodes the RLP data to a CID.
// Reminder: it.Leaf() == true when the iterator is positioned at a "value node" (which is not something
// that actually exists in an MMPT), therefore we pass the parent node blob as the leaf RLP.
func (sdb *builder) decodeStorageLeaf(it trie.NodeIterator, parentBlob []byte) sdtypes.StorageLeafNode {
	leafKey := make([]byte, len(it.LeafKey()))
	copy(leafKey, it.LeafKey())
	value := make([]byte, len(it.LeafBlob()))
	copy(value, it.LeafBlob())

	return sdtypes.StorageLeafNode{
		LeafKey: leafKey,
		Value:   value,
		CID:     ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(parentBlob)).String(),
	}
}

// isWatchedPathPrefix checks if a node path is a prefix (ancestor) to one of the watched addresses.
// An empty watch list means all paths are watched.
func isWatchedPathPrefix(watchedLeafPaths [][]byte, path []byte) bool {
	if len(watchedLeafPaths) == 0 {
		return true
	}
	for _, watched := range watchedLeafPaths {
		if bytes.HasPrefix(watched, path) {
			return true
		}
	}
	return false
}

// isWatchedPath checks if a node path corresponds to one of the watched addresses
func isWatchedPath(watchedLeafPaths [][]byte, leafPath []byte) bool {
	defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.IsWatchedAddressTimer)
	for _, watched := range watchedLeafPaths {
		if bytes.Equal(watched, leafPath) {
			return true
		}
	}
	return false
}

// isLeaf checks if the node we are at is a leaf
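// (per the trie's hex-prefix encoding, a two-element node whose first path
// nibble is 2 or 3 is a leaf; 0 or 1 marks an extension node).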
func isLeaf(elements []interface{}) (bool, error) {
	if len(elements) > 2 {
		return false, nil
	}
	if len(elements) < 2 {
		return false, fmt.Errorf("node cannot be less than two elements in length")
	}
	switch elements[0].([]byte)[0] / 16 {
	case '\x00':
		return false, nil
	case '\x01':
		return false, nil
	case '\x02':
		return true, nil
	case '\x03':
		return true, nil
	default:
		return false, fmt.Errorf("unknown hex prefix")
	}
}