v5 part 1 #269

.gitignore (vendored)

@@ -56,6 +56,5 @@ related-repositories/hive/**
 related-repositories/ipld-eth-db/**
 statediff/indexer/database/sql/statediffing_test_file.sql
 statediff/statediffing_test_file.sql
-statediff/known_gaps.sql
 related-repositories/foundry-test/
 related-repositories/ipld-eth-db/

@@ -280,14 +280,13 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 		}
 	}
 	p := statediff.Config{
-		IndexerConfig:     indexerConfig,
-		KnownGapsFilePath: ctx.String(utils.StateDiffKnownGapsFilePath.Name),
-		ID:                nodeID,
-		ClientName:        clientName,
-		Context:           context.Background(),
-		EnableWriteLoop:   ctx.Bool(utils.StateDiffWritingFlag.Name),
-		NumWorkers:        ctx.Uint(utils.StateDiffWorkersFlag.Name),
-		WaitForSync:       ctx.Bool(utils.StateDiffWaitForSync.Name),
+		IndexerConfig:   indexerConfig,
+		ID:              nodeID,
+		ClientName:      clientName,
+		Context:         context.Background(),
+		EnableWriteLoop: ctx.Bool(utils.StateDiffWritingFlag.Name),
+		NumWorkers:      ctx.Uint(utils.StateDiffWorkersFlag.Name),
+		WaitForSync:     ctx.Bool(utils.StateDiffWaitForSync.Name),
 	}
 	utils.RegisterStateDiffService(stack, eth, &cfg.Eth, p, backend)
 }

@@ -175,7 +175,6 @@ var (
 		utils.StateDiffFileMode,
 		utils.StateDiffFileCsvDir,
 		utils.StateDiffFilePath,
-		utils.StateDiffKnownGapsFilePath,
 		utils.StateDiffWaitForSync,
 		utils.StateDiffWatchedAddressesFilePath,
 		configFileFlag,

@@ -1057,11 +1057,6 @@ var (
 		Name:  "statediff.file.path",
 		Usage: "Full path (including filename) to write statediff data out to when operating in sql file mode",
 	}
-	StateDiffKnownGapsFilePath = &cli.StringFlag{
-		Name:  "statediff.knowngapsfile.path",
-		Usage: "Full path (including filename) to write knownGaps statements when the DB is unavailable.",
-		Value: "./known_gaps.sql",
-	}
 	StateDiffWatchedAddressesFilePath = &cli.StringFlag{
 		Name:  "statediff.file.wapath",
 		Usage: "Full path (including filename) to write statediff watched addresses out to when operating in file mode",

@@ -30,8 +30,6 @@ import (
 type Config struct {
 	// The configuration used for the stateDiff Indexer
 	IndexerConfig interfaces.Config
-	// The filepath to write knownGaps insert statements if we can't connect to the DB.
-	KnownGapsFilePath string
 	// A unique ID used for this service
 	ID string
 	// Name for the client this service is running

@@ -1,17 +0,0 @@
-# Overview
-
-This document will provide some insight into the `known_gaps` table, their use cases, and implementation. Please refer to the [following PR](https://github.com/vulcanize/go-ethereum/pull/217) and the [following epic](https://github.com/vulcanize/ops/issues/143) to grasp their inception.
-
-
-
-# Use Cases
-
-The known gaps table is updated when the following events occur:
-
-1. At start up we check the latest block from the `eth.headers_cid` table. We compare the first block that we are processing with the latest block from the DB. If they are not one unit of expectedDifference away from each other, add the gap between the two blocks.
-2. If there is any error in processing a block (db connection, deadlock, etc), add that block to the knownErrorBlocks slice, when the next block is successfully written, write this slice into the DB.
-
-# Glossary
-
-1. `expectedDifference (number)` - This number indicates what the difference between two blocks should be. If we are capturing all events on a geth node then this number would be `1`. But once we scale nodes, the `expectedDifference` might be `2` or greater.
-2. `processingKey (number)` - This number can be used to keep track of different geth nodes and their specific `expectedDifference`.
Binary file not shown (image, 33 KiB).
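
The removed document above describes the gap check in prose. A minimal runnable sketch of that check, using only the standard library; `isGap` mirrors the helper deleted from the known-gaps code later in this diff:

```go
package main

import (
	"fmt"
	"math/big"
)

// isGap reports whether the latest block in the DB and the latest block on
// chain are more than expectedDifference apart.
func isGap(latestBlockInDb, latestBlockOnChain, expectedDifference *big.Int) bool {
	expected := new(big.Int).Sub(latestBlockOnChain, expectedDifference)
	return expected.Cmp(latestBlockInDb) != 0
}

func main() {
	// DB is at 100, head is at 105, expectedDifference is 1: blocks 101-104 are a gap.
	fmt.Println(isGap(big.NewInt(100), big.NewInt(105), big.NewInt(1))) // true
	fmt.Println(isGap(big.NewInt(104), big.NewInt(105), big.NewInt(1))) // false
}
```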

@@ -32,22 +32,22 @@ import (
 )
 
 // NewStateDiffIndexer creates and returns an implementation of the StateDiffIndexer interface.
-func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, nodeInfo node.Info, config interfaces.Config) (sql.Database, interfaces.StateDiffIndexer, error) {
+func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, nodeInfo node.Info, config interfaces.Config) (interfaces.StateDiffIndexer, error) {
 	switch config.Type() {
 	case shared.FILE:
 		log.Info("Starting statediff service in SQL file writing mode")
 		fc, ok := config.(file.Config)
 		if !ok {
-			return nil, nil, fmt.Errorf("file config is not the correct type: got %T, expected %T", config, file.Config{})
+			return nil, fmt.Errorf("file config is not the correct type: got %T, expected %T", config, file.Config{})
 		}
 		fc.NodeInfo = nodeInfo
 		ind, err := file.NewStateDiffIndexer(ctx, chainConfig, fc)
-		return nil, ind, err
+		return ind, err
 	case shared.POSTGRES:
 		log.Info("Starting statediff service in Postgres writing mode")
 		pgc, ok := config.(postgres.Config)
 		if !ok {
-			return nil, nil, fmt.Errorf("postgres config is not the correct type: got %T, expected %T", config, postgres.Config{})
+			return nil, fmt.Errorf("postgres config is not the correct type: got %T, expected %T", config, postgres.Config{})
 		}
 		var err error
 		var driver sql.Driver
@@ -55,27 +55,27 @@ func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, n
 		case postgres.PGX:
 			driver, err = postgres.NewPGXDriver(ctx, pgc, nodeInfo)
 			if err != nil {
-				return nil, nil, err
+				return nil, err
 			}
 		case postgres.SQLX:
 			driver, err = postgres.NewSQLXDriver(ctx, pgc, nodeInfo)
 			if err != nil {
-				return nil, nil, err
+				return nil, err
 			}
 		default:
-			return nil, nil, fmt.Errorf("unrecognized Postgres driver type: %s", pgc.Driver)
+			return nil, fmt.Errorf("unrecognized Postgres driver type: %s", pgc.Driver)
 		}
 		db := postgres.NewPostgresDB(driver)
 		ind, err := sql.NewStateDiffIndexer(ctx, chainConfig, db)
-		return db, ind, err
+		return ind, err
 	case shared.DUMP:
 		log.Info("Starting statediff service in data dump mode")
 		dumpc, ok := config.(dump.Config)
 		if !ok {
-			return nil, nil, fmt.Errorf("dump config is not the correct type: got %T, expected %T", config, dump.Config{})
+			return nil, fmt.Errorf("dump config is not the correct type: got %T, expected %T", config, dump.Config{})
 		}
-		return nil, dump.NewStateDiffIndexer(chainConfig, dumpc), nil
+		return dump.NewStateDiffIndexer(chainConfig, dumpc), nil
 	default:
-		return nil, nil, fmt.Errorf("unrecognized database type: %s", config.Type())
+		return nil, fmt.Errorf("unrecognized database type: %s", config.Type())
 	}
 }
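
A hypothetical call site (not code from this PR) illustrating the narrowed constructor; the import paths are assumptions following this repo's statediff/indexer layout:

```go
import (
	"context"

	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/statediff/indexer"
	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
	"github.com/ethereum/go-ethereum/statediff/indexer/node"
)

func setupIndexer(ctx context.Context, chainConfig *params.ChainConfig, nodeInfo node.Info, cfg interfaces.Config) (interfaces.StateDiffIndexer, error) {
	// Before this change: db, ind, err := indexer.NewStateDiffIndexer(...)
	ind, err := indexer.NewStateDiffIndexer(ctx, chainConfig, nodeInfo, cfg)
	if err != nil {
		return nil, err
	}
	return ind, nil
}
```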

@@ -17,11 +17,17 @@
 package dump
 
 import (
+	"bytes"
 	"fmt"
 	"io"
 	"math/big"
 	"time"
 
+	"github.com/ethereum/go-ethereum/common/hexutil"
+
+	blockstore "github.com/ipfs/go-ipfs-blockstore"
+	dshelp "github.com/ipfs/go-ipfs-ds-help"
+
 	ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
 
 	"github.com/ipfs/go-cid"
@@ -79,7 +85,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
 	}
 
 	// Generate the block iplds
-	headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
+	headerNode, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
 	if err != nil {
 		return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
 	}
@@ -146,7 +152,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
 	traceMsg += fmt.Sprintf("header processing time: %s\r\n", tDiff.String())
 	t = time.Now()
 	// Publish and index uncles
-	err = sdi.processUncles(blockTx, headerID, block.Number(), uncleNodes)
+	err = sdi.processUncles(blockTx, headerID, block.Number(), block.UncleHash(), block.Uncles())
 	if err != nil {
 		return nil, err
 	}
@@ -197,7 +203,7 @@ func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, he
 		StateRoot:       header.Root.String(),
 		RctRoot:         header.ReceiptHash.String(),
 		TxRoot:          header.TxHash.String(),
-		UncleRoot:       header.UncleHash.String(),
+		UnclesHash:      header.UncleHash.String(),
 		Timestamp:       header.Time,
 		Coinbase:        header.Coinbase.String(),
 	}
@@ -206,25 +212,39 @@
 }
 
 // processUncles publishes and indexes uncle IPLDs in Postgres
-func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNumber *big.Int, uncleNodes []*ipld2.EthHeader) error {
+func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNumber *big.Int, unclesHash common.Hash, uncles []*types.Header) error {
 	// publish and index uncles
-	for _, uncleNode := range uncleNodes {
-		tx.cacheIPLD(uncleNode)
+	uncleEncoding, err := rlp.EncodeToBytes(uncles)
+	if err != nil {
+		return err
+	}
+	preparedHash := crypto.Keccak256Hash(uncleEncoding)
+	if !bytes.Equal(preparedHash.Bytes(), unclesHash.Bytes()) {
+		return fmt.Errorf("derived uncles hash (%s) does not match the hash in the header (%s)", preparedHash.Hex(), unclesHash.Hex())
+	}
+	unclesCID, err := ipld2.RawdataToCid(ipld2.MEthHeaderList, uncleEncoding, multihash.KECCAK_256)
+	if err != nil {
+		return err
+	}
+	prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(unclesCID.Hash()).String()
+	tx.cacheDirect(prefixedKey, uncleEncoding)
+	for i, uncle := range uncles {
 		var uncleReward *big.Int
 		// in PoA networks uncle reward is 0
 		if sdi.chainConfig.Clique != nil {
 			uncleReward = big.NewInt(0)
 		} else {
-			uncleReward = shared.CalcUncleMinerReward(blockNumber.Uint64(), uncleNode.Number.Uint64())
+			uncleReward = shared.CalcUncleMinerReward(blockNumber.Uint64(), uncle.Number.Uint64())
 		}
 		uncle := models.UncleModel{
 			BlockNumber: blockNumber.String(),
 			HeaderID:    headerID,
-			CID:         uncleNode.Cid().String(),
-			MhKey:       shared.MultihashKeyFromCID(uncleNode.Cid()),
-			ParentHash:  uncleNode.ParentHash.String(),
-			BlockHash:   uncleNode.Hash().String(),
+			CID:         unclesCID.String(),
+			MhKey:       shared.MultihashKeyFromCID(unclesCID),
+			ParentHash:  uncle.ParentHash.String(),
+			BlockHash:   uncle.Hash().String(),
 			Reward:      uncleReward.String(),
+			Index:       int64(i),
 		}
 		if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", uncle); err != nil {
 			return err
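
The rewritten processUncles above leans on a protocol invariant: a header's uncle hash is the keccak-256 of the RLP encoding of the uncle list, so the whole list can be stored as a single IPLD block keyed by that hash. A self-contained sketch of the check using public go-ethereum APIs:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

// checkUncles re-derives the uncles hash and compares it to the header's,
// the same validation the refactored processUncles performs before caching.
func checkUncles(header *types.Header, uncles []*types.Header) error {
	enc, err := rlp.EncodeToBytes(uncles)
	if err != nil {
		return err
	}
	derived := crypto.Keccak256Hash(enc)
	if !bytes.Equal(derived.Bytes(), header.UncleHash.Bytes()) {
		return fmt.Errorf("derived uncles hash (%s) does not match the header (%s)", derived.Hex(), header.UncleHash.Hex())
	}
	return nil
}

func main() {
	// An empty uncle list RLP-encodes to 0xc0, which hashes to types.EmptyUncleHash.
	h := &types.Header{UncleHash: types.EmptyUncleHash}
	fmt.Println(checkUncles(h, nil)) // <nil>
}
```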

@@ -442,7 +462,7 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
 		StatePath:   stateNode.Path,
 		Balance:     account.Balance.String(),
 		Nonce:       account.Nonce,
-		CodeHash:    account.CodeHash,
+		CodeHash:    hexutil.Encode(account.CodeHash),
 		StorageRoot: account.Root.String(),
 	}
 	if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", accountModel); err != nil {

@@ -252,7 +252,7 @@ func (csw *CSVWriter) upsertHeaderCID(header models.HeaderModel) {
 	var values []interface{}
 	values = append(values, header.BlockNumber, header.BlockHash, header.ParentHash, header.CID,
 		header.TotalDifficulty, header.NodeID, header.Reward, header.StateRoot, header.TxRoot,
-		header.RctRoot, header.UncleRoot, header.Bloom, strconv.FormatUint(header.Timestamp, 10), header.MhKey, 1, header.Coinbase)
+		header.RctRoot, header.UnclesHash, header.Bloom, strconv.FormatUint(header.Timestamp, 10), header.MhKey, 1, header.Coinbase)
 	csw.rows <- tableRow{types.TableHeader, values}
 	indexerMetrics.blocks.Inc(1)
 }
@@ -260,7 +260,7 @@ func (csw *CSVWriter) upsertHeaderCID(header models.HeaderModel) {
 func (csw *CSVWriter) upsertUncleCID(uncle models.UncleModel) {
 	var values []interface{}
 	values = append(values, uncle.BlockNumber, uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID,
-		uncle.Reward, uncle.MhKey)
+		uncle.Reward, uncle.MhKey, uncle.Index)
 	csw.rows <- tableRow{types.TableUncle, values}
 }
 

@@ -17,6 +17,7 @@
 package file
 
 import (
+	"bytes"
 	"context"
 	"errors"
 	"fmt"
@@ -26,11 +27,15 @@ import (
 	"sync/atomic"
 	"time"
 
+	blockstore "github.com/ipfs/go-ipfs-blockstore"
+	dshelp "github.com/ipfs/go-ipfs-ds-help"
+
 	"github.com/ipfs/go-cid"
 	node "github.com/ipfs/go-ipld-format"
+	"github.com/multiformats/go-multihash"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
@@ -149,7 +154,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
 	}
 
 	// Generate the block iplds
-	headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
+	headerNode, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
 	if err != nil {
 		return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
 	}
@@ -200,7 +205,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
 	t = time.Now()
 
 	// write uncles
-	sdi.processUncles(headerID, block.Number(), uncleNodes)
+	sdi.processUncles(headerID, block.Number(), block.UncleHash(), block.Uncles())
 	tDiff = time.Since(t)
 	indexerMetrics.tUncleProcessing.Update(tDiff)
 	traceMsg += fmt.Sprintf("uncle processing time: %s\r\n", tDiff.String())
@@ -255,35 +260,50 @@ func (sdi *StateDiffIndexer) processHeader(header *types.Header, headerNode node
 		StateRoot:       header.Root.String(),
 		RctRoot:         header.ReceiptHash.String(),
 		TxRoot:          header.TxHash.String(),
-		UncleRoot:       header.UncleHash.String(),
+		UnclesHash:      header.UncleHash.String(),
 		Timestamp:       header.Time,
 		Coinbase:        header.Coinbase.String(),
 	})
 	return headerID
 }
 
-// processUncles writes uncle IPLD insert SQL stmts to a file
-func (sdi *StateDiffIndexer) processUncles(headerID string, blockNumber *big.Int, uncleNodes []*ipld2.EthHeader) {
+// processUncles publishes and indexes uncle IPLDs in Postgres
+func (sdi *StateDiffIndexer) processUncles(headerID string, blockNumber *big.Int, unclesHash common.Hash, uncles []*types.Header) error {
 	// publish and index uncles
-	for _, uncleNode := range uncleNodes {
-		sdi.fileWriter.upsertIPLDNode(blockNumber.String(), uncleNode)
+	uncleEncoding, err := rlp.EncodeToBytes(uncles)
+	if err != nil {
+		return err
+	}
+	preparedHash := crypto.Keccak256Hash(uncleEncoding)
+	if !bytes.Equal(preparedHash.Bytes(), unclesHash.Bytes()) {
+		return fmt.Errorf("derived uncles hash (%s) does not match the hash in the header (%s)", preparedHash.Hex(), unclesHash.Hex())
+	}
+	unclesCID, err := ipld2.RawdataToCid(ipld2.MEthHeaderList, uncleEncoding, multihash.KECCAK_256)
+	if err != nil {
+		return err
+	}
+	prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(unclesCID.Hash()).String()
+	sdi.fileWriter.upsertIPLDDirect(blockNumber.String(), prefixedKey, uncleEncoding)
+	for i, uncle := range uncles {
 		var uncleReward *big.Int
 		// in PoA networks uncle reward is 0
 		if sdi.chainConfig.Clique != nil {
 			uncleReward = big.NewInt(0)
 		} else {
-			uncleReward = shared.CalcUncleMinerReward(blockNumber.Uint64(), uncleNode.Number.Uint64())
+			uncleReward = shared.CalcUncleMinerReward(blockNumber.Uint64(), uncle.Number.Uint64())
 		}
 		sdi.fileWriter.upsertUncleCID(models.UncleModel{
 			BlockNumber: blockNumber.String(),
 			HeaderID:    headerID,
-			CID:         uncleNode.Cid().String(),
-			MhKey:       shared.MultihashKeyFromCID(uncleNode.Cid()),
-			ParentHash:  uncleNode.ParentHash.String(),
-			BlockHash:   uncleNode.Hash().String(),
+			CID:         unclesCID.String(),
+			MhKey:       shared.MultihashKeyFromCID(unclesCID),
+			ParentHash:  uncle.ParentHash.String(),
+			BlockHash:   uncle.Hash().String(),
 			Reward:      uncleReward.String(),
+			Index:       int64(i),
 		})
 	}
+	return nil
 }
 
 // processArgs bundles arguments to processReceiptsAndTxs
@@ -485,7 +505,7 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
 		StatePath:   stateNode.Path,
 		Balance:     account.Balance.String(),
 		Nonce:       account.Nonce,
-		CodeHash:    account.CodeHash,
+		CodeHash:    hexutil.Encode(account.CodeHash),
 		StorageRoot: account.Root.String(),
 	}
 	sdi.fileWriter.upsertStateAccount(accountModel)

@@ -577,7 +577,7 @@ func testPublishAndIndexStateIPLDs(t *testing.T) {
 			HeaderID:    account.HeaderID,
 			StatePath:   stateNode.Path,
 			Balance:     "0",
-			CodeHash:    mocks.ContractCodeHash.Bytes(),
+			CodeHash:    mocks.ContractCodeHash.Hex(),
 			StorageRoot: mocks.ContractRoot,
 			Nonce:       1,
 		}, account)
@@ -592,7 +592,7 @@ func testPublishAndIndexStateIPLDs(t *testing.T) {
 			HeaderID:    account.HeaderID,
 			StatePath:   stateNode.Path,
 			Balance:     "1000",
-			CodeHash:    mocks.AccountCodeHash.Bytes(),
+			CodeHash:    mocks.AccountCodeHash.Hex(),
 			StorageRoot: mocks.AccountRoot,
 			Nonce:       0,
 		}, account)

@@ -687,8 +687,12 @@ func testPublishAndIndexStorageIPLDs(t *testing.T) {
 		t.Fatal(err)
 	}
 	require.Equal(t, 3, len(storageNodes))
-	expectedStorageNodes := []models.StorageNodeWithStateKeyModel{
-		{
+	gotStorageNodes := make(map[string]models.StorageNodeWithStateKeyModel, 3)
+	for _, model := range storageNodes {
+		gotStorageNodes[model.StorageKey] = model
+	}
+	expectedStorageNodes := map[string]models.StorageNodeWithStateKeyModel{
+		common.BytesToHash(mocks.RemovedLeafKey).Hex(): {
 			BlockNumber: mocks.BlockNumber.String(),
 			CID:         shared.RemovedNodeStorageCID,
 			NodeType:    3,
@@ -696,7 +700,7 @@ func testPublishAndIndexStorageIPLDs(t *testing.T) {
 			StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
 			Path:     []byte{'\x03'},
 		},
-		{
+		common.BytesToHash(mocks.Storage2LeafKey).Hex(): {
 			BlockNumber: mocks.BlockNumber.String(),
 			CID:         shared.RemovedNodeStorageCID,
 			NodeType:    3,
@@ -704,7 +708,7 @@ func testPublishAndIndexStorageIPLDs(t *testing.T) {
 			StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(),
 			Path:     []byte{'\x0e'},
 		},
-		{
+		common.BytesToHash(mocks.Storage3LeafKey).Hex(): {
 			BlockNumber: mocks.BlockNumber.String(),
 			CID:         shared.RemovedNodeStorageCID,
 			NodeType:    3,
@@ -713,15 +717,15 @@ func testPublishAndIndexStorageIPLDs(t *testing.T) {
 			Path:     []byte{'\x0f'},
 		},
 	}
-	for idx, storageNode := range storageNodes {
-		require.Equal(t, expectedStorageNodes[idx], storageNode)
+	for storageKey, storageNode := range gotStorageNodes {
+		require.Equal(t, expectedStorageNodes[storageKey], storageNode)
 		dc, err = cid.Decode(storageNode.CID)
 		if err != nil {
 			t.Fatal(err)
 		}
 		mhKey = dshelp.MultihashToDsKey(dc.Hash())
 		prefixedKey = blockstore.BlockPrefix.String() + mhKey.String()
-		require.Equal(t, shared.RemovedNodeMhKey, prefixedKey, mocks.BlockNumber.Uint64())
+		require.Equal(t, shared.RemovedNodeMhKey, prefixedKey)
 		err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
 		if err != nil {
 			t.Fatal(err)
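
The test refactor above replaces an order-dependent slice comparison with maps keyed by storage key, so the assertions no longer depend on the row order returned by the DB. The pattern in isolation (illustrative types, not the repo's):

```go
package main

import "fmt"

type row struct {
	Key   string
	Value int
}

func main() {
	// Query results may arrive in any order (e.g. no ORDER BY clause).
	got := []row{{"b", 2}, {"a", 1}}
	expected := map[string]row{"a": {"a", 1}, "b": {"b", 2}}

	// Index the results by their natural key, then compare key-wise.
	byKey := make(map[string]row, len(got))
	for _, r := range got {
		byKey[r.Key] = r
	}
	for k, r := range byKey {
		fmt.Println(k, r == expected[k]) // order-independent: both print true
	}
}
```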

@@ -143,11 +143,11 @@ const (
 	ipldInsert = "INSERT INTO public.blocks (block_number, key, data) VALUES ('%s', '%s', '\\x%x');\n"
 
 	headerInsert = "INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, " +
-		"state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) VALUES " +
+		"state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, mh_key, times_validated, coinbase) VALUES " +
 		"('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '\\x%x', %d, '%s', %d, '%s');\n"
 
-	uncleInsert = "INSERT INTO eth.uncle_cids (block_number, block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES " +
-		"('%s', '%s', '%s', '%s', '%s', '%s', '%s');\n"
+	uncleInsert = "INSERT INTO eth.uncle_cids (block_number, block_hash, header_id, parent_hash, cid, reward, mh_key, index) VALUES " +
+		"('%s', '%s', '%s', '%s', '%s', '%s', '%s', %d);\n"
 
 	txInsert = "INSERT INTO eth.transaction_cids (block_number, header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type, " +
 		"value) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', %d, '%s', '\\x%x', %d, '%s');\n"
@@ -165,7 +165,7 @@ const (
 		"VALUES ('%s', '%s', '%s', '%s', '\\x%x', %d, %t, '%s');\n"
 
 	accountInsert = "INSERT INTO eth.state_accounts (block_number, header_id, state_path, balance, nonce, code_hash, storage_root) " +
-		"VALUES ('%s', '%s', '\\x%x', '%s', %d, '\\x%x', '%s');\n"
+		"VALUES ('%s', '%s', '\\x%x', '%s', %d, '%s', '%s');\n"
 
 	storageInsert = "INSERT INTO eth.storage_cids (block_number, header_id, state_path, storage_leaf_key, cid, storage_path, " +
 		"node_type, diff, mh_key) VALUES ('%s', '%s', '\\x%x', '%s', '%s', '\\x%x', %d, %t, '%s');\n"
@@ -212,14 +212,14 @@ func (sqw *SQLWriter) upsertIPLDRaw(blockNumber string, codec, mh uint64, raw []
 func (sqw *SQLWriter) upsertHeaderCID(header models.HeaderModel) {
 	stmt := fmt.Sprintf(headerInsert, header.BlockNumber, header.BlockHash, header.ParentHash, header.CID,
 		header.TotalDifficulty, header.NodeID, header.Reward, header.StateRoot, header.TxRoot,
-		header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, header.MhKey, 1, header.Coinbase)
+		header.RctRoot, header.UnclesHash, header.Bloom, header.Timestamp, header.MhKey, 1, header.Coinbase)
 	sqw.stmts <- []byte(stmt)
 	indexerMetrics.blocks.Inc(1)
 }
 
 func (sqw *SQLWriter) upsertUncleCID(uncle models.UncleModel) {
 	sqw.stmts <- []byte(fmt.Sprintf(uncleInsert, uncle.BlockNumber, uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID,
-		uncle.Reward, uncle.MhKey))
+		uncle.Reward, uncle.MhKey, uncle.Index))
 }
 
 func (sqw *SQLWriter) upsertTransactionCID(transaction models.TxModel) {
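
To see what the updated uncleInsert template emits in SQL-file mode, a small fmt.Sprintf sketch; every value below is invented:

```go
package main

import "fmt"

// uncleInsert as changed in this diff; note the trailing index column and %d verb.
const uncleInsert = "INSERT INTO eth.uncle_cids (block_number, block_hash, header_id, parent_hash, cid, reward, mh_key, index) VALUES " +
	"('%s', '%s', '%s', '%s', '%s', '%s', '%s', %d);\n"

func main() {
	stmt := fmt.Sprintf(uncleInsert,
		"14000000",            // block_number
		"0xabc...",            // uncle block_hash
		"0xdef...",            // header_id of the including block
		"0x123...",            // parent_hash
		"bagiacgza...",        // cid
		"1250000000000000000", // reward in wei
		"/blocks/...",         // mh_key
		0,                     // index of the uncle within the block
	)
	fmt.Print(stmt)
}
```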

@@ -49,7 +49,7 @@ var TableHeader = Table{
 		{name: "state_root", dbType: varchar},
 		{name: "tx_root", dbType: varchar},
 		{name: "receipt_root", dbType: varchar},
-		{name: "uncle_root", dbType: varchar},
+		{name: "uncles_hash", dbType: varchar},
 		{name: "bloom", dbType: bytea},
 		{name: "timestamp", dbType: numeric},
 		{name: "mh_key", dbType: text},
@@ -97,6 +97,7 @@ var TableUncle = Table{
 		{name: "cid", dbType: text},
 		{name: "reward", dbType: numeric},
 		{name: "mh_key", dbType: text},
+		{name: "index", dbType: integer},
 	},
 }
 
@@ -170,7 +171,7 @@ var TableStateAccount = Table{
 		{name: "state_path", dbType: bytea},
 		{name: "balance", dbType: numeric},
 		{name: "nonce", dbType: bigint},
-		{name: "code_hash", dbType: bytea},
+		{name: "code_hash", dbType: varchar},
 		{name: "storage_root", dbType: varchar},
 	},
 }

@@ -20,16 +20,21 @@
 package sql
 
 import (
+	"bytes"
 	"context"
 	"fmt"
 	"math/big"
 	"time"
 
+	blockstore "github.com/ipfs/go-ipfs-blockstore"
+	dshelp "github.com/ipfs/go-ipfs-ds-help"
+
 	"github.com/ipfs/go-cid"
 	node "github.com/ipfs/go-ipld-format"
+	"github.com/multiformats/go-multihash"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
@@ -100,7 +105,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
 	}
 
 	// Generate the block iplds
-	headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
+	headerNode, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
 	if err != nil {
 		return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
 	}
@@ -201,7 +206,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
 	traceMsg += fmt.Sprintf("header processing time: %s\r\n", tDiff.String())
 	t = time.Now()
 	// Publish and index uncles
-	err = sdi.processUncles(blockTx, headerID, block.Number(), uncleNodes)
+	err = sdi.processUncles(blockTx, headerID, block.Number(), block.UncleHash(), block.Uncles())
 	if err != nil {
 		return nil, err
 	}
@@ -258,32 +263,46 @@ func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, he
 		StateRoot:       header.Root.String(),
 		RctRoot:         header.ReceiptHash.String(),
 		TxRoot:          header.TxHash.String(),
-		UncleRoot:       header.UncleHash.String(),
+		UnclesHash:      header.UncleHash.String(),
 		Timestamp:       header.Time,
 		Coinbase:        header.Coinbase.String(),
 	})
 }
 
 // processUncles publishes and indexes uncle IPLDs in Postgres
-func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNumber *big.Int, uncleNodes []*ipld2.EthHeader) error {
+func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNumber *big.Int, unclesHash common.Hash, uncles []*types.Header) error {
 	// publish and index uncles
-	for _, uncleNode := range uncleNodes {
-		tx.cacheIPLD(uncleNode)
+	uncleEncoding, err := rlp.EncodeToBytes(uncles)
+	if err != nil {
+		return err
+	}
+	preparedHash := crypto.Keccak256Hash(uncleEncoding)
+	if !bytes.Equal(preparedHash.Bytes(), unclesHash.Bytes()) {
+		return fmt.Errorf("derived uncles hash (%s) does not match the hash in the header (%s)", preparedHash.Hex(), unclesHash.Hex())
+	}
+	unclesCID, err := ipld2.RawdataToCid(ipld2.MEthHeaderList, uncleEncoding, multihash.KECCAK_256)
+	if err != nil {
+		return err
+	}
+	prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(unclesCID.Hash()).String()
+	tx.cacheDirect(prefixedKey, uncleEncoding)
+	for i, uncle := range uncles {
 		var uncleReward *big.Int
 		// in PoA networks uncle reward is 0
 		if sdi.chainConfig.Clique != nil {
 			uncleReward = big.NewInt(0)
 		} else {
-			uncleReward = shared.CalcUncleMinerReward(blockNumber.Uint64(), uncleNode.Number.Uint64())
+			uncleReward = shared.CalcUncleMinerReward(blockNumber.Uint64(), uncle.Number.Uint64())
 		}
 		uncle := models.UncleModel{
 			BlockNumber: blockNumber.String(),
 			HeaderID:    headerID,
-			CID:         uncleNode.Cid().String(),
-			MhKey:       shared.MultihashKeyFromCID(uncleNode.Cid()),
-			ParentHash:  uncleNode.ParentHash.String(),
-			BlockHash:   uncleNode.Hash().String(),
+			CID:         unclesCID.String(),
+			MhKey:       shared.MultihashKeyFromCID(unclesCID),
+			ParentHash:  uncle.ParentHash.String(),
+			BlockHash:   uncle.Hash().String(),
 			Reward:      uncleReward.String(),
+			Index:       int64(i),
 		}
 		if err := sdi.dbWriter.upsertUncleCID(tx.dbtx, uncle); err != nil {
 			return err
@@ -500,7 +519,7 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
 		StatePath:   stateNode.Path,
 		Balance:     account.Balance.String(),
 		Nonce:       account.Nonce,
-		CodeHash:    account.CodeHash,
+		CodeHash:    hexutil.Encode(account.CodeHash),
 		StorageRoot: account.Root.String(),
 	}
 	if err := sdi.dbWriter.upsertStateAccount(tx.dbtx, accountModel); err != nil {

@@ -356,7 +356,7 @@ func setupTestDataNonCanonical(t *testing.T) {
 func testPublishAndIndexHeaderNonCanonical(t *testing.T) {
 	// check indexed headers
 	pgStr := `SELECT CAST(block_number as TEXT), block_hash, cid, cast(td AS TEXT), cast(reward AS TEXT),
-	tx_root, receipt_root, uncle_root, coinbase
+	tx_root, receipt_root, uncles_hash, coinbase
 	FROM eth.header_cids
 	ORDER BY block_number`
 	headerRes := make([]models.HeaderModel, 0)
@@ -376,7 +376,7 @@ func testPublishAndIndexHeaderNonCanonical(t *testing.T) {
 			TotalDifficulty: mockBlock.Difficulty().String(),
 			TxRoot:          mockBlock.TxHash().String(),
 			RctRoot:         mockBlock.ReceiptHash().String(),
-			UncleRoot:       mockBlock.UncleHash().String(),
+			UnclesHash:      mockBlock.UncleHash().String(),
 			Coinbase:        mocks.MockHeader.Coinbase.String(),
 		},
 		{
@@ -386,7 +386,7 @@ func testPublishAndIndexHeaderNonCanonical(t *testing.T) {
 			TotalDifficulty: mockNonCanonicalBlock.Difficulty().String(),
 			TxRoot:          mockNonCanonicalBlock.TxHash().String(),
 			RctRoot:         mockNonCanonicalBlock.ReceiptHash().String(),
-			UncleRoot:       mockNonCanonicalBlock.UncleHash().String(),
+			UnclesHash:      mockNonCanonicalBlock.UncleHash().String(),
 			Coinbase:        mocks.MockNonCanonicalHeader.Coinbase.String(),
 		},
 		{
@@ -396,7 +396,7 @@ func testPublishAndIndexHeaderNonCanonical(t *testing.T) {
 			TotalDifficulty: mockNonCanonicalBlock2.Difficulty().String(),
 			TxRoot:          mockNonCanonicalBlock2.TxHash().String(),
 			RctRoot:         mockNonCanonicalBlock2.ReceiptHash().String(),
-			UncleRoot:       mockNonCanonicalBlock2.UncleHash().String(),
+			UnclesHash:      mockNonCanonicalBlock2.UncleHash().String(),
 			Coinbase:        mocks.MockNonCanonicalHeader2.Coinbase.String(),
 		},
 	}

@@ -54,7 +54,6 @@ type Statements interface {
 	InsertStorageStm() string
 	InsertIPLDStm() string
 	InsertIPLDsStm() string
-	InsertKnownGapsStm() string
 }
 
 // Tx interface to accommodate different concrete SQL transaction types

@@ -435,7 +435,7 @@ func TestPGXIndexer(t *testing.T) {
 			HeaderID:    account.HeaderID,
 			StatePath:   stateNode.Path,
 			Balance:     "0",
-			CodeHash:    mocks.ContractCodeHash.Bytes(),
+			CodeHash:    mocks.ContractCodeHash.Hex(),
 			StorageRoot: mocks.ContractRoot,
 			Nonce:       1,
 		}, account)
@@ -450,7 +450,7 @@ func TestPGXIndexer(t *testing.T) {
 			HeaderID:    account.HeaderID,
 			StatePath:   stateNode.Path,
 			Balance:     "1000",
-			CodeHash:    mocks.AccountCodeHash.Bytes(),
+			CodeHash:    mocks.AccountCodeHash.Hex(),
 			StorageRoot: mocks.AccountRoot,
 			Nonce:       0,
 		}, account)
@@ -548,8 +548,12 @@ func TestPGXIndexer(t *testing.T) {
 			t.Fatal(err)
 		}
 		require.Equal(t, 3, len(storageNodes))
-		expectedStorageNodes := []models.StorageNodeWithStateKeyModel{
-			{
+		gotStorageNodes := make(map[string]models.StorageNodeWithStateKeyModel, 3)
+		for _, model := range storageNodes {
+			gotStorageNodes[model.StorageKey] = model
+		}
+		expectedStorageNodes := map[string]models.StorageNodeWithStateKeyModel{
+			common.BytesToHash(mocks.RemovedLeafKey).Hex(): {
 				BlockNumber: mocks.BlockNumber.String(),
 				CID:         shared.RemovedNodeStorageCID,
 				NodeType:    3,
@@ -557,7 +561,7 @@ func TestPGXIndexer(t *testing.T) {
 				StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
 				Path:     []byte{'\x03'},
 			},
-			{
+			common.BytesToHash(mocks.Storage2LeafKey).Hex(): {
 				BlockNumber: mocks.BlockNumber.String(),
 				CID:         shared.RemovedNodeStorageCID,
 				NodeType:    3,
@@ -565,7 +569,7 @@ func TestPGXIndexer(t *testing.T) {
 				StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(),
 				Path:     []byte{'\x0e'},
 			},
-			{
+			common.BytesToHash(mocks.Storage3LeafKey).Hex(): {
 				BlockNumber: mocks.BlockNumber.String(),
 				CID:         shared.RemovedNodeStorageCID,
 				NodeType:    3,
@@ -574,8 +578,8 @@ func TestPGXIndexer(t *testing.T) {
 				Path:     []byte{'\x0f'},
 			},
 		}
-		for idx, storageNode := range storageNodes {
-			require.Equal(t, expectedStorageNodes[idx], storageNode)
+		for storageKey, storageNode := range gotStorageNodes {
+			require.Equal(t, expectedStorageNodes[storageKey], storageNode)
 			dc, err = cid.Decode(storageNode.CID)
 			if err != nil {
 				t.Fatal(err)

@@ -48,10 +48,10 @@ func ResolveDriverType(str string) (DriverType, error) {
 // DefaultConfig are default parameters for connecting to a Postgres sql
 var DefaultConfig = Config{
 	Hostname:     "localhost",
-	Port:         8077,
+	Port:         5432,
 	DatabaseName: "vulcanize_testing",
-	Username:     "vdbm",
-	Password:     "password",
+	Username:     "iannorden",
+	Password:     "",
 }
 
 // Config holds params for a Postgres db

@@ -38,15 +38,15 @@ type DB struct {
 // InsertHeaderStm satisfies the sql.Statements interface
 // Stm == Statement
 func (db *DB) InsertHeaderStm() string {
-	return `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase)
+	return `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, mh_key, times_validated, coinbase)
 			VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
-			ON CONFLICT (block_hash, block_number) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)`
+			ON CONFLICT (block_hash, block_number) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, mh_key, times_validated, coinbase) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)`
 }
 
 // InsertUncleStm satisfies the sql.Statements interface
 func (db *DB) InsertUncleStm() string {
-	return `INSERT INTO eth.uncle_cids (block_number, block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
-			ON CONFLICT (block_hash, block_number) DO NOTHING`
+	return `INSERT INTO eth.uncle_cids (block_number, block_hash, header_id, parent_hash, cid, reward, mh_key, index) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+			ON CONFLICT (block_hash, block_number, index) DO NOTHING`
 }
 
 // InsertTxStm satisfies the sql.Statements interface
@@ -100,10 +100,3 @@ func (db *DB) InsertIPLDStm() string {
 func (db *DB) InsertIPLDsStm() string {
 	return `INSERT INTO public.blocks (block_number, key, data) VALUES (unnest($1::BIGINT[]), unnest($2::TEXT[]), unnest($3::BYTEA[])) ON CONFLICT DO NOTHING`
 }
-
-// InsertKnownGapsStm satisfies the sql.Statements interface
-func (db *DB) InsertKnownGapsStm() string {
-	return `INSERT INTO eth_meta.known_gaps (starting_block_number, ending_block_number, checked_out, processing_key) VALUES ($1, $2, $3, $4)
-			ON CONFLICT (starting_block_number) DO UPDATE SET (ending_block_number, processing_key) = ($2, $4)
-			WHERE eth_meta.known_gaps.ending_block_number <= $2`
-}

@@ -428,7 +428,7 @@ func TestSQLXIndexer(t *testing.T) {
 			HeaderID:    account.HeaderID,
 			StatePath:   stateNode.Path,
 			Balance:     "0",
-			CodeHash:    mocks.ContractCodeHash.Bytes(),
+			CodeHash:    mocks.ContractCodeHash.Hex(),
 			StorageRoot: mocks.ContractRoot,
 			Nonce:       1,
 		}, account)
@@ -443,7 +443,7 @@ func TestSQLXIndexer(t *testing.T) {
 			HeaderID:    account.HeaderID,
 			StatePath:   stateNode.Path,
 			Balance:     "1000",
-			CodeHash:    mocks.AccountCodeHash.Bytes(),
+			CodeHash:    mocks.AccountCodeHash.Hex(),
 			StorageRoot: mocks.AccountRoot,
 			Nonce:       0,
 		}, account)
@@ -541,8 +541,12 @@ func TestSQLXIndexer(t *testing.T) {
 			t.Fatal(err)
 		}
 		require.Equal(t, 3, len(storageNodes))
-		expectedStorageNodes := []models.StorageNodeWithStateKeyModel{
-			{
+		gotStorageNodes := make(map[string]models.StorageNodeWithStateKeyModel, 3)
+		for _, model := range storageNodes {
+			gotStorageNodes[model.StorageKey] = model
+		}
+		expectedStorageNodes := map[string]models.StorageNodeWithStateKeyModel{
+			common.BytesToHash(mocks.RemovedLeafKey).Hex(): {
 				BlockNumber: mocks.BlockNumber.String(),
 				CID:         shared.RemovedNodeStorageCID,
 				NodeType:    3,
@@ -550,7 +554,7 @@ func TestSQLXIndexer(t *testing.T) {
 				StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
 				Path:     []byte{'\x03'},
 			},
-			{
+			common.BytesToHash(mocks.Storage2LeafKey).Hex(): {
 				BlockNumber: mocks.BlockNumber.String(),
 				CID:         shared.RemovedNodeStorageCID,
 				NodeType:    3,
@@ -558,7 +562,7 @@ func TestSQLXIndexer(t *testing.T) {
 				StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(),
 				Path:     []byte{'\x0e'},
 			},
-			{
+			common.BytesToHash(mocks.Storage3LeafKey).Hex(): {
 				BlockNumber: mocks.BlockNumber.String(),
 				CID:         shared.RemovedNodeStorageCID,
 				NodeType:    3,
@@ -567,8 +571,8 @@ func TestSQLXIndexer(t *testing.T) {
 				Path:     []byte{'\x0f'},
 			},
 		}
-		for idx, storageNode := range storageNodes {
-			require.Equal(t, expectedStorageNodes[idx], storageNode)
+		for storageKey, storageNode := range gotStorageNodes {
+			require.Equal(t, expectedStorageNodes[storageKey], storageNode)
 			dc, err = cid.Decode(storageNode.CID)
 			if err != nil {
 				t.Fatal(err)

@@ -45,14 +45,14 @@ func (w *Writer) Close() error {
 }
 
 /*
-INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase)
+INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, mh_key, times_validated, coinbase)
 VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
-ON CONFLICT (block_hash, block_number) DO UPDATE SET (block_number, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = ($1, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)
+ON CONFLICT (block_hash, block_number) DO UPDATE SET (block_number, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, mh_key, times_validated, coinbase) = ($1, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)
 */
 func (w *Writer) upsertHeaderCID(tx Tx, header models.HeaderModel) error {
 	_, err := tx.Exec(w.db.Context(), w.db.InsertHeaderStm(),
 		header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, w.db.NodeID(),
-		header.Reward, header.StateRoot, header.TxRoot, header.RctRoot, header.UncleRoot, header.Bloom,
+		header.Reward, header.StateRoot, header.TxRoot, header.RctRoot, header.UnclesHash, header.Bloom,
 		header.Timestamp, header.MhKey, 1, header.Coinbase)
 	if err != nil {
 		return fmt.Errorf("error upserting header_cids entry: %v", err)
@@ -62,12 +62,12 @@ func (w *Writer) upsertHeaderCID(tx Tx, header models.HeaderModel) error {
 }
 
 /*
-INSERT INTO eth.uncle_cids (block_number, block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
+INSERT INTO eth.uncle_cids (block_number, block_hash, header_id, parent_hash, cid, reward, mh_key, index) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
 ON CONFLICT (block_hash, block_number) DO NOTHING
 */
 func (w *Writer) upsertUncleCID(tx Tx, uncle models.UncleModel) error {
 	_, err := tx.Exec(w.db.Context(), w.db.InsertUncleStm(),
-		uncle.BlockNumber, uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey)
+		uncle.BlockNumber, uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey, uncle.Index)
 	if err != nil {
 		return fmt.Errorf("error upserting uncle_cids entry: %v", err)
 	}

@@ -125,35 +125,25 @@ func FromBlockJSON(r io.Reader) (*EthHeader, []*EthTx, []*EthTxTrie, error) {
 
 // FromBlockAndReceipts takes a block and processes it
 // to return it a set of IPLD nodes for further processing.
-func FromBlockAndReceipts(block *types.Block, receipts []*types.Receipt) (*EthHeader, []*EthHeader, []*EthTx, []*EthTxTrie, []*EthReceipt, []*EthRctTrie, [][]node.Node, [][]cid.Cid, []cid.Cid, error) {
+func FromBlockAndReceipts(block *types.Block, receipts []*types.Receipt) (*EthHeader, []*EthTx, []*EthTxTrie, []*EthReceipt, []*EthRctTrie, [][]node.Node, [][]cid.Cid, []cid.Cid, error) {
 	// Process the header
 	headerNode, err := NewEthHeader(block.Header())
 	if err != nil {
-		return nil, nil, nil, nil, nil, nil, nil, nil, nil, err
-	}
-
-	// Process the uncles
-	uncleNodes := make([]*EthHeader, len(block.Uncles()))
-	for i, uncle := range block.Uncles() {
-		uncleNode, err := NewEthHeader(uncle)
-		if err != nil {
-			return nil, nil, nil, nil, nil, nil, nil, nil, nil, err
-		}
-		uncleNodes[i] = uncleNode
+		return nil, nil, nil, nil, nil, nil, nil, nil, err
 	}
 
 	// Process the txs
 	txNodes, txTrieNodes, err := processTransactions(block.Transactions(),
 		block.Header().TxHash[:])
 	if err != nil {
-		return nil, nil, nil, nil, nil, nil, nil, nil, nil, err
+		return nil, nil, nil, nil, nil, nil, nil, nil, err
 	}
 
 	// Process the receipts and logs
 	rctNodes, tctTrieNodes, logTrieAndLogNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := processReceiptsAndLogs(receipts,
 		block.Header().ReceiptHash[:])
 
-	return headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, tctTrieNodes, logTrieAndLogNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err
+	return headerNode, txNodes, txTrieNodes, rctNodes, tctTrieNodes, logTrieAndLogNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err
 }
 
 // processTransactions will take the found transactions in a parsed block body

@@ -34,6 +34,7 @@ type UncleBatch struct {
 	CIDs       []string
 	MhKeys     []string
 	Rewards    []string
+	Indexes    []int64
 }
 
 // TxBatch holds the arguments for a batch insert of tx data
@@ -108,7 +109,7 @@ type AccountBatch struct {
 	StatePaths   [][]byte
 	Balances     []string
 	Nonces       []uint64
-	CodeHashes   [][]byte
+	CodeHashes   []string
 	StorageRoots []string
 }
 

@@ -36,7 +36,7 @@ type HeaderModel struct {
 	NodeID          string `db:"node_id"`
 	Reward          string `db:"reward"`
 	StateRoot       string `db:"state_root"`
-	UncleRoot       string `db:"uncle_root"`
+	UnclesHash      string `db:"uncles_hash"`
 	TxRoot          string `db:"tx_root"`
 	RctRoot         string `db:"receipt_root"`
 	Bloom           []byte `db:"bloom"`
@@ -54,6 +54,7 @@ type UncleModel struct {
 	CID         string `db:"cid"`
 	MhKey       string `db:"mh_key"`
 	Reward      string `db:"reward"`
+	Index       int64  `db:"index"`
 }
 
 // TxModel is the db model for eth.transaction_cids
@@ -140,7 +141,7 @@ type StateAccountModel struct {
 	StatePath   []byte `db:"state_path"`
 	Balance     string `db:"balance"`
 	Nonce       uint64 `db:"nonce"`
-	CodeHash    []byte `db:"code_hash"`
+	CodeHash    string `db:"code_hash"`
 	StorageRoot string `db:"storage_root"`
 }
 
@@ -159,11 +160,3 @@ type LogsModel struct {
 	Topic2 string `db:"topic2"`
 	Topic3 string `db:"topic3"`
 }
-
-// KnownGaps is the data structure for eth_meta.known_gaps
-type KnownGapsModel struct {
-	StartingBlockNumber string `db:"starting_block_number"`
-	EndingBlockNumber   string `db:"ending_block_number"`
-	CheckedOut          bool   `db:"checked_out"`
-	ProcessingKey       int64  `db:"processing_key"`
-}
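
The db struct tags above drive column-to-field mapping when rows are scanned, which is why the uncle_root -> uncles_hash column rename is paired with a tag change on the model. A generic sqlx sketch of the mechanism (the connection string and query are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // driver choice is an assumption for this sketch
)

type HeaderModel struct {
	BlockNumber string `db:"block_number"`
	UnclesHash  string `db:"uncles_hash"`
}

func main() {
	db, err := sqlx.Connect("postgres", "dbname=vulcanize_testing sslmode=disable")
	if err != nil {
		panic(err)
	}
	var h HeaderModel
	// sqlx matches the selected columns to struct fields via the db tags.
	err = db.Get(&h, `SELECT CAST(block_number AS TEXT) block_number, uncles_hash FROM eth.header_cids LIMIT 1`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", h)
}
```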
|
@ -1,273 +0,0 @@
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package statediff
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
||||
)
|
||||
|
||||
var (
|
||||
knownGapsInsert = "INSERT INTO eth_meta.known_gaps (starting_block_number, ending_block_number, checked_out, processing_key) " +
|
||||
"VALUES ('%s', '%s', %t, %d) " +
|
||||
"ON CONFLICT (starting_block_number) DO UPDATE SET (ending_block_number, processing_key) = ('%s', %d) " +
|
||||
"WHERE eth_meta.known_gaps.ending_block_number <= '%s';\n"
|
||||
dbQueryString = "SELECT MAX(block_number) FROM eth.header_cids"
|
||||
defaultWriteFilePath = "./known_gaps.sql"
|
||||
)
|
||||
|
||||
type KnownGapsState struct {
|
||||
// Should we check for gaps by looking at the DB and comparing the latest block with head
|
||||
checkForGaps bool
|
||||
// Arbitrary processingKey that can be used down the line to differentiate different geth nodes.
|
||||
processingKey int64
|
||||
// This number indicates the expected difference between blocks.
|
||||
// Currently, this is 1 since the geth node processes each block. But down the road this can be used in
|
||||
// Tandom with the processingKey to differentiate block processing logic.
|
||||
expectedDifference *big.Int
|
||||
// Indicates if Geth is in an error state
|
||||
// This is used to indicate the right time to upserts
|
||||
errorState bool
|
||||
// This array keeps track of errorBlocks as they occur.
|
||||
// When the errorState is false again, we can process these blocks.
|
||||
// Do we need a list, can we have /KnownStartErrorBlock and knownEndErrorBlock ints instead?
|
||||
knownErrorBlocks []*big.Int
|
||||
// The filepath to write SQL statements if we can't connect to the DB.
|
||||
writeFilePath string
|
||||
// DB object to use for reading and writing to the DB
|
||||
db sql.Database
|
||||
//Do we have entries in the local sql file that need to be written to the DB
|
||||
sqlFileWaitingForWrite bool
|
||||
// Metrics object used to track metrics.
|
||||
statediffMetrics statediffMetricsHandles
|
||||
}
|
||||
|
||||
// Create a new KnownGapsState struct, currently unused.
|
||||
func NewKnownGapsState(checkForGaps bool, processingKey int64, expectedDifference *big.Int, errorState bool, writeFilePath string, db sql.Database, statediffMetrics statediffMetricsHandles) *KnownGapsState {
|
||||
return &KnownGapsState{
|
||||
checkForGaps: checkForGaps,
|
||||
processingKey: processingKey,
|
||||
expectedDifference: expectedDifference,
|
||||
errorState: errorState,
|
||||
writeFilePath: writeFilePath,
|
||||
db: db,
|
||||
statediffMetrics: statediffMetrics,
|
||||
}
|
||||
}
|
||||
|
||||
func minMax(array []*big.Int) (*big.Int, *big.Int) {
|
||||
var max *big.Int = array[0]
|
||||
var min *big.Int = array[0]
|
||||
for _, value := range array {
|
||||
if max.Cmp(value) == -1 {
|
||||
max = value
|
||||
}
|
||||
if min.Cmp(value) == 1 {
|
||||
min = value
|
||||
}
|
||||
}
|
||||
return min, max
|
||||
}
|
||||
|
||||
// This function actually performs the write of the known gaps. It will try to do the following, it only goes to the next step if a failure occurs.
|
||||
// 1. Write to the DB directly.
|
||||
// 2. Write to sql file locally.
|
||||
// 3. Write to prometheus directly.
|
||||
// 4. Logs and error.
|
||||
func (kg *KnownGapsState) pushKnownGaps(startingBlockNumber *big.Int, endingBlockNumber *big.Int, checkedOut bool, processingKey int64) error {
|
||||
if startingBlockNumber.Cmp(endingBlockNumber) == 1 {
|
||||
return fmt.Errorf("Starting Block %d, is greater than ending block %d", startingBlockNumber, endingBlockNumber)
|
||||
}
|
||||
knownGap := models.KnownGapsModel{
|
||||
StartingBlockNumber: startingBlockNumber.String(),
|
||||
EndingBlockNumber: endingBlockNumber.String(),
|
||||
CheckedOut: checkedOut,
|
||||
ProcessingKey: processingKey,
|
||||
}
|
||||
|
||||
log.Info("Updating Metrics for the start and end block")
|
||||
kg.statediffMetrics.knownGapStart.Update(startingBlockNumber.Int64())
|
||||
kg.statediffMetrics.knownGapEnd.Update(endingBlockNumber.Int64())
|
||||
|
||||
var writeErr error
|
||||
log.Info("Writing known gaps to the DB")
|
||||
if kg.db != nil {
|
||||
dbErr := kg.upsertKnownGaps(knownGap)
|
||||
if dbErr != nil {
|
||||
log.Warn("Error writing knownGaps to DB, writing them to file instead")
|
||||
writeErr = kg.upsertKnownGapsFile(knownGap)
|
||||
}
|
||||
} else {
|
||||
writeErr = kg.upsertKnownGapsFile(knownGap)
|
||||
}
|
||||
if writeErr != nil {
|
||||
log.Error("Unsuccessful when writing to a file", "Error", writeErr)
|
||||
log.Error("Updating Metrics for the start and end error block")
|
||||
log.Error("Unable to write the following Gaps to DB or File", "startBlock", startingBlockNumber, "endBlock", endingBlockNumber)
|
||||
kg.statediffMetrics.knownGapErrorStart.Update(startingBlockNumber.Int64())
|
||||
kg.statediffMetrics.knownGapErrorEnd.Update(endingBlockNumber.Int64())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// This is a simple wrapper function to write gaps from a knownErrorBlocks array.
|
||||
func (kg *KnownGapsState) captureErrorBlocks(knownErrorBlocks []*big.Int) {
|
||||
startErrorBlock, endErrorBlock := minMax(knownErrorBlocks)
|
||||
|
||||
log.Warn("The following Gaps were found", "knownErrorBlocks", knownErrorBlocks)
|
||||
log.Warn("Updating known Gaps table", "startErrorBlock", startErrorBlock, "endErrorBlock", endErrorBlock, "processingKey", kg.processingKey)
|
||||
kg.pushKnownGaps(startErrorBlock, endErrorBlock, false, kg.processingKey)
|
||||
}
|
||||
|
||||
// Users provide the latestBlockInDb and the latestBlockOnChain
|
||||
// as well as the expected difference. This function does some simple math.
|
||||
// The expected difference for the time being is going to be 1, but as we run
|
||||
// More geth nodes, the expected difference might fluctuate.
|
||||
func isGap(latestBlockInDb *big.Int, latestBlockOnChain *big.Int, expectedDifference *big.Int) bool {
|
||||
latestBlock := big.NewInt(0)
|
||||
if latestBlock.Sub(latestBlockOnChain, expectedDifference).Cmp(latestBlockInDb) != 0 {
|
||||
log.Warn("We found a gap", "latestBlockInDb", latestBlockInDb, "latestBlockOnChain", latestBlockOnChain, "expectedDifference", expectedDifference)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}

// This function will check for gaps and update the DB if gaps are found.
// The processingKey will currently be set to 0, but as we start to leverage horizontal scaling,
// it might be a useful parameter to update depending on the geth node.
// TODO:
// Remove the return value
// Write to file if err in writing to DB
func (kg *KnownGapsState) findAndUpdateGaps(latestBlockOnChain *big.Int, expectedDifference *big.Int, processingKey int64) error {
	// TODO: Make this global
	latestBlockInDb, err := kg.queryDbToBigInt(dbQueryString)
	if err != nil {
		return err
	}

	gapExists := isGap(latestBlockInDb, latestBlockOnChain, expectedDifference)
	if gapExists {
		startBlock := big.NewInt(0)
		endBlock := big.NewInt(0)
		startBlock.Add(latestBlockInDb, expectedDifference)
		endBlock.Sub(latestBlockOnChain, expectedDifference)

		log.Warn("Found gaps starting at", "startBlock", startBlock, "endingBlock", endBlock)
		err := kg.pushKnownGaps(startBlock, endBlock, false, processingKey)
		if err != nil {
			log.Error("We were unable to write the following gap to the DB", "startBlock", startBlock, "endBlock", endBlock, "error", err)
			return err
		}
	}

	return nil
}

// Upserts known gaps to the DB.
func (kg *KnownGapsState) upsertKnownGaps(knownGaps models.KnownGapsModel) error {
	_, err := kg.db.Exec(context.Background(), kg.db.InsertKnownGapsStm(),
		knownGaps.StartingBlockNumber, knownGaps.EndingBlockNumber, knownGaps.CheckedOut, knownGaps.ProcessingKey)
	if err != nil {
		return fmt.Errorf("error upserting known_gaps entry: %v", err)
	}
	log.Info("Successfully wrote gaps to the DB", "startBlock", knownGaps.StartingBlockNumber, "endBlock", knownGaps.EndingBlockNumber)
	return nil
}

// Write the upsert statement into a local file.
func (kg *KnownGapsState) upsertKnownGapsFile(knownGaps models.KnownGapsModel) error {
	insertStmt := []byte(fmt.Sprintf(knownGapsInsert, knownGaps.StartingBlockNumber, knownGaps.EndingBlockNumber, knownGaps.CheckedOut, knownGaps.ProcessingKey,
		knownGaps.EndingBlockNumber, knownGaps.ProcessingKey, knownGaps.EndingBlockNumber))
	log.Info("Trying to write file")
	if kg.writeFilePath == "" {
		kg.writeFilePath = defaultWriteFilePath
	}
	f, err := os.OpenFile(kg.writeFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		log.Info("Unable to open a file for writing")
		return err
	}
	defer f.Close()

	if _, err = f.Write(insertStmt); err != nil {
		log.Info("Unable to write insert statement to file")
		return err
	}
	log.Info("Wrote the gaps to a local SQL file")
	kg.sqlFileWaitingForWrite = true
	return nil
}
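
The knownGapsInsert format string consumed here is defined elsewhere in the file and is not visible in this hunk. Judging from the seven arguments passed to fmt.Sprintf above, it is presumably an upsert along the following lines; this is a hypothetical reconstruction, and the conflict target, the boolean verb, and the update/WHERE clauses are all assumptions:

// Hypothetical sketch of knownGapsInsert (not the actual constant).
// The verb order matches the seven arguments above: start, end, checkedOut,
// processingKey for the insert, then end, processingKey, end for the update.
const knownGapsInsertSketch = `INSERT INTO eth_meta.known_gaps (starting_block_number, ending_block_number, checked_out, processing_key)
VALUES (%d, %d, %t, %d)
ON CONFLICT (starting_block_number) DO UPDATE SET
	(ending_block_number, processing_key) = (%d, %d)
	WHERE eth_meta.known_gaps.ending_block_number <= %d;
`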

func (kg *KnownGapsState) writeSqlFileStmtToDb() error {
	log.Info("Writing the local SQL file for KnownGaps to the DB")
	file, err := ioutil.ReadFile(kg.writeFilePath)
	if err != nil {
		log.Error("Unable to open the local SQL file for reading")
		return err
	}

	requests := strings.Split(string(file), ";")

	for _, request := range requests {
		_, err := kg.db.Exec(context.Background(), request)
		if err != nil {
			log.Error("Unable to run insert statement from file to the DB")
			return err
		}
	}
	if err := os.Truncate(kg.writeFilePath, 0); err != nil {
		log.Info("Failed to empty knownGaps file after inserting statements to the DB", "error", err)
	}
	kg.sqlFileWaitingForWrite = false
	return nil
}
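
One sharp edge worth noting: strings.Split on ";" always yields a trailing whitespace-only fragment (everything after the final semicolon), which the loop above hands to Exec as-is. A slightly hardened replay helper, offered as a sketch against the same sql.Database interface used above, would skip blank fragments:

import (
	"context"
	"strings"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
)

// execSQLStatements replays semicolon-separated statements, skipping the
// whitespace-only fragments that strings.Split produces around blank lines
// and after the final ";".
func execSQLStatements(ctx context.Context, db sql.Database, contents string) error {
	for _, request := range strings.Split(contents, ";") {
		stmt := strings.TrimSpace(request)
		if stmt == "" {
			continue
		}
		if _, err := db.Exec(ctx, stmt); err != nil {
			return err
		}
	}
	return nil
}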

// This is a simple wrapper function which will run QueryRow on the DB
func (kg *KnownGapsState) queryDb(queryString string) (string, error) {
	var ret string
	err := kg.db.QueryRow(context.Background(), queryString).Scan(&ret)
	if err != nil {
		log.Error(fmt.Sprint("Can't properly query the DB for query: ", queryString))
		return "", err
	}
	return ret, nil
}

// This function is a simple wrapper which will call queryDb, but the return value will be
// a big int instead of a string
func (kg *KnownGapsState) queryDbToBigInt(queryString string) (*big.Int, error) {
	ret := new(big.Int)
	res, err := kg.queryDb(queryString)
	if err != nil {
		return ret, err
	}
	ret, ok := ret.SetString(res, 10)
	if !ok {
		log.Error(fmt.Sprint("Can't turn the res ", res, " into a bigInt"))
		return ret, fmt.Errorf("can't turn %s into a bigInt", res)
	}
	return ret, nil
}
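
The dbQueryString constant that findAndUpdateGaps feeds into this wrapper is not shown in this diff; the test file below queries the latest indexed header directly, so it presumably looks similar. A small usage sketch under that assumption:

// Sketch: resolve the latest indexed block via queryDbToBigInt. The query
// text is borrowed from the test below; the real dbQueryString constant is
// assumed to be equivalent.
func latestIndexedBlock(kg *KnownGapsState) (*big.Int, error) {
	return kg.queryDbToBigInt("SELECT MAX(block_number) FROM eth.header_cids")
}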

@ -1,207 +0,0 @@
package statediff

import (
	"context"
	"fmt"
	"math/big"
	"os"
	"testing"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
	"github.com/stretchr/testify/require"
)

var (
	knownGapsFilePath = "./known_gaps.sql"
)

type gapValues struct {
	knownErrorBlocksStart int64
	knownErrorBlocksEnd   int64
	expectedDif           int64
	processingKey         int64
}

// Add clean db
// Test for failures when they are expected, when we go from a smaller block to a larger block
// We should no longer see the smaller block in the DB
func TestKnownGaps(t *testing.T) {
	tests := []gapValues{
		// Known gaps
		{knownErrorBlocksStart: 115, knownErrorBlocksEnd: 120, expectedDif: 1, processingKey: 1},
		// Same tests as above with a new expected difference
		{knownErrorBlocksStart: 1150, knownErrorBlocksEnd: 1200, expectedDif: 2, processingKey: 2},
		// Test update when the block number is larger
		{knownErrorBlocksStart: 1150, knownErrorBlocksEnd: 1204, expectedDif: 2, processingKey: 2},
		// Update when the processing key is different
		{knownErrorBlocksStart: 1150, knownErrorBlocksEnd: 1204, expectedDif: 2, processingKey: 10},
	}

	testWriteToDb(t, tests, true)
	testWriteToFile(t, tests, true)
	testFindAndUpdateGaps(t, true)
}

// Test writing blocks to the DB
func testWriteToDb(t *testing.T, tests []gapValues, wipeDbBeforeStart bool) {
	t.Log("Starting Write to DB test")
	db := setupDb(t)

	// Clear the table first; this is needed because we updated an entry to have a larger end block number,
	// so we can't find the original start and end block pair.
	if wipeDbBeforeStart {
		t.Log("Cleaning up eth_meta.known_gaps table")
		db.Exec(context.Background(), "DELETE FROM eth_meta.known_gaps")
	}

	for _, tc := range tests {
		// Create an array with knownGaps based on user inputs
		knownGaps := KnownGapsState{
			processingKey:      tc.processingKey,
			expectedDifference: big.NewInt(tc.expectedDif),
			db:                 db,
			statediffMetrics:   RegisterStatediffMetrics(metrics.DefaultRegistry),
		}
		service := &Service{
			KnownGaps: knownGaps,
		}
		knownErrorBlocks := make([]*big.Int, 0)
		knownErrorBlocks = createKnownErrorBlocks(knownErrorBlocks, tc.knownErrorBlocksStart, tc.knownErrorBlocksEnd)
		service.KnownGaps.knownErrorBlocks = knownErrorBlocks
		// Upsert
		testCaptureErrorBlocks(t, service)
		// Validate that the upsert was done correctly.
		validateUpsert(t, service, tc.knownErrorBlocksStart, tc.knownErrorBlocksEnd)
	}
	tearDown(t, db)
}

// Test writing blocks to a file and then inserting them into the DB
func testWriteToFile(t *testing.T, tests []gapValues, wipeDbBeforeStart bool) {
	t.Log("Starting write to file test")
	db := setupDb(t)
	// Clear the table first; this is needed because we updated an entry to have a larger end block number,
	// so we can't find the original start and end block pair.
	if wipeDbBeforeStart {
		t.Log("Cleaning up eth_meta.known_gaps table")
		db.Exec(context.Background(), "DELETE FROM eth_meta.known_gaps")
	}
	if _, err := os.Stat(knownGapsFilePath); err == nil {
		err := os.Remove(knownGapsFilePath)
		if err != nil {
			t.Fatal("Can't delete local file")
		}
	}
	tearDown(t, db)
	for _, tc := range tests {
		knownGaps := KnownGapsState{
			processingKey:      tc.processingKey,
			expectedDifference: big.NewInt(tc.expectedDif),
			writeFilePath:      knownGapsFilePath,
			statediffMetrics:   RegisterStatediffMetrics(metrics.DefaultRegistry),
			db:                 nil, // Only set to nil to be explicit that we can't use it
		}
		service := &Service{
			KnownGaps: knownGaps,
		}
		knownErrorBlocks := make([]*big.Int, 0)
		knownErrorBlocks = createKnownErrorBlocks(knownErrorBlocks, tc.knownErrorBlocksStart, tc.knownErrorBlocksEnd)
		service.KnownGaps.knownErrorBlocks = knownErrorBlocks

		testCaptureErrorBlocks(t, service)
		newDb := setupDb(t)
		service.KnownGaps.db = newDb
		if service.KnownGaps.sqlFileWaitingForWrite {
			writeErr := service.KnownGaps.writeSqlFileStmtToDb()
			require.NoError(t, writeErr)
		}

		// Validate that the upsert was done correctly.
		validateUpsert(t, service, tc.knownErrorBlocksStart, tc.knownErrorBlocksEnd)
		tearDown(t, newDb)
	}
}

// Find a gap; if no gaps exist, create an arbitrary one
func testFindAndUpdateGaps(t *testing.T, wipeDbBeforeStart bool) {
	db := setupDb(t)

	if wipeDbBeforeStart {
		db.Exec(context.Background(), "DELETE FROM eth_meta.known_gaps")
	}
	knownGaps := KnownGapsState{
		processingKey:      1,
		expectedDifference: big.NewInt(1),
		db:                 db,
		statediffMetrics:   RegisterStatediffMetrics(metrics.DefaultRegistry),
	}
	service := &Service{
		KnownGaps: knownGaps,
	}

	latestBlockInDb, err := service.KnownGaps.queryDbToBigInt("SELECT MAX(block_number) FROM eth.header_cids")
	if err != nil {
		t.Skip("Can't find a block in the eth.header_cids table. Please put one there")
	}

	// Add the gapDifference for testing purposes
	gapDifference := big.NewInt(10)     // Set a difference between the latest block in the DB and on chain
	expectedDifference := big.NewInt(1) // Set what the expected difference between the latest block in the DB and on chain should be

	latestBlockOnChain := big.NewInt(0)
	latestBlockOnChain.Add(latestBlockInDb, gapDifference)

	t.Log("The latest block on the chain is: ", latestBlockOnChain)
	t.Log("The latest block in the DB is: ", latestBlockInDb)

	gapUpsertErr := service.KnownGaps.findAndUpdateGaps(latestBlockOnChain, expectedDifference, 0)
	require.NoError(t, gapUpsertErr)

	startBlock := big.NewInt(0)
	endBlock := big.NewInt(0)

	startBlock.Add(latestBlockInDb, expectedDifference)
	endBlock.Sub(latestBlockOnChain, expectedDifference)
	validateUpsert(t, service, startBlock.Int64(), endBlock.Int64())
}

// Test capturing missed blocks
func testCaptureErrorBlocks(t *testing.T, service *Service) {
	service.KnownGaps.captureErrorBlocks(service.KnownGaps.knownErrorBlocks)
}

// Helper function to create an array of gaps given a start and end block
func createKnownErrorBlocks(knownErrorBlocks []*big.Int, knownErrorBlocksStart int64, knownErrorBlocksEnd int64) []*big.Int {
	for i := knownErrorBlocksStart; i <= knownErrorBlocksEnd; i++ {
		knownErrorBlocks = append(knownErrorBlocks, big.NewInt(i))
	}
	return knownErrorBlocks
}

// Make sure the upsert was performed correctly
func validateUpsert(t *testing.T, service *Service, startingBlock int64, endingBlock int64) {
	t.Logf("Starting to query blocks: %d - %d", startingBlock, endingBlock)
	queryString := fmt.Sprintf("SELECT starting_block_number FROM eth_meta.known_gaps WHERE starting_block_number = %d AND ending_block_number = %d", startingBlock, endingBlock)

	_, queryErr := service.KnownGaps.queryDb(queryString)
	t.Logf("Updated known gaps table starting from, %d, and ending at, %d", startingBlock, endingBlock)
	require.NoError(t, queryErr)
}

// Create a DB object to use
func setupDb(t *testing.T) sql.Database {
	db, err := postgres.SetupSQLXDB()
	if err != nil {
		t.Error("Can't create a DB connection....")
		t.Fatal(err)
	}
	return db
}

// Teardown the DB
func tearDown(t *testing.T, db sql.Database) {
	t.Log("Starting tearDown")
	db.Close()
}

@ -50,14 +50,6 @@ type statediffMetricsHandles struct {
	// Current length of chainEvent channels
	serviceLoopChannelLen metrics.Gauge
	writeLoopChannelLen   metrics.Gauge
	// The start block of the known gap
	knownGapStart metrics.Gauge
	// The end block of the known gap
	knownGapEnd metrics.Gauge
	// A known gaps start block which had an error being written to the DB
	knownGapErrorStart metrics.Gauge
	// A known gaps end block which had an error being written to the DB
	knownGapErrorEnd metrics.Gauge
}

func RegisterStatediffMetrics(reg metrics.Registry) statediffMetricsHandles {
@ -67,10 +59,6 @@ func RegisterStatediffMetrics(reg metrics.Registry) statediffMetricsHandles {
		lastStatediffHeight:   metrics.NewGauge(),
		serviceLoopChannelLen: metrics.NewGauge(),
		writeLoopChannelLen:   metrics.NewGauge(),
		knownGapStart:         metrics.NewGauge(),
		knownGapEnd:           metrics.NewGauge(),
		knownGapErrorStart:    metrics.NewGauge(),
		knownGapErrorEnd:      metrics.NewGauge(),
	}
	subsys := "service"
	reg.Register(metricName(subsys, "last_sync_height"), ctx.lastSyncHeight)
@ -78,9 +66,5 @@ func RegisterStatediffMetrics(reg metrics.Registry) statediffMetricsHandles {
	reg.Register(metricName(subsys, "last_statediff_height"), ctx.lastStatediffHeight)
	reg.Register(metricName(subsys, "service_loop_channel_len"), ctx.serviceLoopChannelLen)
	reg.Register(metricName(subsys, "write_loop_channel_len"), ctx.writeLoopChannelLen)
	reg.Register(metricName(subsys, "known_gaps_start"), ctx.knownGapStart)
	reg.Register(metricName(subsys, "known_gaps_end"), ctx.knownGapEnd)
	reg.Register(metricName(subsys, "known_gaps_error_start"), ctx.knownGapErrorStart)
	reg.Register(metricName(subsys, "known_gaps_error_end"), ctx.knownGapErrorEnd)
	return ctx
}
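
These four gauges are removed along with the feature. For context, metrics.Gauge carries an int64 via Update, so publishing a detected gap presumably looked something like the following sketch (recordGapMetrics is a hypothetical helper, not code from this PR):

// Hypothetical helper: surface gap bounds on the gauges removed above.
func (kg *KnownGapsState) recordGapMetrics(startBlock, endBlock *big.Int, writeErr error) {
	if writeErr != nil {
		// The DB write failed; report the bounds on the error gauges instead.
		kg.statediffMetrics.knownGapErrorStart.Update(startBlock.Int64())
		kg.statediffMetrics.knownGapErrorEnd.Update(endBlock.Int64())
		return
	}
	kg.statediffMetrics.knownGapStart.Update(startBlock.Int64())
	kg.statediffMetrics.knownGapEnd.Update(endBlock.Int64())
}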

@ -42,10 +42,8 @@ import (
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/rpc"
	ind "github.com/ethereum/go-ethereum/statediff/indexer"
	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
	nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
	"github.com/ethereum/go-ethereum/statediff/indexer/shared"
	types2 "github.com/ethereum/go-ethereum/statediff/types"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/thoas/go-funk"
@ -134,8 +132,6 @@ type Service struct {
	BackendAPI ethapi.Backend
	// Should the statediff service wait for geth to sync to head?
	WaitForSync bool
	// Used to signal if we should check for KnownGaps
	KnownGaps KnownGapsState
	// Whether or not we have any subscribers; only if we do, do we process state diffs
	subscribers int32
	// Interface for publishing statediffs as PG-IPLD objects
@ -167,7 +163,6 @@ func NewBlockCache(max uint) BlockCache {
func New(stack *node.Node, ethServ *eth.Ethereum, cfg *ethconfig.Config, params Config, backend ethapi.Backend) error {
	blockChain := ethServ.BlockChain()
	var indexer interfaces.StateDiffIndexer
	var db sql.Database
	var err error
	quitCh := make(chan bool)
	indexerConfigAvailable := params.IndexerConfig != nil
@ -179,8 +174,7 @@ func New(stack *node.Node, ethServ *eth.Ethereum, cfg *ethconfig.Config, params
		ID:         params.ID,
		ClientName: params.ClientName,
	}
	var err error
	db, indexer, err = ind.NewStateDiffIndexer(params.Context, blockChain.Config(), info, params.IndexerConfig)
	indexer, err = ind.NewStateDiffIndexer(params.Context, blockChain.Config(), info, params.IndexerConfig)
	if err != nil {
		return err
	}
@ -191,25 +185,6 @@ func New(stack *node.Node, ethServ *eth.Ethereum, cfg *ethconfig.Config, params
	if workers == 0 {
		workers = 1
	}
	// If we ever have multiple processingKeys we can update them here
	// along with the expectedDifference
	knownGaps := &KnownGapsState{
		processingKey:          0,
		expectedDifference:     big.NewInt(1),
		errorState:             false,
		writeFilePath:          params.KnownGapsFilePath,
		db:                     db,
		statediffMetrics:       statediffMetrics,
		sqlFileWaitingForWrite: false,
	}
	if indexerConfigAvailable {
		if params.IndexerConfig.Type() == shared.POSTGRES {
			knownGaps.checkForGaps = true
		} else {
			log.Info("We are not going to check for gaps on startup since we are not connected to Postgres!")
			knownGaps.checkForGaps = false
		}
	}
	sds := &Service{
		Mutex:      sync.Mutex{},
		BlockChain: blockChain,
@ -220,7 +195,6 @@ func New(stack *node.Node, ethServ *eth.Ethereum, cfg *ethconfig.Config, params
		BlockCache:      NewBlockCache(workers),
		BackendAPI:      backend,
		WaitForSync:     params.WaitForSync,
		KnownGaps:       *knownGaps,
		indexer:         indexer,
		enableWriteLoop: params.EnableWriteLoop,
		numWorkers:      workers,
@ -355,45 +329,17 @@ func (sds *Service) writeLoopWorker(params workerParams) {
	sds.writeGenesisStateDiff(parentBlock, params.id)
}

// If for any reason we need to check for gaps,
// check and update the gaps table.
if sds.KnownGaps.checkForGaps && !sds.KnownGaps.errorState {
	log.Info("Checking for gaps at", "current block", currentBlock.Number())
	go sds.KnownGaps.findAndUpdateGaps(currentBlock.Number(), sds.KnownGaps.expectedDifference, sds.KnownGaps.processingKey)
	sds.KnownGaps.checkForGaps = false
}

log.Info("Writing state diff", "block height", currentBlock.Number().Uint64(), "worker", params.id)
writeLoopParams.RLock()
err := sds.writeStateDiffWithRetry(currentBlock, parentBlock.Root(), writeLoopParams.Params)
writeLoopParams.RUnlock()
// if processing failed even with retries, note it in the logs and continue to the next block
if err != nil {
	log.Error("statediff.Service.WriteLoop: processing error", "block height", currentBlock.Number().Uint64(), "error", err.Error(), "worker", params.id)
	sds.KnownGaps.errorState = true
	log.Warn("Adding the following block to knownErrorBlocks to be inserted into the knownGaps table", "blockNumber", currentBlock.Number())
	sds.KnownGaps.knownErrorBlocks = append(sds.KnownGaps.knownErrorBlocks, currentBlock.Number())
	// Write object to statediff
	continue
}
sds.KnownGaps.errorState = false
if sds.KnownGaps.knownErrorBlocks != nil {
	// We must pass in parameters by VALUE, not reference.
	// If we pass them in by reference, the references can change before the computation is complete!
	staticKnownErrorBlocks := make([]*big.Int, len(sds.KnownGaps.knownErrorBlocks))
	copy(staticKnownErrorBlocks, sds.KnownGaps.knownErrorBlocks)
	sds.KnownGaps.knownErrorBlocks = nil
	go sds.KnownGaps.captureErrorBlocks(staticKnownErrorBlocks)
}

if sds.KnownGaps.sqlFileWaitingForWrite {
	log.Info("There are entries in the SQL file for knownGaps that should be written")
	err := sds.KnownGaps.writeSqlFileStmtToDb()
	if err != nil {
		log.Error("Unable to write the KnownGaps SQL file to the DB")
	}
}

// TODO: how to handle with concurrent workers
// Note: when using multiple workers the blocks may not be processed in chronological order
statediffMetrics.lastStatediffHeight.Update(int64(currentBlock.Number().Uint64()))
case <-sds.QuitChan:
	log.Info("Quitting the statediff writing process", "worker", params.id)
@ -884,7 +830,7 @@ func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, p
	return nil
}

// Wrapper function on writeStateDiff to retry when the deadlock is detected.
// writeStateDiffWithRetry is a wrapper around writeStateDiff to retry when a deadlock is detected.
func (sds *Service) writeStateDiffWithRetry(block *types.Block, parentRoot common.Hash, params Params) error {
	var err error
	for i := uint(0); i < sds.maxRetry; i++ {
@ -896,12 +842,12 @@ func (sds *Service) writeStateDiffWithRetry(block *types.Block, parentRoot commo
	}
	continue
}
break
return err
}
return err
return fmt.Errorf("error writing statediff at block %s due to deadlock error, unable to write after %d tries: %s", block.Number().String(), sds.maxRetry, err.Error())
}
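
Pieced together, the right-hand side of this hunk changes the wrapper's behavior: it now returns immediately on success or on a non-deadlock error, and only exhausts maxRetry on repeated deadlocks, at which point it wraps the failure in a descriptive error instead of returning the bare one. A reconstruction of the resulting function, assuming deadlock detection is a substring match against a deadlockDetected sentinel:

// Reconstructed post-change retry wrapper; the deadlockDetected sentinel and
// the detection condition are assumptions, the rest follows the hunk above.
func (sds *Service) writeStateDiffWithRetry(block *types.Block, parentRoot common.Hash, params Params) error {
	var err error
	for i := uint(0); i < sds.maxRetry; i++ {
		err = sds.writeStateDiff(block, parentRoot, params)
		if err != nil && strings.Contains(err.Error(), deadlockDetected) {
			continue // retry only when the DB reports a deadlock
		}
		return err // success, or a non-retryable error
	}
	return fmt.Errorf("error writing statediff at block %s due to deadlock error, unable to write after %d tries: %s",
		block.Number().String(), sds.maxRetry, err.Error())
}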

// Performs one of the following operations on the watched addresses in writeLoopParams and the db:
// WatchAddress performs one of the following operations on the watched addresses in writeLoopParams and the db:
// add | remove | set | clear
func (sds *Service) WatchAddress(operation types2.OperationType, args []types2.WatchAddressArg) error {
	// lock writeLoopParams for a write