diff --git a/statediff/indexer/constructor.go b/statediff/indexer/constructor.go
index 9a66dba89..a8f2d5211 100644
--- a/statediff/indexer/constructor.go
+++ b/statediff/indexer/constructor.go
@@ -26,6 +26,8 @@ import (
"github.com/ethereum/go-ethereum/statediff/indexer/database/file"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
+ v2 "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres/v2"
+ v3 "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres/v3"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/ethereum/go-ethereum/statediff/indexer/node"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
@@ -44,27 +46,41 @@ func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, n
return file.NewStateDiffIndexer(ctx, chainConfig, fc)
case shared.POSTGRES:
log.Info("Starting statediff service in Postgres writing mode")
- pgc, ok := config.(postgres.Config)
+ pgc, ok := config.(postgres.MultiConfig)
if !ok {
return nil, fmt.Errorf("postgres config is not the correct type: got %T, expected %T", config, postgres.Config{})
}
var err error
- var driver sql.Driver
- switch pgc.Driver {
+ var oldDriver, newDriver interfaces.Driver
+ switch pgc.V2.Driver {
case postgres.PGX:
- driver, err = postgres.NewPGXDriver(ctx, pgc, nodeInfo)
+ oldDriver, err = postgres.NewPGXDriver(ctx, pgc.V2)
if err != nil {
return nil, err
}
case postgres.SQLX:
- driver, err = postgres.NewSQLXDriver(ctx, pgc, nodeInfo)
+ oldDriver, err = postgres.NewSQLXDriver(ctx, pgc.V2)
if err != nil {
return nil, err
}
default:
- return nil, fmt.Errorf("unrecongized Postgres driver type: %s", pgc.Driver)
+ return nil, fmt.Errorf("unrecongized Postgres driver type: %s", pgc.V2.Driver)
}
- return sql.NewStateDiffIndexer(ctx, chainConfig, postgres.NewPostgresDB(driver))
+ switch pgc.V3.Driver {
+ case postgres.PGX:
+ newDriver, err = postgres.NewPGXDriver(ctx, pgc.V3)
+ if err != nil {
+ return nil, err
+ }
+ case postgres.SQLX:
+ newDriver, err = postgres.NewSQLXDriver(ctx, pgc.V3)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unrecongized Postgres driver type: %s", pgc.V3.Driver)
+ }
+ return sql.NewStateDiffIndexer(ctx, chainConfig, nodeInfo, v2.NewPostgresDB(oldDriver), v3.NewPostgresDB(newDriver))
case shared.DUMP:
log.Info("Starting statediff service in data dump mode")
dumpc, ok := config.(dump.Config)
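A minimal, self-contained sketch of the dual-driver construction above. It assumes postgres.MultiConfig simply pairs two postgres.Config values under V2 and V3 (inferred from the pgc.V2 / pgc.V3 accesses); every type here is a local stand-in rather than the real postgres package. Factoring the repeated switch into a helper also avoids the copy-paste slip corrected above (switching on the v2 driver while building the v3 driver).

```go
package main

import (
	"errors"
	"fmt"
)

// Local stand-ins; the real postgres.Config carries connection settings
// in addition to the driver selection shown here.
type Config struct {
	Driver string // "pgx" or "sqlx"
}

// MultiConfig is assumed to bundle one config per schema version.
type MultiConfig struct {
	V2 Config
	V3 Config
}

type Driver interface{ Name() string }

type pgxDriver struct{ cfg Config }
type sqlxDriver struct{ cfg Config }

func (pgxDriver) Name() string  { return "pgx" }
func (sqlxDriver) Name() string { return "sqlx" }

// newDriver mirrors the per-config switch that the constructor now runs
// once for pgc.V2 and once for pgc.V3.
func newDriver(cfg Config) (Driver, error) {
	switch cfg.Driver {
	case "pgx":
		return pgxDriver{cfg}, nil
	case "sqlx":
		return sqlxDriver{cfg}, nil
	default:
		return nil, errors.New("unrecognized Postgres driver type: " + cfg.Driver)
	}
}

func main() {
	mc := MultiConfig{V2: Config{Driver: "pgx"}, V3: Config{Driver: "sqlx"}}
	oldDrv, err := newDriver(mc.V2)
	if err != nil {
		panic(err)
	}
	newDrv, err := newDriver(mc.V3)
	if err != nil {
		panic(err)
	}
	fmt.Println("v2 driver:", oldDrv.Name(), "v3 driver:", newDrv.Name())
}
```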
diff --git a/statediff/indexer/database/dump/batch_tx.go b/statediff/indexer/database/dump/batch_tx.go
index f1754b907..9e001dbca 100644
--- a/statediff/indexer/database/dump/batch_tx.go
+++ b/statediff/indexer/database/dump/batch_tx.go
@@ -22,7 +22,6 @@ import (
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
- "github.com/ethereum/go-ethereum/statediff/indexer/models"
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
node "github.com/ipfs/go-ipld-format"
@@ -33,8 +32,8 @@ type BatchTx struct {
BlockNumber uint64
dump io.Writer
quit chan struct{}
- iplds chan models.IPLDModel
- ipldCache models.IPLDBatch
+ iplds chan sharedModels.IPLDModel
+ ipldCache sharedModels.IPLDBatch
submit func(blockTx *BatchTx, err error) error
}
@@ -48,7 +47,7 @@ func (tx *BatchTx) flush() error {
if _, err := fmt.Fprintf(tx.dump, "%+v\r\n", tx.ipldCache); err != nil {
return err
}
- tx.ipldCache = models.IPLDBatch{}
+ tx.ipldCache = sharedModels.IPLDBatch{}
return nil
}
@@ -60,21 +59,21 @@ func (tx *BatchTx) cache() {
tx.ipldCache.Keys = append(tx.ipldCache.Keys, i.Key)
tx.ipldCache.Values = append(tx.ipldCache.Values, i.Data)
case <-tx.quit:
- tx.ipldCache = models.IPLDBatch{}
+ tx.ipldCache = sharedModels.IPLDBatch{}
return
}
}
}
func (tx *BatchTx) cacheDirect(key string, value []byte) {
- tx.iplds <- models.IPLDModel{
+ tx.iplds <- sharedModels.IPLDModel{
Key: key,
Data: value,
}
}
func (tx *BatchTx) cacheIPLD(i node.Node) {
- tx.iplds <- models.IPLDModel{
+ tx.iplds <- sharedModels.IPLDModel{
Key: blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(),
Data: i.RawData(),
}
@@ -86,7 +85,7 @@ func (tx *BatchTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error
return "", "", err
}
prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String()
- tx.iplds <- models.IPLDModel{
+ tx.iplds <- sharedModels.IPLDModel{
Key: prefixedKey,
Data: raw,
}
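For orientation, a self-contained sketch of the channel-based batching that BatchTx.cache implements above: producers send key/value pairs on an unbuffered channel while one goroutine appends them to an in-memory batch until quit is closed. The structs are stand-ins for the models/shared types; the real code additionally resets the cache on quit and flushes it to the dump writer.

```go
package main

import "fmt"

// Stand-ins for the shared IPLD models used by BatchTx.
type IPLDModel struct {
	Key  string
	Data []byte
}

type IPLDBatch struct {
	Keys   []string
	Values [][]byte
}

type batch struct {
	iplds chan IPLDModel
	quit  chan struct{}
	done  chan struct{}
	cache IPLDBatch
}

// run mirrors BatchTx.cache: collect until quit closes.
func (b *batch) run() {
	defer close(b.done)
	for {
		select {
		case m := <-b.iplds:
			b.cache.Keys = append(b.cache.Keys, m.Key)
			b.cache.Values = append(b.cache.Values, m.Data)
		case <-b.quit:
			return
		}
	}
}

func main() {
	b := &batch{
		iplds: make(chan IPLDModel),
		quit:  make(chan struct{}),
		done:  make(chan struct{}),
	}
	go b.run()
	b.iplds <- IPLDModel{Key: "k1", Data: []byte("v1")}
	b.iplds <- IPLDModel{Key: "k2", Data: []byte("v2")}
	close(b.quit)
	<-b.done // cache is safe to read once the collector has exited
	fmt.Println(len(b.cache.Keys), "IPLDs batched")
}
```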
diff --git a/statediff/indexer/database/dump/indexer.go b/statediff/indexer/database/dump/indexer.go
index e450f941a..21f9f347a 100644
--- a/statediff/indexer/database/dump/indexer.go
+++ b/statediff/indexer/database/dump/indexer.go
@@ -22,8 +22,6 @@ import (
"math/big"
"time"
- ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
-
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
"github.com/multiformats/go-multihash"
@@ -36,7 +34,9 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
- "github.com/ethereum/go-ethereum/statediff/indexer/models"
+ ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+ sharedModels "github.com/ethereum/go-ethereum/statediff/indexer/models/shared"
+ v3Models "github.com/ethereum/go-ethereum/statediff/indexer/models/v3"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)
@@ -61,12 +61,15 @@ func NewStateDiffIndexer(chainConfig *params.ChainConfig, config Config) *StateD
}
}
-// ReportDBMetrics has nothing to report for dump
-func (sdi *StateDiffIndexer) ReportDBMetrics(time.Duration, <-chan bool) {}
+// ReportOldDBMetrics has nothing to report for dump
+func (sdi *StateDiffIndexer) ReportOldDBMetrics(time.Duration, <-chan bool) {}
+
+// ReportNewDBMetrics has nothing to report for dump
+func (sdi *StateDiffIndexer) ReportNewDBMetrics(time.Duration, <-chan bool) {}
// PushBlock pushes and indexes block data in sql, except state & storage nodes (includes header, uncles, transactions & receipts)
// Returns an initiated DB transaction which must be Closed via defer to commit or rollback
-func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, error) {
+func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, int64, error) {
start, t := time.Now(), time.Now()
blockHash := block.Hash()
blockHashStr := blockHash.String()
@@ -75,20 +78,20 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
transactions := block.Transactions()
// Derive any missing fields
if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, transactions); err != nil {
- return nil, err
+ return nil, 0, err
}
// Generate the block iplds
headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
if err != nil {
- return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
+ return nil, 0, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
}
if len(txNodes) != len(rctNodes) || len(rctNodes) != len(rctLeafNodeCIDs) {
- return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs))
+ return nil, 0, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs))
}
if len(txTrieNodes) != len(rctTrieNodes) {
- return nil, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes))
+ return nil, 0, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes))
}
// Calculate reward
@@ -104,9 +107,9 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
blockTx := &BatchTx{
BlockNumber: height,
dump: sdi.dump,
- iplds: make(chan models.IPLDModel),
+ iplds: make(chan sharedModels.IPLDModel),
quit: make(chan struct{}),
- ipldCache: models.IPLDBatch{},
+ ipldCache: sharedModels.IPLDBatch{},
submit: func(self *BatchTx, err error) error {
close(self.quit)
close(self.iplds)
@@ -139,7 +142,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
var headerID string
headerID, err = sdi.processHeader(blockTx, block.Header(), headerNode, reward, totalDifficulty)
if err != nil {
- return nil, err
+ return nil, 0, err
}
tDiff = time.Since(t)
indexerMetrics.tHeaderProcessing.Update(tDiff)
@@ -148,7 +151,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
// Publish and index uncles
err = sdi.processUncles(blockTx, headerID, height, uncleNodes)
if err != nil {
- return nil, err
+ return nil, 0, err
}
tDiff = time.Since(t)
indexerMetrics.tUncleProcessing.Update(tDiff)
@@ -169,14 +172,14 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
rctLeafNodeCIDs: rctLeafNodeCIDs,
})
if err != nil {
- return nil, err
+ return nil, 0, err
}
tDiff = time.Since(t)
indexerMetrics.tTxAndRecProcessing.Update(tDiff)
traceMsg += fmt.Sprintf("tx and receipt processing time: %s\r\n", tDiff.String())
t = time.Now()
- return blockTx, err
+ return blockTx, 0, err
}
// processHeader publishes and indexes a header IPLD in Postgres
@@ -185,7 +188,7 @@ func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, he
tx.cacheIPLD(headerNode)
headerID := header.Hash().String()
- mod := models.HeaderModel{
+ mod := v3Models.HeaderModel{
CID: headerNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
ParentHash: header.ParentHash.String(),
@@ -217,7 +220,7 @@ func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNu
} else {
uncleReward = shared.CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64())
}
- uncle := models.UncleModel{
+ uncle := v3Models.UncleModel{
HeaderID: headerID,
CID: uncleNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()),
@@ -273,7 +276,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
if err != nil {
return fmt.Errorf("error deriving tx sender: %v", err)
}
- txModel := models.TxModel{
+ txModel := v3Models.TxModel{
HeaderID: args.headerID,
Dst: shared.HandleZeroAddrPointer(trx.To()),
Src: shared.HandleZeroAddr(from),
@@ -295,7 +298,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
for k, storageKey := range accessListElement.StorageKeys {
storageKeys[k] = storageKey.Hex()
}
- accessListElementModel := models.AccessListElementModel{
+ accessListElementModel := v3Models.AccessListElementModel{
TxID: trxID,
Index: int64(j),
Address: accessListElement.Address.Hex(),
@@ -318,7 +321,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
return fmt.Errorf("invalid receipt leaf node cid")
}
- rctModel := &models.ReceiptModel{
+ rctModel := &v3Models.ReceiptModel{
TxID: trxID,
Contract: contract,
ContractHash: contractHash,
@@ -336,7 +339,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
return err
}
- logDataSet := make([]*models.LogsModel, len(receipt.Logs))
+ logDataSet := make([]*v3Models.LogsModel, len(receipt.Logs))
for idx, l := range receipt.Logs {
topicSet := make([]string, 4)
for ti, topic := range l.Topics {
@@ -347,7 +350,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
return fmt.Errorf("invalid log cid")
}
- logDataSet[idx] = &models.LogsModel{
+ logDataSet[idx] = &v3Models.LogsModel{
ReceiptID: trxID,
Address: l.Address.String(),
Index: int64(l.Index),
@@ -376,7 +379,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
}
// PushStateNode publishes and indexes a state diff node object (including any child storage nodes) in the IPLD sql
-func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerID string) error {
+func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerHash string, headerID int64) error {
tx, ok := batch.(*BatchTx)
if !ok {
return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch)
@@ -385,8 +388,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
if stateNode.NodeType == sdtypes.Removed {
// short circuit if it is a Removed node
// this assumes the db has been initialized and a public.blocks entry for the Removed node is present
- stateModel := models.StateNodeModel{
- HeaderID: headerID,
+ stateModel := v3Models.StateNodeModel{
+ HeaderID: headerHash,
Path: stateNode.Path,
StateKey: common.BytesToHash(stateNode.LeafKey).String(),
CID: shared.RemovedNodeStateCID,
@@ -400,8 +403,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
if err != nil {
return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
}
- stateModel := models.StateNodeModel{
- HeaderID: headerID,
+ stateModel := v3Models.StateNodeModel{
+ HeaderID: headerHash,
Path: stateNode.Path,
StateKey: common.BytesToHash(stateNode.LeafKey).String(),
CID: stateCIDStr,
@@ -425,8 +428,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
return fmt.Errorf("error decoding state account rlp: %s", err.Error())
}
- accountModel := models.StateAccountModel{
- HeaderID: headerID,
+ accountModel := v3Models.StateAccountModel{
+ HeaderID: headerHash,
StatePath: stateNode.Path,
Balance: account.Balance.String(),
Nonce: account.Nonce,
@@ -442,8 +445,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
if storageNode.NodeType == sdtypes.Removed {
// short circuit if it is a Removed node
// this assumes the db has been initialized and a public.blocks entry for the Removed node is present
- storageModel := models.StorageNodeModel{
- HeaderID: headerID,
+ storageModel := v3Models.StorageNodeModel{
+ HeaderID: headerHash,
StatePath: stateNode.Path,
Path: storageNode.Path,
StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
@@ -460,8 +463,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
if err != nil {
return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err)
}
- storageModel := models.StorageNodeModel{
- HeaderID: headerID,
+ storageModel := v3Models.StorageNodeModel{
+ HeaderID: headerHash,
StatePath: stateNode.Path,
Path: storageNode.Path,
StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
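The signature changes above propagate to every interfaces.StateDiffIndexer implementation and its callers: PushBlock now also returns a numeric header ID (the v2 primary key; the dump and file indexers simply return 0), and PushStateNode takes both the header hash used by the v3 schema and that ID. A hypothetical caller-side sketch with stand-in types, not the actual statediff service wiring:

```go
package main

import "fmt"

// Stand-ins mirroring the updated indexer surface assumed by this diff.
type Batch interface {
	Submit(err error) error
}

type StateNode struct{ Path []byte }

type Indexer interface {
	PushBlock(blockHash string) (Batch, int64, error)
	PushStateNode(b Batch, n StateNode, headerHash string, headerID int64) error
}

type fakeBatch struct{}

func (fakeBatch) Submit(err error) error { return err }

type fakeIndexer struct{ nextID int64 }

func (f *fakeIndexer) PushBlock(blockHash string) (Batch, int64, error) {
	f.nextID++
	return fakeBatch{}, f.nextID, nil
}

func (f *fakeIndexer) PushStateNode(b Batch, n StateNode, headerHash string, headerID int64) error {
	fmt.Printf("state node %x -> header %s (id %d)\n", n.Path, headerHash, headerID)
	return nil
}

// writeDiff threads the new headerID from PushBlock into each PushStateNode
// call alongside the block-hash string, then submits the batch.
func writeDiff(idx Indexer, blockHash string, nodes []StateNode) (err error) {
	tx, headerID, err := idx.PushBlock(blockHash)
	if err != nil {
		return err
	}
	defer func() { err = tx.Submit(err) }()
	for _, n := range nodes {
		if err := idx.PushStateNode(tx, n, blockHash, headerID); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = writeDiff(&fakeIndexer{}, "0xabc", []StateNode{{Path: []byte{0x06}}})
}
```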
diff --git a/statediff/indexer/database/file/indexer.go b/statediff/indexer/database/file/indexer.go
index 870c1f259..49da42493 100644
--- a/statediff/indexer/database/file/indexer.go
+++ b/statediff/indexer/database/file/indexer.go
@@ -38,7 +38,7 @@ import (
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
- "github.com/ethereum/go-ethereum/statediff/indexer/models"
+ v3Models "github.com/ethereum/go-ethereum/statediff/indexer/models/v3"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)
@@ -86,12 +86,15 @@ func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, c
}, nil
}
-// ReportDBMetrics has nothing to report for dump
-func (sdi *StateDiffIndexer) ReportDBMetrics(time.Duration, <-chan bool) {}
+// ReportOldDBMetrics has nothing to report for the file indexer
+func (sdi *StateDiffIndexer) ReportOldDBMetrics(time.Duration, <-chan bool) {}
+
+// ReportNewDBMetrics has nothing to report for the file indexer
+func (sdi *StateDiffIndexer) ReportNewDBMetrics(time.Duration, <-chan bool) {}
// PushBlock pushes and indexes block data in sql, except state & storage nodes (includes header, uncles, transactions & receipts)
// Returns an initiated DB transaction which must be Closed via defer to commit or rollback
-func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, error) {
+func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, int64, error) {
start, t := time.Now(), time.Now()
blockHash := block.Hash()
blockHashStr := blockHash.String()
@@ -100,20 +103,20 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
transactions := block.Transactions()
// Derive any missing fields
if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, transactions); err != nil {
- return nil, err
+ return nil, 0, err
}
// Generate the block iplds
headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
if err != nil {
- return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
+ return nil, 0, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
}
if len(txNodes) != len(rctNodes) || len(rctNodes) != len(rctLeafNodeCIDs) {
- return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs))
+ return nil, 0, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs))
}
if len(txTrieNodes) != len(rctTrieNodes) {
- return nil, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes))
+ return nil, 0, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes))
}
// Calculate reward
@@ -176,14 +179,14 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
rctLeafNodeCIDs: rctLeafNodeCIDs,
})
if err != nil {
- return nil, err
+ return nil, 0, err
}
tDiff = time.Since(t)
indexerMetrics.tTxAndRecProcessing.Update(tDiff)
traceMsg += fmt.Sprintf("tx and receipt processing time: %s\r\n", tDiff.String())
t = time.Now()
- return blockTx, err
+ return blockTx, 0, err
}
// processHeader write a header IPLD insert SQL stmt to a file
@@ -197,7 +200,7 @@ func (sdi *StateDiffIndexer) processHeader(header *types.Header, headerNode node
*baseFee = header.BaseFee.String()
}
headerID := header.Hash().String()
- sdi.fileWriter.upsertHeaderCID(models.HeaderModel{
+ sdi.fileWriter.upsertHeaderCID(v3Models.HeaderModel{
NodeID: sdi.nodeID,
CID: headerNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
@@ -229,7 +232,7 @@ func (sdi *StateDiffIndexer) processUncles(headerID string, blockNumber uint64,
} else {
uncleReward = shared.CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64())
}
- sdi.fileWriter.upsertUncleCID(models.UncleModel{
+ sdi.fileWriter.upsertUncleCID(v3Models.UncleModel{
HeaderID: headerID,
CID: uncleNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()),
@@ -280,7 +283,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
if err != nil {
return fmt.Errorf("error deriving tx sender: %v", err)
}
- txModel := models.TxModel{
+ txModel := v3Models.TxModel{
HeaderID: args.headerID,
Dst: shared.HandleZeroAddrPointer(trx.To()),
Src: shared.HandleZeroAddr(from),
@@ -300,7 +303,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
for k, storageKey := range accessListElement.StorageKeys {
storageKeys[k] = storageKey.Hex()
}
- accessListElementModel := models.AccessListElementModel{
+ accessListElementModel := v3Models.AccessListElementModel{
TxID: txID,
Index: int64(j),
Address: accessListElement.Address.Hex(),
@@ -321,7 +324,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
return fmt.Errorf("invalid receipt leaf node cid")
}
- rctModel := &models.ReceiptModel{
+ rctModel := &v3Models.ReceiptModel{
TxID: txID,
Contract: contract,
ContractHash: contractHash,
@@ -337,7 +340,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
sdi.fileWriter.upsertReceiptCID(rctModel)
// index logs
- logDataSet := make([]*models.LogsModel, len(receipt.Logs))
+ logDataSet := make([]*v3Models.LogsModel, len(receipt.Logs))
for idx, l := range receipt.Logs {
topicSet := make([]string, 4)
for ti, topic := range l.Topics {
@@ -348,7 +351,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
return fmt.Errorf("invalid log cid")
}
- logDataSet[idx] = &models.LogsModel{
+ logDataSet[idx] = &v3Models.LogsModel{
ReceiptID: txID,
Address: l.Address.String(),
Index: int64(l.Index),
@@ -374,13 +377,13 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
}
// PushStateNode writes a state diff node object (including any child storage nodes) IPLD insert SQL stmt to a file
-func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerID string) error {
+func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerHash string, headerID int64) error {
// publish the state node
if stateNode.NodeType == sdtypes.Removed {
// short circuit if it is a Removed node
// this assumes the db has been initialized and a public.blocks entry for the Removed node is present
- stateModel := models.StateNodeModel{
- HeaderID: headerID,
+ stateModel := v3Models.StateNodeModel{
+ HeaderID: headerHash,
Path: stateNode.Path,
StateKey: common.BytesToHash(stateNode.LeafKey).String(),
CID: shared.RemovedNodeStateCID,
@@ -394,8 +397,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
if err != nil {
return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
}
- stateModel := models.StateNodeModel{
- HeaderID: headerID,
+ stateModel := v3Models.StateNodeModel{
+ HeaderID: headerHash,
Path: stateNode.Path,
StateKey: common.BytesToHash(stateNode.LeafKey).String(),
CID: stateCIDStr,
@@ -417,8 +420,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
return fmt.Errorf("error decoding state account rlp: %s", err.Error())
}
- accountModel := models.StateAccountModel{
- HeaderID: headerID,
+ accountModel := v3Models.StateAccountModel{
+ HeaderID: headerHash,
StatePath: stateNode.Path,
Balance: account.Balance.String(),
Nonce: account.Nonce,
@@ -432,8 +435,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
if storageNode.NodeType == sdtypes.Removed {
// short circuit if it is a Removed node
// this assumes the db has been initialized and a public.blocks entry for the Removed node is present
- storageModel := models.StorageNodeModel{
- HeaderID: headerID,
+ storageModel := v3Models.StorageNodeModel{
+ HeaderID: headerHash,
StatePath: stateNode.Path,
Path: storageNode.Path,
StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
@@ -448,8 +451,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
if err != nil {
return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err)
}
- storageModel := models.StorageNodeModel{
- HeaderID: headerID,
+ storageModel := v3Models.StorageNodeModel{
+ HeaderID: headerHash,
StatePath: stateNode.Path,
Path: storageNode.Path,
StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
diff --git a/statediff/indexer/database/file/indexer_test.go b/statediff/indexer/database/file/indexer_test.go
index ef849e8e8..e5a030dcf 100644
--- a/statediff/indexer/database/file/indexer_test.go
+++ b/statediff/indexer/database/file/indexer_test.go
@@ -24,9 +24,10 @@ import (
"os"
"testing"
+ "github.com/ethereum/go-ethereum/statediff/indexer/models/v2"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
"github.com/ipfs/go-cid"
@@ -331,7 +332,7 @@ func TestFileIndexer(t *testing.T) {
if txRes.Value != transactions[3].Value().String() {
t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value)
}
- accessListElementModels := make([]models.AccessListElementModel, 0)
+ accessListElementModels := make([]v2.AccessListElementModel, 0)
pgStr = `SELECT access_list_elements.* FROM eth.access_list_elements INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.tx_hash) WHERE cid = $1 ORDER BY access_list_elements.index ASC`
err = sqlxdb.Select(&accessListElementModels, pgStr, c)
if err != nil {
@@ -340,11 +341,11 @@ func TestFileIndexer(t *testing.T) {
if len(accessListElementModels) != 2 {
t.Fatalf("expected two access list entries, got %d", len(accessListElementModels))
}
- model1 := models.AccessListElementModel{
+ model1 := v2.AccessListElementModel{
Index: accessListElementModels[0].Index,
Address: accessListElementModels[0].Address,
}
- model2 := models.AccessListElementModel{
+ model2 := v2.AccessListElementModel{
Index: accessListElementModels[1].Index,
Address: accessListElementModels[1].Address,
StorageKeys: accessListElementModels[1].StorageKeys,
@@ -447,7 +448,7 @@ func TestFileIndexer(t *testing.T) {
expectTrue(t, test_helpers.ListContainsString(rcts, rct5CID.String()))
for idx, c := range rcts {
- result := make([]models.IPLDModel, 0)
+ result := make([]sharedModels.IPLDModel, 0)
pgStr = `SELECT data
FROM eth.receipt_cids
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key)
@@ -531,7 +532,7 @@ func TestFileIndexer(t *testing.T) {
defer tearDown(t)
// check that state nodes were properly indexed and published
- stateNodes := make([]models.StateNodeModel, 0)
+ stateNodes := make([]v2.StateNodeModel, 0)
pgStr := `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
WHERE header_cids.block_number = $1 AND node_type != 3`
@@ -553,7 +554,7 @@ func TestFileIndexer(t *testing.T) {
t.Fatal(err)
}
pgStr = `SELECT * from eth.state_accounts WHERE header_id = $1 AND state_path = $2`
- var account models.StateAccountModel
+ var account v2.StateAccountModel
err = sqlxdb.Get(&account, pgStr, stateNode.HeaderID, stateNode.Path)
if err != nil {
t.Fatal(err)
@@ -563,7 +564,7 @@ func TestFileIndexer(t *testing.T) {
test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.ContractLeafKey).Hex())
test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x06'})
test_helpers.ExpectEqual(t, data, mocks.ContractLeafNode)
- test_helpers.ExpectEqual(t, account, models.StateAccountModel{
+ test_helpers.ExpectEqual(t, account, v2.StateAccountModel{
HeaderID: account.HeaderID,
StatePath: stateNode.Path,
Balance: "0",
@@ -577,7 +578,7 @@ func TestFileIndexer(t *testing.T) {
test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.AccountLeafKey).Hex())
test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x0c'})
test_helpers.ExpectEqual(t, data, mocks.AccountLeafNode)
- test_helpers.ExpectEqual(t, account, models.StateAccountModel{
+ test_helpers.ExpectEqual(t, account, v2.StateAccountModel{
HeaderID: account.HeaderID,
StatePath: stateNode.Path,
Balance: "1000",
@@ -589,7 +590,7 @@ func TestFileIndexer(t *testing.T) {
}
// check that Removed state nodes were properly indexed and published
- stateNodes = make([]models.StateNodeModel, 0)
+ stateNodes = make([]v2.StateNodeModel, 0)
pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
WHERE header_cids.block_number = $1 AND node_type = 3`
@@ -622,7 +623,7 @@ func TestFileIndexer(t *testing.T) {
defer tearDown(t)
// check that storage nodes were properly indexed
- storageNodes := make([]models.StorageNodeWithStateKeyModel, 0)
+ storageNodes := make([]v2.StorageNodeWithStateKeyModel, 0)
pgStr := `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
@@ -634,7 +635,7 @@ func TestFileIndexer(t *testing.T) {
t.Fatal(err)
}
test_helpers.ExpectEqual(t, len(storageNodes), 1)
- test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
+ test_helpers.ExpectEqual(t, storageNodes[0], v2.StorageNodeWithStateKeyModel{
CID: storageCID.String(),
NodeType: 2,
StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(),
@@ -655,7 +656,7 @@ func TestFileIndexer(t *testing.T) {
test_helpers.ExpectEqual(t, data, mocks.StorageLeafNode)
// check that Removed storage nodes were properly indexed
- storageNodes = make([]models.StorageNodeWithStateKeyModel, 0)
+ storageNodes = make([]v2.StorageNodeWithStateKeyModel, 0)
pgStr = `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
@@ -667,7 +668,7 @@ func TestFileIndexer(t *testing.T) {
t.Fatal(err)
}
test_helpers.ExpectEqual(t, len(storageNodes), 1)
- test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
+ test_helpers.ExpectEqual(t, storageNodes[0], v2.StorageNodeWithStateKeyModel{
CID: shared.RemovedNodeStorageCID,
NodeType: 3,
StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(),
diff --git a/statediff/indexer/database/file/writer.go b/statediff/indexer/database/file/writer.go
index 48de0853d..520c2c641 100644
--- a/statediff/indexer/database/file/writer.go
+++ b/statediff/indexer/database/file/writer.go
@@ -20,13 +20,15 @@ import (
"fmt"
"io"
+ sharedModels "github.com/ethereum/go-ethereum/statediff/indexer/models/shared"
+
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
node "github.com/ipfs/go-ipld-format"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
- "github.com/ethereum/go-ethereum/statediff/indexer/models"
+ v3Models "github.com/ethereum/go-ethereum/statediff/indexer/models/v3"
nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
)
@@ -161,19 +163,19 @@ func (sqw *SQLWriter) upsertNode(node nodeinfo.Info) {
sqw.stmts <- []byte(fmt.Sprintf(nodeInsert, node.GenesisBlock, node.NetworkID, node.ID, node.ClientName, node.ChainID))
}
-func (sqw *SQLWriter) upsertIPLD(ipld models.IPLDModel) {
+func (sqw *SQLWriter) upsertIPLD(ipld sharedModels.IPLDModel) {
sqw.stmts <- []byte(fmt.Sprintf(ipldInsert, ipld.Key, ipld.Data))
}
func (sqw *SQLWriter) upsertIPLDDirect(key string, value []byte) {
- sqw.upsertIPLD(models.IPLDModel{
+ sqw.upsertIPLD(sharedModels.IPLDModel{
Key: key,
Data: value,
})
}
func (sqw *SQLWriter) upsertIPLDNode(i node.Node) {
- sqw.upsertIPLD(models.IPLDModel{
+ sqw.upsertIPLD(sharedModels.IPLDModel{
Key: blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(),
Data: i.RawData(),
})
@@ -185,14 +187,14 @@ func (sqw *SQLWriter) upsertIPLDRaw(codec, mh uint64, raw []byte) (string, strin
return "", "", err
}
prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String()
- sqw.upsertIPLD(models.IPLDModel{
+ sqw.upsertIPLD(sharedModels.IPLDModel{
Key: prefixedKey,
Data: raw,
})
return c.String(), prefixedKey, err
}
-func (sqw *SQLWriter) upsertHeaderCID(header models.HeaderModel) {
+func (sqw *SQLWriter) upsertHeaderCID(header v3Models.HeaderModel) {
stmt := fmt.Sprintf(headerInsert, header.BlockNumber, header.BlockHash, header.ParentHash, header.CID,
header.TotalDifficulty, header.NodeID, header.Reward, header.StateRoot, header.TxRoot,
header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, header.MhKey, 1, header.Coinbase)
@@ -200,30 +202,30 @@ func (sqw *SQLWriter) upsertHeaderCID(header models.HeaderModel) {
indexerMetrics.blocks.Inc(1)
}
-func (sqw *SQLWriter) upsertUncleCID(uncle models.UncleModel) {
+func (sqw *SQLWriter) upsertUncleCID(uncle v3Models.UncleModel) {
sqw.stmts <- []byte(fmt.Sprintf(uncleInsert, uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID,
uncle.Reward, uncle.MhKey))
}
-func (sqw *SQLWriter) upsertTransactionCID(transaction models.TxModel) {
+func (sqw *SQLWriter) upsertTransactionCID(transaction v3Models.TxModel) {
sqw.stmts <- []byte(fmt.Sprintf(txInsert, transaction.HeaderID, transaction.TxHash, transaction.CID, transaction.Dst,
transaction.Src, transaction.Index, transaction.MhKey, transaction.Data, transaction.Type, transaction.Value))
indexerMetrics.transactions.Inc(1)
}
-func (sqw *SQLWriter) upsertAccessListElement(accessListElement models.AccessListElementModel) {
+func (sqw *SQLWriter) upsertAccessListElement(accessListElement v3Models.AccessListElementModel) {
sqw.stmts <- []byte(fmt.Sprintf(alInsert, accessListElement.TxID, accessListElement.Index, accessListElement.Address,
formatPostgresStringArray(accessListElement.StorageKeys)))
indexerMetrics.accessListEntries.Inc(1)
}
-func (sqw *SQLWriter) upsertReceiptCID(rct *models.ReceiptModel) {
+func (sqw *SQLWriter) upsertReceiptCID(rct *v3Models.ReceiptModel) {
sqw.stmts <- []byte(fmt.Sprintf(rctInsert, rct.TxID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey,
rct.PostState, rct.PostStatus, rct.LogRoot))
indexerMetrics.receipts.Inc(1)
}
-func (sqw *SQLWriter) upsertLogCID(logs []*models.LogsModel) {
+func (sqw *SQLWriter) upsertLogCID(logs []*v3Models.LogsModel) {
for _, l := range logs {
sqw.stmts <- []byte(fmt.Sprintf(logInsert, l.LeafCID, l.LeafMhKey, l.ReceiptID, l.Address, l.Index, l.Topic0,
l.Topic1, l.Topic2, l.Topic3, l.Data))
@@ -231,7 +233,7 @@ func (sqw *SQLWriter) upsertLogCID(logs []*models.LogsModel) {
}
}
-func (sqw *SQLWriter) upsertStateCID(stateNode models.StateNodeModel) {
+func (sqw *SQLWriter) upsertStateCID(stateNode v3Models.StateNodeModel) {
var stateKey string
if stateNode.StateKey != nullHash.String() {
stateKey = stateNode.StateKey
@@ -240,12 +242,12 @@ func (sqw *SQLWriter) upsertStateCID(stateNode models.StateNodeModel) {
stateNode.NodeType, true, stateNode.MhKey))
}
-func (sqw *SQLWriter) upsertStateAccount(stateAccount models.StateAccountModel) {
+func (sqw *SQLWriter) upsertStateAccount(stateAccount v3Models.StateAccountModel) {
sqw.stmts <- []byte(fmt.Sprintf(accountInsert, stateAccount.HeaderID, stateAccount.StatePath, stateAccount.Balance,
stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot))
}
-func (sqw *SQLWriter) upsertStorageCID(storageCID models.StorageNodeModel) {
+func (sqw *SQLWriter) upsertStorageCID(storageCID v3Models.StorageNodeModel) {
var storageKey string
if storageCID.StorageKey != nullHash.String() {
storageKey = storageCID.StorageKey
diff --git a/statediff/indexer/database/sql/batch_tx.go b/statediff/indexer/database/sql/batch_tx.go
index fb1b289a1..deec4f07b 100644
--- a/statediff/indexer/database/sql/batch_tx.go
+++ b/statediff/indexer/database/sql/batch_tx.go
@@ -19,6 +19,8 @@ package sql
import (
"context"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
node "github.com/ipfs/go-ipld-format"
@@ -26,18 +28,19 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
- "github.com/ethereum/go-ethereum/statediff/indexer/models"
+ modelsShared "github.com/ethereum/go-ethereum/statediff/indexer/models/shared"
)
// BatchTx wraps a sql tx with the state necessary for building the tx concurrently during trie difference iteration
type BatchTx struct {
- BlockNumber uint64
- ctx context.Context
- dbtx Tx
- stm string
- quit chan struct{}
- iplds chan models.IPLDModel
- ipldCache models.IPLDBatch
+ BlockNumber uint64
+ ctx context.Context
+ oldDBTx interfaces.Tx
+ newDBTx interfaces.Tx
+ oldStmt, newStmt string
+ quit chan struct{}
+ iplds chan modelsShared.IPLDModel
+ ipldCache modelsShared.IPLDBatch
submit func(blockTx *BatchTx, err error) error
}
@@ -48,11 +51,15 @@ func (tx *BatchTx) Submit(err error) error {
}
func (tx *BatchTx) flush() error {
- _, err := tx.dbtx.Exec(tx.ctx, tx.stm, pq.Array(tx.ipldCache.Keys), pq.Array(tx.ipldCache.Values))
+ _, err := tx.oldDBTx.Exec(tx.ctx, tx.oldStmt, pq.Array(tx.ipldCache.Keys), pq.Array(tx.ipldCache.Values))
if err != nil {
return err
}
- tx.ipldCache = models.IPLDBatch{}
+ _, err = tx.newDBTx.Exec(tx.ctx, tx.newStmt, pq.Array(tx.ipldCache.Keys), pq.Array(tx.ipldCache.Values))
+ if err != nil {
+ return err
+ }
+ tx.ipldCache = modelsShared.IPLDBatch{}
return nil
}
@@ -64,21 +71,21 @@ func (tx *BatchTx) cache() {
tx.ipldCache.Keys = append(tx.ipldCache.Keys, i.Key)
tx.ipldCache.Values = append(tx.ipldCache.Values, i.Data)
case <-tx.quit:
- tx.ipldCache = models.IPLDBatch{}
+ tx.ipldCache = modelsShared.IPLDBatch{}
return
}
}
}
func (tx *BatchTx) cacheDirect(key string, value []byte) {
- tx.iplds <- models.IPLDModel{
+ tx.iplds <- modelsShared.IPLDModel{
Key: key,
Data: value,
}
}
func (tx *BatchTx) cacheIPLD(i node.Node) {
- tx.iplds <- models.IPLDModel{
+ tx.iplds <- modelsShared.IPLDModel{
Key: blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(),
Data: i.RawData(),
}
@@ -90,7 +97,7 @@ func (tx *BatchTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error
return "", "", err
}
prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String()
- tx.iplds <- models.IPLDModel{
+ tx.iplds <- modelsShared.IPLDModel{
Key: prefixedKey,
Data: raw,
}
@@ -98,7 +105,7 @@ func (tx *BatchTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error
}
// rollback sql transaction and log any error
-func rollback(ctx context.Context, tx Tx) {
+func rollback(ctx context.Context, tx interfaces.Tx) {
if err := tx.Rollback(ctx); err != nil {
log.Error(err.Error())
}
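Because the sql indexer now drives two database transactions in lock-step, every failure path rolls back both and a commit failure on either side must surface to the caller. A self-contained sketch of the commit-joining behaviour used by the submit closure in sql/indexer.go below; Tx here is a stand-in for the interfaces.Tx used above:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// Tx mirrors the minimal commit/rollback surface BatchTx relies on.
type Tx interface {
	Commit() error
	Rollback() error
}

// commitBoth attempts both commits independently and joins any failures,
// so a v2 failure does not prevent the v3 commit from being attempted.
func commitBoth(oldTx, newTx Tx) error {
	errs := make([]string, 0, 2)
	if err := oldTx.Commit(); err != nil {
		errs = append(errs, fmt.Sprintf("old DB tx commit error: %s", err))
	}
	if err := newTx.Commit(); err != nil {
		errs = append(errs, fmt.Sprintf("new DB tx commit error: %s", err))
	}
	if len(errs) > 0 {
		return errors.New(strings.Join(errs, " && "))
	}
	return nil
}

type fakeTx struct{ failCommit bool }

func (t fakeTx) Commit() error {
	if t.failCommit {
		return errors.New("connection lost")
	}
	return nil
}

func (t fakeTx) Rollback() error { return nil }

func main() {
	// Prints: new DB tx commit error: connection lost
	fmt.Println(commitBoth(fakeTx{}, fakeTx{failCommit: true}))
}
```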
diff --git a/statediff/indexer/database/sql/indexer.go b/statediff/indexer/database/sql/indexer.go
index 3e578a469..2b67832f1 100644
--- a/statediff/indexer/database/sql/indexer.go
+++ b/statediff/indexer/database/sql/indexer.go
@@ -21,12 +21,12 @@ package sql
import (
"context"
+ "errors"
"fmt"
"math/big"
+ "strings"
"time"
- ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
-
"github.com/ipfs/go-cid"
node "github.com/ipfs/go-ipld-format"
"github.com/multiformats/go-multihash"
@@ -38,8 +38,14 @@ import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
+ v2Writer "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/v2"
+ v3Writer "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/v3"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
- "github.com/ethereum/go-ethereum/statediff/indexer/models"
+ "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+ sharedModels "github.com/ethereum/go-ethereum/statediff/indexer/models/shared"
+ v2Models "github.com/ethereum/go-ethereum/statediff/indexer/models/v2"
+ v3Models "github.com/ethereum/go-ethereum/statediff/indexer/models/v3"
+ nodeInfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)
@@ -55,24 +61,38 @@ var (
type StateDiffIndexer struct {
ctx context.Context
chainConfig *params.ChainConfig
- dbWriter *Writer
+ oldDBWriter *v2Writer.Writer
+ newDBWriter *v3Writer.Writer
}
// NewStateDiffIndexer creates a sql implementation of interfaces.StateDiffIndexer
-func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, db Database) (*StateDiffIndexer, error) {
+func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, info nodeInfo.Info, old, new interfaces.Database) (*StateDiffIndexer, error) {
// Write the removed node to the db on init
- if _, err := db.Exec(ctx, db.InsertIPLDStm(), shared.RemovedNodeMhKey, []byte{}); err != nil {
+ if _, err := old.Exec(ctx, old.InsertIPLDStm(), shared.RemovedNodeMhKey, []byte{}); err != nil {
+ return nil, err
+ }
+ if _, err := new.Exec(ctx, new.InsertIPLDStm(), shared.RemovedNodeMhKey, []byte{}); err != nil {
+ return nil, err
+ }
+ // Write node info to the db on init
+ oldWriter := v2Writer.NewWriter(old)
+ newWriter := v3Writer.NewWriter(new)
+ if err := oldWriter.InsertNodeInfo(info); err != nil {
+ return nil, err
+ }
+ if err := newWriter.InsertNodeInfo(info); err != nil {
return nil, err
}
return &StateDiffIndexer{
ctx: ctx,
chainConfig: chainConfig,
- dbWriter: NewWriter(db),
+ oldDBWriter: oldWriter,
+ newDBWriter: newWriter,
}, nil
}
-// ReportDBMetrics is a reporting function to run as goroutine
-func (sdi *StateDiffIndexer) ReportDBMetrics(delay time.Duration, quit <-chan bool) {
+// ReportOldDBMetrics is a reporting function to run as goroutine
+func (sdi *StateDiffIndexer) ReportOldDBMetrics(delay time.Duration, quit <-chan bool) {
if !metrics.Enabled {
return
}
@@ -81,7 +101,26 @@ func (sdi *StateDiffIndexer) ReportDBMetrics(delay time.Duration, quit <-chan bo
for {
select {
case <-ticker.C:
- dbMetrics.Update(sdi.dbWriter.db.Stats())
+ dbMetrics.Update(sdi.oldDBWriter.DB.Stats())
+ case <-quit:
+ ticker.Stop()
+ return
+ }
+ }
+ }()
+}
+
+// ReportNewDBMetrics is a reporting function to run as goroutine
+func (sdi *StateDiffIndexer) ReportNewDBMetrics(delay time.Duration, quit <-chan bool) {
+ if !metrics.Enabled {
+ return
+ }
+ ticker := time.NewTicker(delay)
+ go func() {
+ for {
+ select {
+ case <-ticker.C:
+ dbMetrics.Update(sdi.newDBWriter.DB.Stats())
case <-quit:
ticker.Stop()
return
@@ -92,7 +131,7 @@ func (sdi *StateDiffIndexer) ReportDBMetrics(delay time.Duration, quit <-chan bo
// PushBlock pushes and indexes block data in sql, except state & storage nodes (includes header, uncles, transactions & receipts)
// Returns an initiated DB transaction which must be Closed via defer to commit or rollback
-func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, error) {
+func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, int64, error) {
start, t := time.Now(), time.Now()
blockHash := block.Hash()
blockHashStr := blockHash.String()
@@ -101,20 +140,20 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
transactions := block.Transactions()
// Derive any missing fields
if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, transactions); err != nil {
- return nil, err
+ return nil, 0, err
}
// Generate the block iplds
- headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts)
+ headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld.FromBlockAndReceipts(block, receipts)
if err != nil {
- return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
+ return nil, 0, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
}
if len(txNodes) != len(rctNodes) || len(rctNodes) != len(rctLeafNodeCIDs) {
- return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs))
+ return nil, 0, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs))
}
if len(txTrieNodes) != len(rctTrieNodes) {
- return nil, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes))
+ return nil, 0, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes))
}
// Calculate reward
@@ -128,26 +167,40 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
t = time.Now()
// Begin new db tx for everything
- tx, err := sdi.dbWriter.db.Begin(sdi.ctx)
+ oldTx, err := sdi.oldDBWriter.DB.Begin(sdi.ctx)
if err != nil {
- return nil, err
+ return nil, 0, err
}
defer func() {
if p := recover(); p != nil {
- rollback(sdi.ctx, tx)
+ rollback(sdi.ctx, oldTx)
panic(p)
} else if err != nil {
- rollback(sdi.ctx, tx)
+ rollback(sdi.ctx, oldTx)
+ }
+ }()
+ newTx, err := sdi.newDBWriter.DB.Begin(sdi.ctx)
+ if err != nil {
+ return nil, 0, err
+ }
+ defer func() {
+ if p := recover(); p != nil {
+ rollback(sdi.ctx, newTx)
+ panic(p)
+ } else if err != nil {
+ rollback(sdi.ctx, newTx)
}
}()
blockTx := &BatchTx{
ctx: sdi.ctx,
BlockNumber: height,
- stm: sdi.dbWriter.db.InsertIPLDsStm(),
- iplds: make(chan models.IPLDModel),
+ oldStmt: sdi.oldDBWriter.DB.InsertIPLDsStm(),
+ newStmt: sdi.newDBWriter.DB.InsertIPLDsStm(),
+ iplds: make(chan sharedModels.IPLDModel),
quit: make(chan struct{}),
- ipldCache: models.IPLDBatch{},
- dbtx: tx,
+ ipldCache: sharedModels.IPLDBatch{},
+ oldDBTx: oldTx,
+ newDBTx: newTx,
// handle transaction commit or rollback for any return case
submit: func(self *BatchTx, err error) error {
defer func() {
@@ -155,24 +208,38 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
close(self.iplds)
}()
if p := recover(); p != nil {
- rollback(sdi.ctx, tx)
+ rollback(sdi.ctx, oldTx)
+ rollback(sdi.ctx, newTx)
panic(p)
} else if err != nil {
- rollback(sdi.ctx, tx)
+ rollback(sdi.ctx, oldTx)
+ rollback(sdi.ctx, newTx)
} else {
tDiff := time.Since(t)
- indexerMetrics.tStateStoreCodeProcessing.Update(tDiff)
+ indexerMetrics.TimeStateStoreCodeProcessing.Update(tDiff)
traceMsg += fmt.Sprintf("state, storage, and code storage processing time: %s\r\n", tDiff.String())
t = time.Now()
if err := self.flush(); err != nil {
- rollback(sdi.ctx, tx)
+ rollback(sdi.ctx, oldTx)
+ rollback(sdi.ctx, newTx)
traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Since(start).String())
log.Debug(traceMsg)
return err
}
- err = tx.Commit(sdi.ctx)
+ errs := make([]string, 0, 2)
+ err = oldTx.Commit(sdi.ctx)
+ if err != nil {
+ errs = append(errs, fmt.Sprintf("old DB tx commit error: %s", err.Error()))
+ }
+ err = newTx.Commit(sdi.ctx)
+ if err != nil {
+ errs = append(errs, fmt.Sprintf("new DB tx commit error: %s", err.Error()))
+ }
+ if len(errs) > 0 {
+ err = errors.New(strings.Join(errs, " && "))
+ }
tDiff = time.Since(t)
- indexerMetrics.tPostgresCommit.Update(tDiff)
+ indexerMetrics.TimePostgresCommit.Update(tDiff)
traceMsg += fmt.Sprintf("postgres transaction commit duration: %s\r\n", tDiff.String())
}
traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Since(start).String())
@@ -183,32 +250,33 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
go blockTx.cache()
tDiff := time.Since(t)
- indexerMetrics.tFreePostgres.Update(tDiff)
+ indexerMetrics.TimeFreePostgres.Update(tDiff)
traceMsg += fmt.Sprintf("time spent waiting for free postgres tx: %s:\r\n", tDiff.String())
t = time.Now()
// Publish and index header, collect headerID
- var headerID string
+ var headerID int64
headerID, err = sdi.processHeader(blockTx, block.Header(), headerNode, reward, totalDifficulty)
if err != nil {
- return nil, err
+ return nil, 0, err
}
tDiff = time.Since(t)
- indexerMetrics.tHeaderProcessing.Update(tDiff)
+ indexerMetrics.TimeHeaderProcessing.Update(tDiff)
traceMsg += fmt.Sprintf("header processing time: %s\r\n", tDiff.String())
t = time.Now()
// Publish and index uncles
- err = sdi.processUncles(blockTx, headerID, height, uncleNodes)
+ err = sdi.processUncles(blockTx, blockHashStr, headerID, height, uncleNodes)
if err != nil {
- return nil, err
+ return nil, 0, err
}
tDiff = time.Since(t)
- indexerMetrics.tUncleProcessing.Update(tDiff)
+ indexerMetrics.TimeUncleProcessing.Update(tDiff)
traceMsg += fmt.Sprintf("uncle processing time: %s\r\n", tDiff.String())
t = time.Now()
// Publish and index receipts and txs
err = sdi.processReceiptsAndTxs(blockTx, processArgs{
+ headerHash: blockHashStr,
headerID: headerID,
blockNumber: block.Number(),
receipts: receipts,
@@ -222,19 +290,19 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
rctLeafNodeCIDs: rctLeafNodeCIDs,
})
if err != nil {
- return nil, err
+ return nil, 0, err
}
tDiff = time.Since(t)
- indexerMetrics.tTxAndRecProcessing.Update(tDiff)
+ indexerMetrics.TimeTxAndRecProcessing.Update(tDiff)
traceMsg += fmt.Sprintf("tx and receipt processing time: %s\r\n", tDiff.String())
t = time.Now()
- return blockTx, err
+ return blockTx, headerID, err
}
// processHeader publishes and indexes a header IPLD in Postgres
// it returns the headerID
-func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, headerNode node.Node, reward, td *big.Int) (string, error) {
+func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, headerNode node.Node, reward, td *big.Int) (int64, error) {
tx.cacheIPLD(headerNode)
var baseFee *string
@@ -242,14 +310,32 @@ func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, he
baseFee = new(string)
*baseFee = header.BaseFee.String()
}
- headerID := header.Hash().String()
// index header
- return headerID, sdi.dbWriter.upsertHeaderCID(tx.dbtx, models.HeaderModel{
+ headerID, err := sdi.oldDBWriter.InsertHeaderCID(tx.oldDBTx, &v2Models.HeaderModel{
CID: headerNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
ParentHash: header.ParentHash.String(),
BlockNumber: header.Number.String(),
- BlockHash: headerID,
+ BlockHash: header.Hash().String(),
+ TotalDifficulty: td.String(),
+ Reward: reward.String(),
+ Bloom: header.Bloom.Bytes(),
+ StateRoot: header.Root.String(),
+ RctRoot: header.ReceiptHash.String(),
+ TxRoot: header.TxHash.String(),
+ UncleRoot: header.UncleHash.String(),
+ Timestamp: header.Time,
+ BaseFee: baseFee,
+ })
+ if err != nil {
+ return 0, err
+ }
+ if err := sdi.newDBWriter.InsertHeaderCID(tx.newDBTx, v3Models.HeaderModel{
+ CID: headerNode.Cid().String(),
+ MhKey: shared.MultihashKeyFromCID(headerNode.Cid()),
+ ParentHash: header.ParentHash.String(),
+ BlockNumber: header.Number.String(),
+ BlockHash: header.Hash().String(),
TotalDifficulty: td.String(),
Reward: reward.String(),
Bloom: header.Bloom.Bytes(),
@@ -259,11 +345,14 @@ func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, he
UncleRoot: header.UncleHash.String(),
Timestamp: header.Time,
Coinbase: header.Coinbase.String(),
- })
+ }); err != nil {
+ return 0, err
+ }
+ return headerID, nil
}
// processUncles publishes and indexes uncle IPLDs in Postgres
-func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNumber uint64, uncleNodes []*ipld2.EthHeader) error {
+func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerHash string, headerID int64, blockNumber uint64, uncleNodes []*ipld.EthHeader) error {
// publish and index uncles
for _, uncleNode := range uncleNodes {
tx.cacheIPLD(uncleNode)
@@ -274,15 +363,24 @@ func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNu
} else {
uncleReward = shared.CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64())
}
- uncle := models.UncleModel{
+ if err := sdi.oldDBWriter.InsertUncleCID(tx.oldDBTx, &v2Models.UncleModel{
HeaderID: headerID,
CID: uncleNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()),
ParentHash: uncleNode.ParentHash.String(),
BlockHash: uncleNode.Hash().String(),
Reward: uncleReward.String(),
+ }); err != nil {
+ return err
}
- if err := sdi.dbWriter.upsertUncleCID(tx.dbtx, uncle); err != nil {
+ if err := sdi.newDBWriter.InsertUncleCID(tx.newDBTx, &v3Models.UncleModel{
+ HeaderID: headerHash,
+ CID: uncleNode.Cid().String(),
+ MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()),
+ ParentHash: uncleNode.ParentHash.String(),
+ BlockHash: uncleNode.Hash().String(),
+ Reward: uncleReward.String(),
+ }); err != nil {
return err
}
}
@@ -291,14 +389,15 @@ func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNu
// processArgs bundles arguments to processReceiptsAndTxs
type processArgs struct {
- headerID string
+ headerID int64
+ headerHash string
blockNumber *big.Int
receipts types.Receipts
txs types.Transactions
- rctNodes []*ipld2.EthReceipt
- rctTrieNodes []*ipld2.EthRctTrie
- txNodes []*ipld2.EthTx
- txTrieNodes []*ipld2.EthTxTrie
+ rctNodes []*ipld.EthReceipt
+ rctTrieNodes []*ipld.EthRctTrie
+ txNodes []*ipld.EthTx
+ txTrieNodes []*ipld.EthTxTrie
logTrieNodes [][]node.Node
logLeafNodeCIDs [][]cid.Cid
rctLeafNodeCIDs []cid.Cid
@@ -317,7 +416,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
// index tx
trx := args.txs[i]
- txID := trx.Hash().String()
+ txHash := trx.Hash().String()
var val string
if trx.Value() != nil {
@@ -329,35 +428,54 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
if err != nil {
return fmt.Errorf("error deriving tx sender: %v", err)
}
- txModel := models.TxModel{
+ txID, err := sdi.oldDBWriter.InsertTransactionCID(tx.oldDBTx, &v2Models.TxModel{
HeaderID: args.headerID,
Dst: shared.HandleZeroAddrPointer(trx.To()),
Src: shared.HandleZeroAddr(from),
- TxHash: txID,
+ TxHash: txHash,
+ Index: int64(i),
+ Data: trx.Data(),
+ CID: txNode.Cid().String(),
+ MhKey: shared.MultihashKeyFromCID(txNode.Cid()),
+ Type: trx.Type(),
+ })
+ if err != nil {
+ return err
+ }
+ if err := sdi.newDBWriter.InsertTransactionCID(tx.newDBTx, &v3Models.TxModel{
+ HeaderID: args.headerHash,
+ Dst: shared.HandleZeroAddrPointer(trx.To()),
+ Src: shared.HandleZeroAddr(from),
+ TxHash: txHash,
Index: int64(i),
Data: trx.Data(),
CID: txNode.Cid().String(),
MhKey: shared.MultihashKeyFromCID(txNode.Cid()),
Type: trx.Type(),
Value: val,
- }
- if err := sdi.dbWriter.upsertTransactionCID(tx.dbtx, txModel); err != nil {
+ }); err != nil {
return err
}
-
// index access list if this is one
for j, accessListElement := range trx.AccessList() {
storageKeys := make([]string, len(accessListElement.StorageKeys))
for k, storageKey := range accessListElement.StorageKeys {
storageKeys[k] = storageKey.Hex()
}
- accessListElementModel := models.AccessListElementModel{
+ if err := sdi.oldDBWriter.InsertAccessListElement(tx.oldDBTx, &v2Models.AccessListElementModel{
TxID: txID,
Index: int64(j),
Address: accessListElement.Address.Hex(),
StorageKeys: storageKeys,
+ }); err != nil {
+ return err
}
- if err := sdi.dbWriter.upsertAccessListElement(tx.dbtx, accessListElementModel); err != nil {
+ if err := sdi.newDBWriter.InsertAccessListElement(tx.newDBTx, &v3Models.AccessListElementModel{
+ TxID: txHash,
+ Index: int64(j),
+ Address: accessListElement.Address.Hex(),
+ StorageKeys: storageKeys,
+ }); err != nil {
return err
}
}
@@ -374,26 +492,44 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
return fmt.Errorf("invalid receipt leaf node cid")
}
- rctModel := &models.ReceiptModel{
+ var postState string
+ var postStatus uint64
+ if len(receipt.PostState) == 0 {
+ postStatus = receipt.Status
+ } else {
+ postState = common.Bytes2Hex(receipt.PostState)
+ }
+
+ rctID, err := sdi.oldDBWriter.InsertReceiptCID(tx.oldDBTx, &v2Models.ReceiptModel{
TxID: txID,
Contract: contract,
ContractHash: contractHash,
LeafCID: args.rctLeafNodeCIDs[i].String(),
LeafMhKey: shared.MultihashKeyFromCID(args.rctLeafNodeCIDs[i]),
LogRoot: args.rctNodes[i].LogRoot.String(),
+ PostState: postState,
+ PostStatus: postStatus,
+ })
+ if err != nil {
+ return err
}
- if len(receipt.PostState) == 0 {
- rctModel.PostStatus = receipt.Status
- } else {
- rctModel.PostState = common.Bytes2Hex(receipt.PostState)
- }
-
- if err := sdi.dbWriter.upsertReceiptCID(tx.dbtx, rctModel); err != nil {
+ if err := sdi.newDBWriter.InsertReceiptCID(tx.newDBTx, &v3Models.ReceiptModel{
+ TxID: txHash,
+ Contract: contract,
+ ContractHash: contractHash,
+ LeafCID: args.rctLeafNodeCIDs[i].String(),
+ LeafMhKey: shared.MultihashKeyFromCID(args.rctLeafNodeCIDs[i]),
+ LogRoot: args.rctNodes[i].LogRoot.String(),
+ PostState: postState,
+ PostStatus: postStatus,
+ }); err != nil {
return err
}
// index logs
- logDataSet := make([]*models.LogsModel, len(receipt.Logs))
+ rctLen := len(receipt.Logs)
+ oldLogDataSet := make([]*v2Models.LogsModel, rctLen)
+ newLogDataSet := make([]*v3Models.LogsModel, rctLen)
for idx, l := range receipt.Logs {
topicSet := make([]string, 4)
for ti, topic := range l.Topics {
@@ -404,8 +540,20 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
return fmt.Errorf("invalid log cid")
}
- logDataSet[idx] = &models.LogsModel{
- ReceiptID: txID,
+ oldLogDataSet[idx] = &v2Models.LogsModel{
+ ReceiptID: rctID,
+ Address: l.Address.String(),
+ Index: int64(l.Index),
+ Data: l.Data,
+ LeafCID: args.logLeafNodeCIDs[i][idx].String(),
+ LeafMhKey: shared.MultihashKeyFromCID(args.logLeafNodeCIDs[i][idx]),
+ Topic0: topicSet[0],
+ Topic1: topicSet[1],
+ Topic2: topicSet[2],
+ Topic3: topicSet[3],
+ }
+ newLogDataSet[idx] = &v3Models.LogsModel{
+ ReceiptID: txHash,
Address: l.Address.String(),
Index: int64(l.Index),
Data: l.Data,
@@ -417,8 +565,10 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
Topic3: topicSet[3],
}
}
-
- if err := sdi.dbWriter.upsertLogCID(tx.dbtx, logDataSet); err != nil {
+ if err := sdi.oldDBWriter.InsertLogCID(tx.oldDBTx, oldLogDataSet); err != nil {
+ return err
+ }
+ if err := sdi.newDBWriter.InsertLogCID(tx.newDBTx, newLogDataSet); err != nil {
return err
}
}
@@ -433,7 +583,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
}
// PushStateNode publishes and indexes a state diff node object (including any child storage nodes) in the IPLD sql
-func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerID string) error {
+func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerHash string, headerID int64) error {
tx, ok := batch.(*BatchTx)
if !ok {
return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch)
@@ -442,30 +592,47 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
if stateNode.NodeType == sdtypes.Removed {
// short circuit if it is a Removed node
// this assumes the db has been initialized and a public.blocks entry for the Removed node is present
- stateModel := models.StateNodeModel{
+ _, err := sdi.oldDBWriter.InsertStateCID(tx.oldDBTx, &v2Models.StateNodeModel{
HeaderID: headerID,
Path: stateNode.Path,
StateKey: common.BytesToHash(stateNode.LeafKey).String(),
CID: shared.RemovedNodeStateCID,
MhKey: shared.RemovedNodeMhKey,
NodeType: stateNode.NodeType.Int(),
+ })
+ if err != nil {
+ return err
}
- return sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel)
+ return sdi.newDBWriter.InsertStateCID(tx.newDBTx, &v3Models.StateNodeModel{
+ HeaderID: headerHash,
+ Path: stateNode.Path,
+ StateKey: common.BytesToHash(stateNode.LeafKey).String(),
+ CID: shared.RemovedNodeStateCID,
+ MhKey: shared.RemovedNodeMhKey,
+ NodeType: stateNode.NodeType.Int(),
+ })
}
- stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
+ stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue)
if err != nil {
return fmt.Errorf("error generating and cacheing state node IPLD: %v", err)
}
- stateModel := models.StateNodeModel{
+ // index the state node
+ stateID, err := sdi.oldDBWriter.InsertStateCID(tx.oldDBTx, &v2Models.StateNodeModel{
HeaderID: headerID,
Path: stateNode.Path,
StateKey: common.BytesToHash(stateNode.LeafKey).String(),
CID: stateCIDStr,
MhKey: stateMhKey,
NodeType: stateNode.NodeType.Int(),
- }
- // index the state node
- if err := sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel); err != nil {
+ })
+ if err != nil {
+ return err
+ }
+ if err := sdi.newDBWriter.InsertStateCID(tx.newDBTx, &v3Models.StateNodeModel{
+ HeaderID: headerHash,
+ Path: stateNode.Path,
+ StateKey: common.BytesToHash(stateNode.LeafKey).String(),
+ CID: stateCIDStr,
+ MhKey: stateMhKey,
+ NodeType: stateNode.NodeType.Int(),
+ }); err != nil {
return err
}
// if we have a leaf, decode and index the account data
@@ -481,15 +648,23 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil {
return fmt.Errorf("error decoding state account rlp: %s", err.Error())
}
- accountModel := models.StateAccountModel{
- HeaderID: headerID,
+ if err := sdi.oldDBWriter.InsertStateAccount(tx.oldDBTx, &v2Models.StateAccountModel{
+ StateID: stateID,
+ Balance: account.Balance.String(),
+ Nonce: account.Nonce,
+ CodeHash: account.CodeHash,
+ StorageRoot: account.Root.String(),
+ }); err != nil {
+ return err
+ }
+ if err := sdi.newDBWriter.InsertStateAccount(tx.newDBTx, &v3Models.StateAccountModel{
+ HeaderID: headerHash,
StatePath: stateNode.Path,
Balance: account.Balance.String(),
Nonce: account.Nonce,
CodeHash: account.CodeHash,
StorageRoot: account.Root.String(),
- }
- if err := sdi.dbWriter.upsertStateAccount(tx.dbtx, accountModel); err != nil {
+ }); err != nil {
return err
}
}
@@ -498,34 +673,52 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt
if storageNode.NodeType == sdtypes.Removed {
// short circuit if it is a Removed node
// this assumes the db has been initialized and a public.blocks entry for the Removed node is present
- storageModel := models.StorageNodeModel{
- HeaderID: headerID,
+ if err := sdi.oldDBWriter.InsertStorageCID(tx.oldDBTx, &v2Models.StorageNodeModel{
+ StateID: stateID,
+ Path: storageNode.Path,
+ StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
+ CID: shared.RemovedNodeStorageCID,
+ MhKey: shared.RemovedNodeMhKey,
+ NodeType: storageNode.NodeType.Int(),
+ }); err != nil {
+ return err
+ }
+ if err := sdi.newDBWriter.InsertStorageCID(tx.newDBTx, &v3Models.StorageNodeModel{
+ HeaderID: headerHash,
StatePath: stateNode.Path,
Path: storageNode.Path,
StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
CID: shared.RemovedNodeStorageCID,
MhKey: shared.RemovedNodeMhKey,
NodeType: storageNode.NodeType.Int(),
- }
- if err := sdi.dbWriter.upsertStorageCID(tx.dbtx, storageModel); err != nil {
+ }); err != nil {
return err
}
continue
}
- storageCIDStr, storageMhKey, err := tx.cacheRaw(ipld2.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue)
+ storageCIDStr, storageMhKey, err := tx.cacheRaw(ipld.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue)
if err != nil {
return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err)
}
- storageModel := models.StorageNodeModel{
- HeaderID: headerID,
+ if err := sdi.oldDBWriter.InsertStorageCID(tx.oldDBTx, &v2Models.StorageNodeModel{
+ StateID: stateID,
+ Path: storageNode.Path,
+ StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
+ CID: storageCIDStr,
+ MhKey: storageMhKey,
+ NodeType: storageNode.NodeType.Int(),
+ }); err != nil {
+ return err
+ }
+ if err := sdi.newDBWriter.InsertStorageCID(tx.newDBTx, &v3Models.StorageNodeModel{
+ HeaderID: headerHash,
StatePath: stateNode.Path,
Path: storageNode.Path,
StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
CID: storageCIDStr,
MhKey: storageMhKey,
NodeType: storageNode.NodeType.Int(),
- }
- if err := sdi.dbWriter.upsertStorageCID(tx.dbtx, storageModel); err != nil {
+ }); err != nil {
return err
}
}
@@ -550,5 +743,8 @@ func (sdi *StateDiffIndexer) PushCodeAndCodeHash(batch interfaces.Batch, codeAnd
// Close satisfies io.Closer
func (sdi *StateDiffIndexer) Close() error {
- return sdi.dbWriter.Close()
+ if err := sdi.oldDBWriter.Close(); err != nil {
+ return err
+ }
+ return sdi.newDBWriter.Close()
}
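
Note: every insert in the indexer above follows the same dual-write shape: write the row to the v2 store (capturing its surrogate id where later rows need it), then write the v3 equivalent, and surface the first error. A minimal, self-contained sketch of that shape; the helper and parameter names are illustrative and not part of this patch.

    package example

    import "fmt"

    // writeBoth runs the v2 write, then the v3 write, returning the first failure.
    func writeBoth(writeV2, writeV3 func() error) error {
        if err := writeV2(); err != nil {
            return fmt.Errorf("v2 write: %w", err)
        }
        if err := writeV3(); err != nil {
            return fmt.Errorf("v3 write: %w", err)
        }
        return nil
    }
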
diff --git a/statediff/indexer/database/sql/indexer_shared_test.go b/statediff/indexer/database/sql/indexer_shared_test.go
index 8bbab22ba..997267c46 100644
--- a/statediff/indexer/database/sql/indexer_shared_test.go
+++ b/statediff/indexer/database/sql/indexer_shared_test.go
@@ -12,14 +12,13 @@ import (
"github.com/multiformats/go-multihash"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
)
var (
- db sql.Database
+ db interfaces.Database
err error
ind interfaces.StateDiffIndexer
ipfsPgGet = `SELECT data FROM public.blocks
diff --git a/statediff/indexer/database/sql/interfaces.go b/statediff/indexer/database/sql/interfaces.go
deleted file mode 100644
index 445b35d9b..000000000
--- a/statediff/indexer/database/sql/interfaces.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// VulcanizeDB
-// Copyright © 2021 Vulcanize
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package sql
-
-import (
- "context"
- "io"
- "time"
-)
-
-// Database interfaces required by the sql indexer
-type Database interface {
- Driver
- Statements
-}
-
-// Driver interface has all the methods required by a driver implementation to support the sql indexer
-type Driver interface {
- QueryRow(ctx context.Context, sql string, args ...interface{}) ScannableRow
- Exec(ctx context.Context, sql string, args ...interface{}) (Result, error)
- Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error
- Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error
- Begin(ctx context.Context) (Tx, error)
- Stats() Stats
- NodeID() string
- Context() context.Context
- io.Closer
-}
-
-// Statements interface to accommodate different SQL query syntax
-type Statements interface {
- InsertHeaderStm() string
- InsertUncleStm() string
- InsertTxStm() string
- InsertAccessListElementStm() string
- InsertRctStm() string
- InsertLogStm() string
- InsertStateStm() string
- InsertAccountStm() string
- InsertStorageStm() string
- InsertIPLDStm() string
- InsertIPLDsStm() string
-}
-
-// Tx interface to accommodate different concrete SQL transaction types
-type Tx interface {
- QueryRow(ctx context.Context, sql string, args ...interface{}) ScannableRow
- Exec(ctx context.Context, sql string, args ...interface{}) (Result, error)
- Commit(ctx context.Context) error
- Rollback(ctx context.Context) error
-}
-
-// ScannableRow interface to accommodate different concrete row types
-type ScannableRow interface {
- Scan(dest ...interface{}) error
-}
-
-// Result interface to accommodate different concrete result types
-type Result interface {
- RowsAffected() (int64, error)
-}
-
-// Stats interface to accommodate different concrete sql stats types
-type Stats interface {
- MaxOpen() int64
- Open() int64
- InUse() int64
- Idle() int64
- WaitCount() int64
- WaitDuration() time.Duration
- MaxIdleClosed() int64
- MaxLifetimeClosed() int64
-}
diff --git a/statediff/indexer/database/sql/metrics.go b/statediff/indexer/database/sql/metrics.go
index b0946a722..f59edcf14 100644
--- a/statediff/indexer/database/sql/metrics.go
+++ b/statediff/indexer/database/sql/metrics.go
@@ -19,6 +19,8 @@ package sql
import (
"strings"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+
"github.com/ethereum/go-ethereum/metrics"
)
@@ -39,57 +41,57 @@ func metricName(subsystem, name string) string {
return strings.Join(parts, "/")
}
-type indexerMetricsHandles struct {
+type IndexerMetricsHandles struct {
// The total number of processed blocks
- blocks metrics.Counter
+ Blocks metrics.Counter
// The total number of processed transactions
- transactions metrics.Counter
+ Transactions metrics.Counter
// The total number of processed receipts
- receipts metrics.Counter
+ Receipts metrics.Counter
// The total number of processed logs
- logs metrics.Counter
+ Logs metrics.Counter
// The total number of access list entries processed
- accessListEntries metrics.Counter
+ AccessListEntries metrics.Counter
// Time spent waiting for free postgres tx
- tFreePostgres metrics.Timer
+ TimeFreePostgres metrics.Timer
// Postgres transaction commit duration
- tPostgresCommit metrics.Timer
+ TimePostgresCommit metrics.Timer
// Header processing time
- tHeaderProcessing metrics.Timer
+ TimeHeaderProcessing metrics.Timer
// Uncle processing time
- tUncleProcessing metrics.Timer
+ TimeUncleProcessing metrics.Timer
// Tx and receipt processing time
- tTxAndRecProcessing metrics.Timer
+ TimeTxAndRecProcessing metrics.Timer
// State, storage, and code combined processing time
- tStateStoreCodeProcessing metrics.Timer
+ TimeStateStoreCodeProcessing metrics.Timer
}
-func RegisterIndexerMetrics(reg metrics.Registry) indexerMetricsHandles {
- ctx := indexerMetricsHandles{
- blocks: metrics.NewCounter(),
- transactions: metrics.NewCounter(),
- receipts: metrics.NewCounter(),
- logs: metrics.NewCounter(),
- accessListEntries: metrics.NewCounter(),
- tFreePostgres: metrics.NewTimer(),
- tPostgresCommit: metrics.NewTimer(),
- tHeaderProcessing: metrics.NewTimer(),
- tUncleProcessing: metrics.NewTimer(),
- tTxAndRecProcessing: metrics.NewTimer(),
- tStateStoreCodeProcessing: metrics.NewTimer(),
+func RegisterIndexerMetrics(reg metrics.Registry) IndexerMetricsHandles {
+ ctx := IndexerMetricsHandles{
+ Blocks: metrics.NewCounter(),
+ Transactions: metrics.NewCounter(),
+ Receipts: metrics.NewCounter(),
+ Logs: metrics.NewCounter(),
+ AccessListEntries: metrics.NewCounter(),
+ TimeFreePostgres: metrics.NewTimer(),
+ TimePostgresCommit: metrics.NewTimer(),
+ TimeHeaderProcessing: metrics.NewTimer(),
+ TimeUncleProcessing: metrics.NewTimer(),
+ TimeTxAndRecProcessing: metrics.NewTimer(),
+ TimeStateStoreCodeProcessing: metrics.NewTimer(),
}
subsys := "indexer"
- reg.Register(metricName(subsys, "blocks"), ctx.blocks)
- reg.Register(metricName(subsys, "transactions"), ctx.transactions)
- reg.Register(metricName(subsys, "receipts"), ctx.receipts)
- reg.Register(metricName(subsys, "logs"), ctx.logs)
- reg.Register(metricName(subsys, "access_list_entries"), ctx.accessListEntries)
- reg.Register(metricName(subsys, "t_free_postgres"), ctx.tFreePostgres)
- reg.Register(metricName(subsys, "t_postgres_commit"), ctx.tPostgresCommit)
- reg.Register(metricName(subsys, "t_header_processing"), ctx.tHeaderProcessing)
- reg.Register(metricName(subsys, "t_uncle_processing"), ctx.tUncleProcessing)
- reg.Register(metricName(subsys, "t_tx_receipt_processing"), ctx.tTxAndRecProcessing)
- reg.Register(metricName(subsys, "t_state_store_code_processing"), ctx.tStateStoreCodeProcessing)
+ reg.Register(metricName(subsys, "blocks"), ctx.Blocks)
+ reg.Register(metricName(subsys, "transactions"), ctx.Transactions)
+ reg.Register(metricName(subsys, "receipts"), ctx.Receipts)
+ reg.Register(metricName(subsys, "logs"), ctx.Logs)
+ reg.Register(metricName(subsys, "access_list_entries"), ctx.AccessListEntries)
+ reg.Register(metricName(subsys, "t_free_postgres"), ctx.TimeFreePostgres)
+ reg.Register(metricName(subsys, "t_postgres_commit"), ctx.TimePostgresCommit)
+ reg.Register(metricName(subsys, "t_header_processing"), ctx.TimeHeaderProcessing)
+ reg.Register(metricName(subsys, "t_uncle_processing"), ctx.TimeUncleProcessing)
+ reg.Register(metricName(subsys, "t_tx_receipt_processing"), ctx.TimeTxAndRecProcessing)
+ reg.Register(metricName(subsys, "t_state_store_code_processing"), ctx.TimeStateStoreCodeProcessing)
return ctx
}
@@ -135,7 +137,7 @@ func RegisterDBMetrics(reg metrics.Registry) dbMetricsHandles {
return ctx
}
-func (met *dbMetricsHandles) Update(stats Stats) {
+func (met *dbMetricsHandles) Update(stats interfaces.Stats) {
met.maxOpen.Update(stats.MaxOpen())
met.open.Update(stats.Open())
met.inUse.Update(stats.InUse())
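
With the metric handles now exported, other packages can register and drive them directly. A small usage sketch, assuming only the exported names introduced above and the go-ethereum metrics registry API.

    package example

    import (
        "time"

        "github.com/ethereum/go-ethereum/metrics"
        "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
    )

    func recordSample() {
        reg := metrics.NewRegistry()
        h := sql.RegisterIndexerMetrics(reg)
        h.Blocks.Inc(1)                                    // one block processed
        h.TimePostgresCommit.Update(25 * time.Millisecond) // one commit latency sample
    }
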
diff --git a/statediff/indexer/database/sql/pgx_indexer_legacy_test.go b/statediff/indexer/database/sql/pgx_indexer_legacy_test.go
index 768652b46..37ea11881 100644
--- a/statediff/indexer/database/sql/pgx_indexer_legacy_test.go
+++ b/statediff/indexer/database/sql/pgx_indexer_legacy_test.go
@@ -20,6 +20,8 @@ import (
"context"
"testing"
+ nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
+
"github.com/multiformats/go-multihash"
"github.com/stretchr/testify/require"
@@ -34,10 +36,10 @@ func setupLegacyPGX(t *testing.T) {
mockLegacyBlock = legacyData.MockBlock
legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256)
- db, err = postgres.SetupPGXDB()
+ db, err = postgres.SetupV3PGXDB()
require.NoError(t, err)
- ind, err = sql.NewStateDiffIndexer(context.Background(), legacyData.Config, db)
+ ind, err = sql.NewStateDiffIndexer(context.Background(), legacyData.Config, nodeinfo.Info{}, db, nil)
require.NoError(t, err)
var tx interfaces.Batch
tx, err = ind.PushBlock(
@@ -52,7 +54,7 @@ func setupLegacyPGX(t *testing.T) {
}
}()
for _, node := range legacyData.StateDiffs {
- err = ind.PushStateNode(tx, node, legacyData.MockBlock.Hash().String())
+ err = ind.PushStateNode(tx, node, legacyData.MockBlock.Hash().String(), 0)
require.NoError(t, err)
}
diff --git a/statediff/indexer/database/sql/pgx_indexer_test.go b/statediff/indexer/database/sql/pgx_indexer_test.go
index 110f5f223..deb4bd6c6 100644
--- a/statediff/indexer/database/sql/pgx_indexer_test.go
+++ b/statediff/indexer/database/sql/pgx_indexer_test.go
@@ -32,17 +32,19 @@ import (
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
- "github.com/ethereum/go-ethereum/statediff/indexer/models"
+ sharedModels "github.com/ethereum/go-ethereum/statediff/indexer/models/shared"
+ v3Models "github.com/ethereum/go-ethereum/statediff/indexer/models/v3"
+ nodeInfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
)
func setupPGX(t *testing.T) {
- db, err = postgres.SetupPGXDB()
+ db, err = postgres.SetupV3PGXDB()
if err != nil {
t.Fatal(err)
}
- ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestConfig, db)
+ ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestConfig, nodeInfo.Info{}, db, nil)
require.NoError(t, err)
var tx interfaces.Batch
tx, err = ind.PushBlock(
@@ -58,7 +60,7 @@ func setupPGX(t *testing.T) {
}
}()
for _, node := range mocks.StateDiffs {
- err = ind.PushStateNode(tx, node, mockBlock.Hash().String())
+ err = ind.PushStateNode(tx, node, mockBlock.Hash().String(), 0)
require.NoError(t, err)
}
@@ -197,7 +199,7 @@ func TestPGXIndexer(t *testing.T) {
if txRes.Value != transactions[3].Value().String() {
t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value)
}
- accessListElementModels := make([]models.AccessListElementModel, 0)
+ accessListElementModels := make([]v3Models.AccessListElementModel, 0)
pgStr = `SELECT access_list_elements.* FROM eth.access_list_elements INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.tx_hash) WHERE cid = $1 ORDER BY access_list_elements.index ASC`
err = db.Select(context.Background(), &accessListElementModels, pgStr, c)
if err != nil {
@@ -206,11 +208,11 @@ func TestPGXIndexer(t *testing.T) {
if len(accessListElementModels) != 2 {
t.Fatalf("expected two access list entries, got %d", len(accessListElementModels))
}
- model1 := models.AccessListElementModel{
+ model1 := v3Models.AccessListElementModel{
Index: accessListElementModels[0].Index,
Address: accessListElementModels[0].Address,
}
- model2 := models.AccessListElementModel{
+ model2 := v3Models.AccessListElementModel{
Index: accessListElementModels[1].Index,
Address: accessListElementModels[1].Address,
StorageKeys: accessListElementModels[1].StorageKeys,
@@ -313,7 +315,7 @@ func TestPGXIndexer(t *testing.T) {
expectTrue(t, test_helpers.ListContainsString(rcts, rct5CID.String()))
for idx, c := range rcts {
- result := make([]models.IPLDModel, 0)
+ result := make([]sharedModels.IPLDModel, 0)
pgStr = `SELECT data
FROM eth.receipt_cids
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key)
@@ -396,7 +398,7 @@ func TestPGXIndexer(t *testing.T) {
setupPGX(t)
defer tearDown(t)
// check that state nodes were properly indexed and published
- stateNodes := make([]models.StateNodeModel, 0)
+ stateNodes := make([]v3Models.StateNodeModel, 0)
pgStr := `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
WHERE header_cids.block_number = $1 AND node_type != 3`
@@ -418,7 +420,7 @@ func TestPGXIndexer(t *testing.T) {
t.Fatal(err)
}
pgStr = `SELECT header_id, state_path, cast(balance AS TEXT), nonce, code_hash, storage_root from eth.state_accounts WHERE header_id = $1 AND state_path = $2`
- var account models.StateAccountModel
+ var account v3Models.StateAccountModel
err = db.Get(context.Background(), &account, pgStr, stateNode.HeaderID, stateNode.Path)
if err != nil {
t.Fatal(err)
@@ -428,7 +430,7 @@ func TestPGXIndexer(t *testing.T) {
test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.ContractLeafKey).Hex())
test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x06'})
test_helpers.ExpectEqual(t, data, mocks.ContractLeafNode)
- test_helpers.ExpectEqual(t, account, models.StateAccountModel{
+ test_helpers.ExpectEqual(t, account, v3Models.StateAccountModel{
HeaderID: account.HeaderID,
StatePath: stateNode.Path,
Balance: "0",
@@ -442,7 +444,7 @@ func TestPGXIndexer(t *testing.T) {
test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.AccountLeafKey).Hex())
test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x0c'})
test_helpers.ExpectEqual(t, data, mocks.AccountLeafNode)
- test_helpers.ExpectEqual(t, account, models.StateAccountModel{
+ test_helpers.ExpectEqual(t, account, v3Models.StateAccountModel{
HeaderID: account.HeaderID,
StatePath: stateNode.Path,
Balance: "1000",
@@ -454,7 +456,7 @@ func TestPGXIndexer(t *testing.T) {
}
// check that Removed state nodes were properly indexed and published
- stateNodes = make([]models.StateNodeModel, 0)
+ stateNodes = make([]v3Models.StateNodeModel, 0)
pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
WHERE header_cids.block_number = $1 AND node_type = 3`
@@ -485,7 +487,7 @@ func TestPGXIndexer(t *testing.T) {
setupPGX(t)
defer tearDown(t)
// check that storage nodes were properly indexed
- storageNodes := make([]models.StorageNodeWithStateKeyModel, 0)
+ storageNodes := make([]v3Models.StorageNodeWithStateKeyModel, 0)
pgStr := `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
@@ -497,7 +499,7 @@ func TestPGXIndexer(t *testing.T) {
t.Fatal(err)
}
test_helpers.ExpectEqual(t, len(storageNodes), 1)
- test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
+ test_helpers.ExpectEqual(t, storageNodes[0], v3Models.StorageNodeWithStateKeyModel{
CID: storageCID.String(),
NodeType: 2,
StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(),
@@ -518,7 +520,7 @@ func TestPGXIndexer(t *testing.T) {
test_helpers.ExpectEqual(t, data, mocks.StorageLeafNode)
// check that Removed storage nodes were properly indexed
- storageNodes = make([]models.StorageNodeWithStateKeyModel, 0)
+ storageNodes = make([]v3Models.StorageNodeWithStateKeyModel, 0)
pgStr = `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
@@ -530,7 +532,7 @@ func TestPGXIndexer(t *testing.T) {
t.Fatal(err)
}
test_helpers.ExpectEqual(t, len(storageNodes), 1)
- test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
+ test_helpers.ExpectEqual(t, storageNodes[0], v3Models.StorageNodeWithStateKeyModel{
CID: shared.RemovedNodeStorageCID,
NodeType: 3,
StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(),
diff --git a/statediff/indexer/database/sql/postgres/config.go b/statediff/indexer/database/sql/postgres/config.go
index 095b4dd24..3ef5c71c9 100644
--- a/statediff/indexer/database/sql/postgres/config.go
+++ b/statediff/indexer/database/sql/postgres/config.go
@@ -54,6 +54,17 @@ var DefaultConfig = Config{
Password: "password",
}
+// MultiConfig holds the v2 and v3 Postgres configs used for dual writing
+type MultiConfig struct {
+ V2 Config
+ V3 Config
+}
+
+// Type satisfies interfaces.Config
+func (mc MultiConfig) Type() shared.DBType {
+ return shared.POSTGRES
+}
+
// Config holds params for a Postgres db
type Config struct {
// conn string params
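
A sketch of wiring the new MultiConfig with separate v2 and v3 targets. DatabaseName and any other connection fields are assumed to follow the existing postgres.Config; the database names are placeholders.

    package example

    import "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"

    // dualConfig points the v2 and v3 writers at separate databases using the same driver.
    func dualConfig() postgres.MultiConfig {
        return postgres.MultiConfig{
            V2: postgres.Config{DatabaseName: "vulcanize_v2", Driver: postgres.SQLX},
            V3: postgres.Config{DatabaseName: "vulcanize_v3", Driver: postgres.SQLX},
        }
    }
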
diff --git a/statediff/indexer/database/sql/postgres/pgx.go b/statediff/indexer/database/sql/postgres/pgx.go
index 936a3765d..b720f7fe5 100644
--- a/statediff/indexer/database/sql/postgres/pgx.go
+++ b/statediff/indexer/database/sql/postgres/pgx.go
@@ -25,21 +25,18 @@ import (
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/pgxpool"
- "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
- "github.com/ethereum/go-ethereum/statediff/indexer/node"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
)
// PGXDriver driver, implements sql.Driver
type PGXDriver struct {
- ctx context.Context
- pool *pgxpool.Pool
- nodeInfo node.Info
- nodeID string
+ ctx context.Context
+ pool *pgxpool.Pool
}
// NewPGXDriver returns a new pgx driver
-// it initializes the connection pool and creates the node info table
+// it initializes the connection pool
-func NewPGXDriver(ctx context.Context, config Config, node node.Info) (*PGXDriver, error) {
+func NewPGXDriver(ctx context.Context, config Config) (*PGXDriver, error) {
pgConf, err := MakeConfig(config)
if err != nil {
return nil, err
@@ -48,11 +45,7 @@ func NewPGXDriver(ctx context.Context, config Config, node node.Info) (*PGXDrive
if err != nil {
return nil, ErrDBConnectionFailed(err)
}
- pg := &PGXDriver{ctx: ctx, pool: dbPool, nodeInfo: node}
- nodeErr := pg.createNode()
- if nodeErr != nil {
- return &PGXDriver{}, ErrUnableToSetNode(nodeErr)
- }
+ pg := &PGXDriver{ctx: ctx, pool: dbPool}
return pg, nil
}
@@ -88,27 +81,13 @@ func MakeConfig(config Config) (*pgxpool.Config, error) {
return conf, nil
}
-func (pgx *PGXDriver) createNode() error {
- _, err := pgx.pool.Exec(
- pgx.ctx,
- createNodeStm,
- pgx.nodeInfo.GenesisBlock, pgx.nodeInfo.NetworkID,
- pgx.nodeInfo.ID, pgx.nodeInfo.ClientName,
- pgx.nodeInfo.ChainID)
- if err != nil {
- return ErrUnableToSetNode(err)
- }
- pgx.nodeID = pgx.nodeInfo.ID
- return nil
-}
-
// QueryRow satisfies sql.Database
-func (pgx *PGXDriver) QueryRow(ctx context.Context, sql string, args ...interface{}) sql.ScannableRow {
+func (pgx *PGXDriver) QueryRow(ctx context.Context, sql string, args ...interface{}) interfaces.ScannableRow {
return pgx.pool.QueryRow(ctx, sql, args...)
}
// Exec satisfies sql.Database
-func (pgx *PGXDriver) Exec(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) {
+func (pgx *PGXDriver) Exec(ctx context.Context, sql string, args ...interface{}) (interfaces.Result, error) {
res, err := pgx.pool.Exec(ctx, sql, args...)
return resultWrapper{ct: res}, err
}
@@ -124,7 +103,7 @@ func (pgx *PGXDriver) Get(ctx context.Context, dest interface{}, query string, a
}
// Begin satisfies sql.Database
-func (pgx *PGXDriver) Begin(ctx context.Context) (sql.Tx, error) {
+func (pgx *PGXDriver) Begin(ctx context.Context) (interfaces.Tx, error) {
tx, err := pgx.pool.Begin(ctx)
if err != nil {
return nil, err
@@ -132,16 +111,11 @@ func (pgx *PGXDriver) Begin(ctx context.Context) (sql.Tx, error) {
return pgxTxWrapper{tx: tx}, nil
}
-func (pgx *PGXDriver) Stats() sql.Stats {
+func (pgx *PGXDriver) Stats() interfaces.Stats {
stats := pgx.pool.Stat()
return pgxStatsWrapper{stats: stats}
}
-// NodeID satisfies sql.Database
-func (pgx *PGXDriver) NodeID() string {
- return pgx.nodeID
-}
-
// Close satisfies sql.Database/io.Closer
func (pgx *PGXDriver) Close() error {
pgx.pool.Close()
@@ -212,12 +186,12 @@ type pgxTxWrapper struct {
}
// QueryRow satisfies sql.Tx
-func (t pgxTxWrapper) QueryRow(ctx context.Context, sql string, args ...interface{}) sql.ScannableRow {
+func (t pgxTxWrapper) QueryRow(ctx context.Context, sql string, args ...interface{}) interfaces.ScannableRow {
return t.tx.QueryRow(ctx, sql, args...)
}
// Exec satisfies sql.Tx
-func (t pgxTxWrapper) Exec(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) {
+func (t pgxTxWrapper) Exec(ctx context.Context, sql string, args ...interface{}) (interfaces.Result, error) {
res, err := t.tx.Exec(ctx, sql, args...)
return resultWrapper{ct: res}, err
}
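
Because the driver no longer takes node.Info or writes the nodes row itself, opening a connection needs only a context and a Config; node metadata is inserted later by the writer. A usage sketch built from names in this patch.

    package example

    import (
        "context"
        "log"

        "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
    )

    func openPGX() {
        // The driver is now connection-only; the nodes row is written elsewhere.
        driver, err := postgres.NewPGXDriver(context.Background(), postgres.DefaultConfig)
        if err != nil {
            log.Fatal(err)
        }
        defer driver.Close()
    }
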
diff --git a/statediff/indexer/database/sql/postgres/sqlx.go b/statediff/indexer/database/sql/postgres/sqlx.go
index 406b44a19..733c35734 100644
--- a/statediff/indexer/database/sql/postgres/sqlx.go
+++ b/statediff/indexer/database/sql/postgres/sqlx.go
@@ -21,23 +21,20 @@ import (
coresql "database/sql"
"time"
- "github.com/jmoiron/sqlx"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
- "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
- "github.com/ethereum/go-ethereum/statediff/indexer/node"
+ "github.com/jmoiron/sqlx"
)
// SQLXDriver driver, implements sql.Driver
type SQLXDriver struct {
- ctx context.Context
- db *sqlx.DB
- nodeInfo node.Info
- nodeID string
+ ctx context.Context
+ db *sqlx.DB
}
// NewSQLXDriver returns a new sqlx driver for Postgres
-// it initializes the connection pool and creates the node info table
+// it initializes the connection pool
-func NewSQLXDriver(ctx context.Context, config Config, node node.Info) (*SQLXDriver, error) {
+func NewSQLXDriver(ctx context.Context, config Config) (*SQLXDriver, error) {
db, err := sqlx.ConnectContext(ctx, "postgres", config.DbConnectionString())
if err != nil {
return &SQLXDriver{}, ErrDBConnectionFailed(err)
@@ -52,33 +49,17 @@ func NewSQLXDriver(ctx context.Context, config Config, node node.Info) (*SQLXDri
lifetime := config.MaxConnLifetime
db.SetConnMaxLifetime(lifetime)
}
- driver := &SQLXDriver{ctx: ctx, db: db, nodeInfo: node}
- if err := driver.createNode(); err != nil {
- return &SQLXDriver{}, ErrUnableToSetNode(err)
- }
+ driver := &SQLXDriver{ctx: ctx, db: db}
return driver, nil
}
-func (driver *SQLXDriver) createNode() error {
- _, err := driver.db.Exec(
- createNodeStm,
- driver.nodeInfo.GenesisBlock, driver.nodeInfo.NetworkID,
- driver.nodeInfo.ID, driver.nodeInfo.ClientName,
- driver.nodeInfo.ChainID)
- if err != nil {
- return ErrUnableToSetNode(err)
- }
- driver.nodeID = driver.nodeInfo.ID
- return nil
-}
-
// QueryRow satisfies sql.Database
-func (driver *SQLXDriver) QueryRow(_ context.Context, sql string, args ...interface{}) sql.ScannableRow {
+func (driver *SQLXDriver) QueryRow(_ context.Context, sql string, args ...interface{}) interfaces.ScannableRow {
return driver.db.QueryRowx(sql, args...)
}
// Exec satisfies sql.Database
-func (driver *SQLXDriver) Exec(_ context.Context, sql string, args ...interface{}) (sql.Result, error) {
+func (driver *SQLXDriver) Exec(_ context.Context, sql string, args ...interface{}) (interfaces.Result, error) {
return driver.db.Exec(sql, args...)
}
@@ -93,7 +74,7 @@ func (driver *SQLXDriver) Get(_ context.Context, dest interface{}, query string,
}
// Begin satisfies sql.Database
-func (driver *SQLXDriver) Begin(_ context.Context) (sql.Tx, error) {
+func (driver *SQLXDriver) Begin(_ context.Context) (interfaces.Tx, error) {
tx, err := driver.db.Beginx()
if err != nil {
return nil, err
@@ -101,16 +82,11 @@ func (driver *SQLXDriver) Begin(_ context.Context) (sql.Tx, error) {
return sqlxTxWrapper{tx: tx}, nil
}
-func (driver *SQLXDriver) Stats() sql.Stats {
+func (driver *SQLXDriver) Stats() interfaces.Stats {
stats := driver.db.Stats()
return sqlxStatsWrapper{stats: stats}
}
-// NodeID satisfies sql.Database
-func (driver *SQLXDriver) NodeID() string {
- return driver.nodeID
-}
-
// Close satisfies sql.Database/io.Closer
func (driver *SQLXDriver) Close() error {
return driver.db.Close()
@@ -170,12 +146,12 @@ type sqlxTxWrapper struct {
}
// QueryRow satisfies sql.Tx
-func (t sqlxTxWrapper) QueryRow(ctx context.Context, sql string, args ...interface{}) sql.ScannableRow {
+func (t sqlxTxWrapper) QueryRow(ctx context.Context, sql string, args ...interface{}) interfaces.ScannableRow {
return t.tx.QueryRowx(sql, args...)
}
// Exec satisfies sql.Tx
-func (t sqlxTxWrapper) Exec(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) {
+func (t sqlxTxWrapper) Exec(ctx context.Context, sql string, args ...interface{}) (interfaces.Result, error) {
return t.tx.Exec(sql, args...)
}
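
Both drivers now return the driver-agnostic interfaces types, so transactional code can be written once against them. A sketch of the round trip, assuming the relocated interfaces keep the Begin/Exec/Commit/Rollback signatures from the deleted sql interfaces above; the SQL text matches InsertIPLDStm from this patch.

    package example

    import (
        "context"

        "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
    )

    // putBlock writes one public.blocks row inside a transaction via the shared interfaces.
    func putBlock(ctx context.Context, driver interfaces.Driver, key string, data []byte) error {
        tx, err := driver.Begin(ctx)
        if err != nil {
            return err
        }
        stmt := `INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`
        if _, err := tx.Exec(ctx, stmt, key, data); err != nil {
            _ = tx.Rollback(ctx)
            return err
        }
        return tx.Commit(ctx)
    }
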
diff --git a/statediff/indexer/database/sql/postgres/test_helpers.go b/statediff/indexer/database/sql/postgres/test_helpers.go
index 491701c4b..db29584b4 100644
--- a/statediff/indexer/database/sql/postgres/test_helpers.go
+++ b/statediff/indexer/database/sql/postgres/test_helpers.go
@@ -19,24 +19,43 @@ package postgres
import (
"context"
- "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
- "github.com/ethereum/go-ethereum/statediff/indexer/node"
+ v2 "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres/v2"
+ v3 "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres/v3"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
)
-// SetupSQLXDB is used to setup a sqlx db for tests
-func SetupSQLXDB() (sql.Database, error) {
- driver, err := NewSQLXDriver(context.Background(), DefaultConfig, node.Info{})
+// SetupV3SQLXDB is used to set up a sqlx db for tests
+func SetupV3SQLXDB() (interfaces.Database, error) {
+ driver, err := NewSQLXDriver(context.Background(), DefaultConfig)
if err != nil {
return nil, err
}
- return NewPostgresDB(driver), nil
+ return v3.NewPostgresDB(driver), nil
}
-// SetupPGXDB is used to setup a pgx db for tests
-func SetupPGXDB() (sql.Database, error) {
- driver, err := NewPGXDriver(context.Background(), DefaultConfig, node.Info{})
+// SetupV3PGXDB is used to set up a pgx db for tests
+func SetupV3PGXDB() (interfaces.Database, error) {
+ driver, err := NewPGXDriver(context.Background(), DefaultConfig)
if err != nil {
return nil, err
}
- return NewPostgresDB(driver), nil
+ return v3.NewPostgresDB(driver), nil
+}
+
+// SetupV2SQLXDB is used to set up a sqlx db for tests
+func SetupV2SQLXDB() (interfaces.Database, error) {
+ driver, err := NewSQLXDriver(context.Background(), DefaultConfig)
+ if err != nil {
+ return nil, err
+ }
+ return v2.NewPostgresDB(driver), nil
+}
+
+// SetupV2PGXDB is used to set up a pgx db for tests
+func SetupV2PGXDB() (interfaces.Database, error) {
+ driver, err := NewPGXDriver(context.Background(), DefaultConfig)
+ if err != nil {
+ return nil, err
+ }
+ return v2.NewPostgresDB(driver), nil
}
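
A sketch of how a test might pair the v2 and v3 helpers to exercise dual writing; the helper name is illustrative.

    package example

    import (
        "testing"

        "github.com/stretchr/testify/require"

        "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
        "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
    )

    // setupDualSQLXDBs returns a v2-flavored and a v3-flavored test database.
    func setupDualSQLXDBs(t *testing.T) (interfaces.Database, interfaces.Database) {
        oldDB, err := postgres.SetupV2SQLXDB()
        require.NoError(t, err)
        newDB, err := postgres.SetupV3SQLXDB()
        require.NoError(t, err)
        return oldDB, newDB
    }
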
diff --git a/statediff/indexer/database/sql/postgres/v2/database.go b/statediff/indexer/database/sql/postgres/v2/database.go
new file mode 100644
index 000000000..f632e09c5
--- /dev/null
+++ b/statediff/indexer/database/sql/postgres/v2/database.go
@@ -0,0 +1,115 @@
+// VulcanizeDB
+// Copyright © 2021 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package postgres
+
+import (
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+)
+
+var _ interfaces.Database = &DB{}
+
+const version = 2
+
+// NewPostgresDB returns a postgres.DB using the provided driver
+func NewPostgresDB(driver interfaces.Driver) *DB {
+ return &DB{driver}
+}
+
+// DB implements sql.Database using a configured driver and Postgres statement syntax
+type DB struct {
+ interfaces.Driver
+}
+
+// InsertNodeInfoStm satisfies interfaces.Statements
+func (db *DB) InsertNodeInfoStm() string {
+ return `INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) VALUES ($1, $2, $3, $4, $5)
+ ON CONFLICT (genesis_block, network_id, node_id, chain_id) DO UPDATE SET client_name = $4
+ RETURNING id`
+}
+
+// InsertHeaderStm satisfies the interfaces.Statements
+func (db *DB) InsertHeaderStm() string {
+ return `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee)
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
+ ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)
+ RETURNING id`
+}
+
+// InsertUncleStm satisfies the interfaces.Statements
+func (db *DB) InsertUncleStm() string {
+ return `INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6)
+ ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid, reward, mh_key) = ($3, $4, $5, $6)`
+}
+
+// InsertTxStm satisfies the interfaces.Statements
+func (db *DB) InsertTxStm() string {
+ return `INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
+ ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key, tx_data, tx_type) = ($3, $4, $5, $6, $7, $8, $9)
+ RETURNING id`
+}
+
+// InsertAccessListElementStm satisfies the interfaces.Statements
+func (db *DB) InsertAccessListElementStm() string {
+ return `INSERT INTO eth.access_list_element (tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4)
+ ON CONFLICT (tx_id, index) DO UPDATE SET (address, storage_keys) = ($3, $4)`
+}
+
+// InsertRctStm satisfies the interfaces.Statements
+func (db *DB) InsertRctStm() string {
+ return `INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+ ON CONFLICT (tx_id) DO UPDATE SET (leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) = ($2, $3, $4, $5, $6, $7, $8)
+ RETURNING id`
+}
+
+// InsertLogStm satisfies the interfaces.Statements
+func (db *DB) InsertLogStm() string {
+ return `INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, receipt_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
+ ON CONFLICT (receipt_id, index) DO UPDATE SET (leaf_cid, leaf_mh_key, address, topic0, topic1, topic2, topic3, log_data) = ($1, $2, $4, $6, $7, $8, $9, $10)`
+}
+
+// InsertStateStm satisfies the interfaces.Statements
+func (db *DB) InsertStateStm() string {
+ return `INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
+ ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)
+ RETURNING id`
+}
+
+// InsertAccountStm satisfies the interfaces.Statements
+func (db *DB) InsertAccountStm() string {
+ return `INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5)
+ ON CONFLICT (state_id) DO UPDATE SET (balance, nonce, code_hash, storage_root) = ($2, $3, $4, $5)`
+}
+
+// InsertStorageStm satisfies the interfaces.Statements
+func (db *DB) InsertStorageStm() string {
+ return `INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
+ ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)`
+}
+
+// InsertIPLDStm satisfies the interfaces.Statements
+func (db *DB) InsertIPLDStm() string {
+ return `INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`
+}
+
+// InsertIPLDsStm satisfies the interfaces.Statements
+func (db *DB) InsertIPLDsStm() string {
+ return `INSERT INTO public.blocks (key, data) VALUES (unnest($1::TEXT[]), unnest($2::BYTEA[])) ON CONFLICT (key) DO NOTHING`
+}
+
+// Version satisfies the interfaces.Version
+func (db *DB) Version() uint {
+ return version
+}
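
Because the v2 statements end in RETURNING id, a writer consumes them with QueryRow(...).Scan rather than Exec, exactly as the v2 writer later in this patch does for headers. A trimmed sketch, assuming interfaces.Database still combines the Driver and Statements methods from the deleted sql interfaces.

    package example

    import "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"

    // insertV2Tx writes one v2 transaction_cids row and captures the surrogate id
    // that downstream rows (receipts, access list elements) reference.
    func insertV2Tx(db interfaces.Database, tx interfaces.Tx,
        headerID int64, txHash, cid, dst, src, mhKey string, index int64, data []byte, txType uint8) (int64, error) {
        var txID int64
        err := tx.QueryRow(db.Context(), db.InsertTxStm(),
            headerID, txHash, cid, dst, src, index, mhKey, data, txType).Scan(&txID)
        return txID, err
    }
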
diff --git a/statediff/indexer/database/sql/postgres/database.go b/statediff/indexer/database/sql/postgres/v3/database.go
similarity index 59%
rename from statediff/indexer/database/sql/postgres/database.go
rename to statediff/indexer/database/sql/postgres/v3/database.go
index 4cff518a0..f214acc7f 100644
--- a/statediff/indexer/database/sql/postgres/database.go
+++ b/statediff/indexer/database/sql/postgres/v3/database.go
@@ -16,86 +16,96 @@
package postgres
-import "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
-
-var _ sql.Database = &DB{}
-
-const (
- createNodeStm = `INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) VALUES ($1, $2, $3, $4, $5)
- ON CONFLICT (node_id) DO NOTHING`
+import (
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
)
+var _ interfaces.Database = &DB{}
+
+const version = 3
+
// NewPostgresDB returns a postgres.DB using the provided driver
-func NewPostgresDB(driver sql.Driver) *DB {
+func NewPostgresDB(driver interfaces.Driver) *DB {
return &DB{driver}
}
// DB implements sql.Database using a configured driver and Postgres statement syntax
type DB struct {
- sql.Driver
+ interfaces.Driver
}
-// InsertHeaderStm satisfies the sql.Statements interface
+// InsertNodeInfoStm satisfies interfaces.Statements
+func (db *DB) InsertNodeInfoStm() string {
+ return `INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) VALUES ($1, $2, $3, $4, $5)
+ ON CONFLICT (node_id) DO NOTHING`
+}
+
+// InsertHeaderStm satisfies the interfaces.Statements
func (db *DB) InsertHeaderStm() string {
return `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase)
- VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
- ON CONFLICT (block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)`
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
+ ON CONFLICT (block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)`
}
-// InsertUncleStm satisfies the sql.Statements interface
+// InsertUncleStm satisfies the interfaces.Statements
func (db *DB) InsertUncleStm() string {
return `INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6)
- ON CONFLICT (block_hash) DO NOTHING`
+ ON CONFLICT (block_hash) DO NOTHING`
}
-// InsertTxStm satisfies the sql.Statements interface
+// InsertTxStm satisfies the interfaces.Statements
func (db *DB) InsertTxStm() string {
return `INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type, value) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
- ON CONFLICT (tx_hash) DO NOTHING`
+ ON CONFLICT (tx_hash) DO NOTHING`
}
-// InsertAccessListElementStm satisfies the sql.Statements interface
+// InsertAccessListElementStm satisfies the interfaces.Statements
func (db *DB) InsertAccessListElementStm() string {
return `INSERT INTO eth.access_list_elements (tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4)
- ON CONFLICT (tx_id, index) DO NOTHING`
+ ON CONFLICT (tx_id, index) DO NOTHING`
}
-// InsertRctStm satisfies the sql.Statements interface
+// InsertRctStm satisfies the interfaces.Statements
func (db *DB) InsertRctStm() string {
return `INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
- ON CONFLICT (tx_id) DO NOTHING`
+ ON CONFLICT (tx_id) DO NOTHING`
}
-// InsertLogStm satisfies the sql.Statements interface
+// InsertLogStm satisfies the interfaces.Statements
func (db *DB) InsertLogStm() string {
return `INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, rct_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
- ON CONFLICT (rct_id, index) DO NOTHING`
+ ON CONFLICT (rct_id, index) DO NOTHING`
}
-// InsertStateStm satisfies the sql.Statements interface
+// InsertStateStm satisfies the interfaces.Statements
func (db *DB) InsertStateStm() string {
return `INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
- ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)`
+ ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)`
}
-// InsertAccountStm satisfies the sql.Statements interface
+// InsertAccountStm satisfies the interfaces.Statements
func (db *DB) InsertAccountStm() string {
return `INSERT INTO eth.state_accounts (header_id, state_path, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5, $6)
- ON CONFLICT (header_id, state_path) DO NOTHING`
+ ON CONFLICT (header_id, state_path) DO NOTHING`
}
-// InsertStorageStm satisfies the sql.Statements interface
+// InsertStorageStm satisfies the interfaces.Statements
func (db *DB) InsertStorageStm() string {
return `INSERT INTO eth.storage_cids (header_id, state_path, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
- ON CONFLICT (header_id, state_path, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($3, $4, $6, $7, $8)`
+ ON CONFLICT (header_id, state_path, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($3, $4, $6, $7, $8)`
}
-// InsertIPLDStm satisfies the sql.Statements interface
+// InsertIPLDStm satisfies the interfaces.Statements
func (db *DB) InsertIPLDStm() string {
return `INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`
}
-// InsertIPLDsStm satisfies the sql.Statements interface
+// InsertIPLDsStm satisfies the interfaces.Statements
func (db *DB) InsertIPLDsStm() string {
return `INSERT INTO public.blocks (key, data) VALUES (unnest($1::TEXT[]), unnest($2::BYTEA[])) ON CONFLICT (key) DO NOTHING`
}
+
+// Version satisfies the interfaces.Version
+func (db *DB) Version() uint {
+ return version
+}
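
In contrast, the v3 statements carry no RETURNING clause: rows key on natural identifiers (block_hash, tx_hash, state and storage paths), so a writer can simply Exec them and pass the hash or path through to child rows. A sketch under the same interface assumptions as the v2 example above.

    package example

    import "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"

    // insertV3Tx writes one v3 transaction_cids row; nothing is returned because
    // child rows reference the transaction by its hash rather than a surrogate id.
    func insertV3Tx(db interfaces.Database, tx interfaces.Tx,
        headerHash, txHash, cid, dst, src, mhKey, value string, index int64, data []byte, txType uint8) error {
        _, err := tx.Exec(db.Context(), db.InsertTxStm(),
            headerHash, txHash, cid, dst, src, index, mhKey, data, txType, value)
        return err
    }
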
diff --git a/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go b/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go
index 08f3f080e..95d03cb8e 100644
--- a/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go
+++ b/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go
@@ -20,6 +20,8 @@ import (
"context"
"testing"
+ nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
+
"github.com/ipfs/go-cid"
"github.com/jmoiron/sqlx"
"github.com/multiformats/go-multihash"
@@ -44,10 +46,10 @@ func setupLegacySQLX(t *testing.T) {
mockLegacyBlock = legacyData.MockBlock
legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256)
- db, err = postgres.SetupSQLXDB()
+ db, err = postgres.SetupV3SQLXDB()
require.NoError(t, err)
- ind, err = sql.NewStateDiffIndexer(context.Background(), legacyData.Config, db)
+ ind, err = sql.NewStateDiffIndexer(context.Background(), legacyData.Config, nodeinfo.Info{}, db, nil)
require.NoError(t, err)
var tx interfaces.Batch
tx, err = ind.PushBlock(
@@ -62,7 +64,7 @@ func setupLegacySQLX(t *testing.T) {
}
}()
for _, node := range legacyData.StateDiffs {
- err = ind.PushStateNode(tx, node, mockLegacyBlock.Hash().String())
+ err = ind.PushStateNode(tx, node, mockLegacyBlock.Hash().String(), 0)
require.NoError(t, err)
}
diff --git a/statediff/indexer/database/sql/sqlx_indexer_test.go b/statediff/indexer/database/sql/sqlx_indexer_test.go
index 5538a5f93..cd8ed4e33 100644
--- a/statediff/indexer/database/sql/sqlx_indexer_test.go
+++ b/statediff/indexer/database/sql/sqlx_indexer_test.go
@@ -33,17 +33,19 @@ import (
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
- "github.com/ethereum/go-ethereum/statediff/indexer/models"
+ sharedModels "github.com/ethereum/go-ethereum/statediff/indexer/models/shared"
+ v3Models "github.com/ethereum/go-ethereum/statediff/indexer/models/v3"
+ nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
)
func setupSQLX(t *testing.T) {
- db, err = postgres.SetupSQLXDB()
+ db, err = postgres.SetupV3SQLXDB()
if err != nil {
t.Fatal(err)
}
- ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestConfig, db)
+ ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestConfig, nodeinfo.Info{}, db, nil)
require.NoError(t, err)
var tx interfaces.Batch
tx, err = ind.PushBlock(
@@ -59,7 +61,7 @@ func setupSQLX(t *testing.T) {
}
}()
for _, node := range mocks.StateDiffs {
- err = ind.PushStateNode(tx, node, mockBlock.Hash().String())
+ err = ind.PushStateNode(tx, node, mockBlock.Hash().String(), 0)
require.NoError(t, err)
}
@@ -200,7 +202,7 @@ func TestSQLXIndexer(t *testing.T) {
if txRes.Value != transactions[3].Value().String() {
t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value)
}
- accessListElementModels := make([]models.AccessListElementModel, 0)
+ accessListElementModels := make([]v3Models.AccessListElementModel, 0)
pgStr = `SELECT access_list_elements.* FROM eth.access_list_elements INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.tx_hash) WHERE cid = $1 ORDER BY access_list_elements.index ASC`
err = db.Select(context.Background(), &accessListElementModels, pgStr, c)
if err != nil {
@@ -209,11 +211,11 @@ func TestSQLXIndexer(t *testing.T) {
if len(accessListElementModels) != 2 {
t.Fatalf("expected two access list entries, got %d", len(accessListElementModels))
}
- model1 := models.AccessListElementModel{
+ model1 := v3Models.AccessListElementModel{
Index: accessListElementModels[0].Index,
Address: accessListElementModels[0].Address,
}
- model2 := models.AccessListElementModel{
+ model2 := v3Models.AccessListElementModel{
Index: accessListElementModels[1].Index,
Address: accessListElementModels[1].Address,
StorageKeys: accessListElementModels[1].StorageKeys,
@@ -314,7 +316,7 @@ func TestSQLXIndexer(t *testing.T) {
expectTrue(t, test_helpers.ListContainsString(rcts, rct5CID.String()))
for idx, c := range rcts {
- result := make([]models.IPLDModel, 0)
+ result := make([]sharedModels.IPLDModel, 0)
pgStr = `SELECT data
FROM eth.receipt_cids
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key)
@@ -396,7 +398,7 @@ func TestSQLXIndexer(t *testing.T) {
setupSQLX(t)
defer tearDown(t)
// check that state nodes were properly indexed and published
- stateNodes := make([]models.StateNodeModel, 0)
+ stateNodes := make([]v3Models.StateNodeModel, 0)
pgStr := `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
WHERE header_cids.block_number = $1 AND node_type != 3`
@@ -418,7 +420,7 @@ func TestSQLXIndexer(t *testing.T) {
t.Fatal(err)
}
pgStr = `SELECT * from eth.state_accounts WHERE header_id = $1 AND state_path = $2`
- var account models.StateAccountModel
+ var account v3Models.StateAccountModel
err = db.Get(context.Background(), &account, pgStr, stateNode.HeaderID, stateNode.Path)
if err != nil {
t.Fatal(err)
@@ -428,7 +430,7 @@ func TestSQLXIndexer(t *testing.T) {
test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.ContractLeafKey).Hex())
test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x06'})
test_helpers.ExpectEqual(t, data, mocks.ContractLeafNode)
- test_helpers.ExpectEqual(t, account, models.StateAccountModel{
+ test_helpers.ExpectEqual(t, account, v3Models.StateAccountModel{
HeaderID: account.HeaderID,
StatePath: stateNode.Path,
Balance: "0",
@@ -442,7 +444,7 @@ func TestSQLXIndexer(t *testing.T) {
test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.AccountLeafKey).Hex())
test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x0c'})
test_helpers.ExpectEqual(t, data, mocks.AccountLeafNode)
- test_helpers.ExpectEqual(t, account, models.StateAccountModel{
+ test_helpers.ExpectEqual(t, account, v3Models.StateAccountModel{
HeaderID: account.HeaderID,
StatePath: stateNode.Path,
Balance: "1000",
@@ -454,7 +456,7 @@ func TestSQLXIndexer(t *testing.T) {
}
// check that Removed state nodes were properly indexed and published
- stateNodes = make([]models.StateNodeModel, 0)
+ stateNodes = make([]v3Models.StateNodeModel, 0)
pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
WHERE header_cids.block_number = $1 AND node_type = 3`
@@ -485,7 +487,7 @@ func TestSQLXIndexer(t *testing.T) {
setupSQLX(t)
defer tearDown(t)
// check that storage nodes were properly indexed
- storageNodes := make([]models.StorageNodeWithStateKeyModel, 0)
+ storageNodes := make([]v3Models.StorageNodeWithStateKeyModel, 0)
pgStr := `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
@@ -497,7 +499,7 @@ func TestSQLXIndexer(t *testing.T) {
t.Fatal(err)
}
test_helpers.ExpectEqual(t, len(storageNodes), 1)
- test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
+ test_helpers.ExpectEqual(t, storageNodes[0], v3Models.StorageNodeWithStateKeyModel{
CID: storageCID.String(),
NodeType: 2,
StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(),
@@ -518,7 +520,7 @@ func TestSQLXIndexer(t *testing.T) {
test_helpers.ExpectEqual(t, data, mocks.StorageLeafNode)
// check that Removed storage nodes were properly indexed
- storageNodes = make([]models.StorageNodeWithStateKeyModel, 0)
+ storageNodes = make([]v3Models.StorageNodeWithStateKeyModel, 0)
pgStr = `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
@@ -530,7 +532,7 @@ func TestSQLXIndexer(t *testing.T) {
t.Fatal(err)
}
test_helpers.ExpectEqual(t, len(storageNodes), 1)
- test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{
+ test_helpers.ExpectEqual(t, storageNodes[0], v3Models.StorageNodeWithStateKeyModel{
CID: shared.RemovedNodeStorageCID,
NodeType: 3,
StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(),
diff --git a/statediff/indexer/database/sql/v2/writer.go b/statediff/indexer/database/sql/v2/writer.go
new file mode 100644
index 000000000..feeb49c56
--- /dev/null
+++ b/statediff/indexer/database/sql/v2/writer.go
@@ -0,0 +1,222 @@
+// VulcanizeDB
+// Copyright © 2019 Vulcanize
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package sql
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+ "github.com/ethereum/go-ethereum/statediff/indexer/models/v2"
+ "github.com/ethereum/go-ethereum/statediff/indexer/node"
+)
+
+var (
+ nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")
+)
+
+// Writer handles processing and writing of indexed IPLD objects to Postgres
+type Writer struct {
+ DB interfaces.Database
+ metrics sql.IndexerMetricsHandles
+ nodeID int64
+}
+
+// NewWriter creates a new pointer to a Writer
+func NewWriter(db interfaces.Database) *Writer {
+ return &Writer{
+ DB: db,
+ }
+}
+
+// Close satisfies io.Closer
+func (w *Writer) Close() error {
+ return w.DB.Close()
+}
+
+/*
+InsertNodeInfo inserts a node info model
+INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) VALUES ($1, $2, $3, $4, $5)
+ON CONFLICT (genesis_block, network_id, node_id, chain_id) DO NOTHING
+*/
+func (w *Writer) InsertNodeInfo(info node.Info) error {
+ var nodeID int64
+ if err := w.DB.QueryRow(w.DB.Context(), w.DB.InsertNodeInfoStm(), info.GenesisBlock, info.NetworkID, info.ID,
+ info.ClientName, info.ChainID).Scan(&nodeID); err != nil {
+ return err
+ }
+ w.nodeID = nodeID
+ return nil
+}
+
+/*
+InsertHeaderCID inserts a header model
+INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee)
+VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
+ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)
+*/
+func (w *Writer) InsertHeaderCID(tx interfaces.Tx, header *models.HeaderModel) (int64, error) {
+ var headerID int64
+ err := tx.QueryRow(w.DB.Context(), w.DB.InsertHeaderStm(),
+ header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, w.nodeID,
+ header.Reward, header.StateRoot, header.TxRoot, header.RctRoot, header.UncleRoot, header.Bloom,
+ header.Timestamp, header.MhKey, 1, header.BaseFee).Scan(&headerID)
+ if err != nil {
+ return 0, fmt.Errorf("error inserting header_cids entry: %v", err)
+ }
+ w.metrics.Blocks.Inc(1)
+ return headerID, nil
+}
+
+/*
+InsertUncleCID inserts an uncle model
+INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6)
+ON CONFLICT (header_id, block_hash) DO NOTHING
+*/
+func (w *Writer) InsertUncleCID(tx interfaces.Tx, uncle *models.UncleModel) error {
+ _, err := tx.Exec(w.DB.Context(), w.DB.InsertUncleStm(),
+ uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey)
+ if err != nil {
+ return fmt.Errorf("error inserting uncle_cids entry: %v", err)
+ }
+ return nil
+}
+
+/*
+InsertTransactionCID inserts a tx model
+INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
+ON CONFLICT (header_id, tx_hash) DO NOTHING
+*/
+func (w *Writer) InsertTransactionCID(tx interfaces.Tx, transaction *models.TxModel) (int64, error) {
+ var txID int64
+ err := tx.QueryRow(w.DB.Context(), w.DB.InsertTxStm(),
+ transaction.HeaderID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index,
+ transaction.MhKey, transaction.Data, transaction.Type).Scan(&txID)
+ if err != nil {
+ return 0, fmt.Errorf("error inserting transaction_cids entry: %v", err)
+ }
+ w.metrics.Transactions.Inc(1)
+ return txID, nil
+}
+
+/*
+InsertAccessListElement inserts an access list element model
+INSERT INTO eth.access_list_elements (tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4)
+ON CONFLICT (tx_id, index) DO NOTHING
+*/
+func (w *Writer) InsertAccessListElement(tx interfaces.Tx, accessListElement *models.AccessListElementModel) error {
+ _, err := tx.Exec(w.DB.Context(), w.DB.InsertAccessListElementStm(),
+ accessListElement.TxID, accessListElement.Index, accessListElement.Address, accessListElement.StorageKeys)
+ if err != nil {
+ return fmt.Errorf("error inserting access_list_element entry: %v", err)
+ }
+ w.metrics.AccessListEntries.Inc(1)
+ return nil
+}
+
+/*
+InsertReceiptCID inserts a receipt model
+INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+ON CONFLICT (tx_id) DO NOTHING
+*/
+func (w *Writer) InsertReceiptCID(tx interfaces.Tx, rct *models.ReceiptModel) (int64, error) {
+ var receiptID int64
+ err := tx.QueryRow(w.DB.Context(), w.DB.InsertRctStm(),
+ rct.TxID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey, rct.PostState, rct.PostStatus, rct.LogRoot).Scan(&receiptID)
+ if err != nil {
+ return 0, fmt.Errorf("error inserting receipt_cids entry: %w", err)
+ }
+ w.metrics.Receipts.Inc(1)
+ return receiptID, nil
+}
+
+/*
+InsertLogCID inserts a log model
+INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, receipt_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
+ON CONFLICT (receipt_id, index) DO NOTHING
+*/
+func (w *Writer) InsertLogCID(tx interfaces.Tx, logs []*models.LogsModel) error {
+ for _, log := range logs {
+ _, err := tx.Exec(w.DB.Context(), w.DB.InsertLogStm(),
+ log.LeafCID, log.LeafMhKey, log.ReceiptID, log.Address, log.Index, log.Topic0, log.Topic1, log.Topic2,
+ log.Topic3, log.Data)
+ if err != nil {
+ return fmt.Errorf("error inserting logs entry: %w", err)
+ }
+ w.metrics.Logs.Inc(1)
+ }
+ return nil
+}
+
+/*
+InsertStateCID inserts a state model
+INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
+ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)
+*/
+func (w *Writer) InsertStateCID(tx interfaces.Tx, stateNode *models.StateNodeModel) (int64, error) {
+ var stateID int64
+ var stateKey string
+ if stateNode.StateKey != nullHash.String() {
+ stateKey = stateNode.StateKey
+ }
+ err := tx.QueryRow(w.DB.Context(), w.DB.InsertStateStm(),
+ stateNode.HeaderID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey).Scan(&stateID)
+ if err != nil {
+ return 0, fmt.Errorf("error inserting state_cids entry: %v", err)
+ }
+ return stateID, nil
+}
+
+/*
+InsertStateAccount inserts a state account model
+INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5)
+ON CONFLICT (state_id) DO NOTHING
+*/
+func (w *Writer) InsertStateAccount(tx interfaces.Tx, stateAccount *models.StateAccountModel) error {
+ _, err := tx.Exec(w.DB.Context(), w.DB.InsertAccountStm(),
+ stateAccount.StateID, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash,
+ stateAccount.StorageRoot)
+ if err != nil {
+ return fmt.Errorf("error inserting state_accounts entry: %v", err)
+ }
+ return nil
+}
+
+/*
+InsertStorageCID inserts a storage model
+INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
+ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)
+*/
+func (w *Writer) InsertStorageCID(tx interfaces.Tx, storageCID *models.StorageNodeModel) error {
+ var storageKey string
+ if storageCID.StorageKey != nullHash.String() {
+ storageKey = storageCID.StorageKey
+ }
+ _, err := tx.Exec(w.DB.Context(), w.DB.InsertStorageStm(),
+ storageCID.StateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType,
+ true, storageCID.MhKey)
+ if err != nil {
+ return fmt.Errorf("error inserting storage_cids entry: %v", err)
+ }
+ return nil
+}
+
+// Stats returns the stats for the underlying DB
+func (w *Writer) Stats() interfaces.Stats {
+ return w.DB.Stats()
+}
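
A minimal usage sketch of the new v2 Writer (not part of this changeset; the db handle, header values, and error handling are assumptions): node info is inserted once so the serial node_id is cached on the writer, then the header insert runs inside a transaction and hands back the serial header id that dependent rows key on.

package example

import (
	v2writer "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/v2"
	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
	v2models "github.com/ethereum/go-ethereum/statediff/indexer/models/v2"
	"github.com/ethereum/go-ethereum/statediff/indexer/node"
)

// WriteHeader records the node info and a single header with the v2 writer,
// returning the serial header id that child rows (uncles, txs, state) reference.
func WriteHeader(db interfaces.Database, info node.Info, header *v2models.HeaderModel) (int64, error) {
	w := v2writer.NewWriter(db)
	// Resolve (or create) the nodes entry; the serial id is cached on the writer.
	if err := w.InsertNodeInfo(info); err != nil {
		return 0, err
	}
	tx, err := db.Begin(db.Context())
	if err != nil {
		return 0, err
	}
	headerID, err := w.InsertHeaderCID(tx, header)
	if err != nil {
		_ = tx.Rollback(db.Context())
		return 0, err
	}
	return headerID, tx.Commit(db.Context())
}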
diff --git a/statediff/indexer/database/sql/writer.go b/statediff/indexer/database/sql/v3/writer.go
similarity index 60%
rename from statediff/indexer/database/sql/writer.go
rename to statediff/indexer/database/sql/v3/writer.go
index 3f1dfc0b5..593cb3339 100644
--- a/statediff/indexer/database/sql/writer.go
+++ b/statediff/indexer/database/sql/v3/writer.go
@@ -20,7 +20,10 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/statediff/indexer/models"
+ "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+ "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+ "github.com/ethereum/go-ethereum/statediff/indexer/models/v3"
+ "github.com/ethereum/go-ethereum/statediff/indexer/node"
)
var (
@@ -29,156 +32,181 @@ var (
// Writer handles processing and writing of indexed IPLD objects to Postgres
type Writer struct {
- db Database
+ DB interfaces.Database
+ metrics sql.IndexerMetricsHandles
+ nodeID string
}
// NewWriter creates a new pointer to a Writer
-func NewWriter(db Database) *Writer {
+func NewWriter(db interfaces.Database) *Writer {
return &Writer{
- db: db,
+ DB: db,
}
}
// Close satisfies io.Closer
func (w *Writer) Close() error {
- return w.db.Close()
+ return w.DB.Close()
}
/*
+InsertNodeInfo inserts a node info model
+INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) VALUES ($1, $2, $3, $4, $5)
+ON CONFLICT (node_id) DO NOTHING
+*/
+func (w *Writer) InsertNodeInfo(info node.Info) error {
+ if _, err := w.DB.Exec(w.DB.Context(), w.DB.InsertNodeInfoStm(), info.GenesisBlock, info.NetworkID, info.ID,
+ info.ClientName, info.ChainID); err != nil {
+ return err
+ }
+ w.nodeID = info.ID
+ return nil
+}
+
+/*
+InsertHeaderCID inserts a header model
INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
ON CONFLICT (block_hash) DO UPDATE SET (block_number, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = ($1, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)
*/
-func (w *Writer) upsertHeaderCID(tx Tx, header models.HeaderModel) error {
- _, err := tx.Exec(w.db.Context(), w.db.InsertHeaderStm(),
- header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, w.db.NodeID(),
+func (w *Writer) InsertHeaderCID(tx interfaces.Tx, header models.HeaderModel) error {
+ _, err := tx.Exec(w.DB.Context(), w.DB.InsertHeaderStm(),
+ header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, w.nodeID,
header.Reward, header.StateRoot, header.TxRoot, header.RctRoot, header.UncleRoot, header.Bloom,
header.Timestamp, header.MhKey, 1, header.Coinbase)
if err != nil {
- return fmt.Errorf("error upserting header_cids entry: %v", err)
+ return fmt.Errorf("error inserting header_cids entry: %v", err)
}
- indexerMetrics.blocks.Inc(1)
+ w.metrics.Blocks.Inc(1)
return nil
}
/*
+InsertUncleCID inserts an uncle model
INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6)
ON CONFLICT (block_hash) DO NOTHING
*/
-func (w *Writer) upsertUncleCID(tx Tx, uncle models.UncleModel) error {
- _, err := tx.Exec(w.db.Context(), w.db.InsertUncleStm(),
+func (w *Writer) InsertUncleCID(tx interfaces.Tx, uncle *models.UncleModel) error {
+ _, err := tx.Exec(w.DB.Context(), w.DB.InsertUncleStm(),
uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey)
if err != nil {
- return fmt.Errorf("error upserting uncle_cids entry: %v", err)
+ return fmt.Errorf("error inserting uncle_cids entry: %v", err)
}
return nil
}
/*
+InsertTransactionCID inserts a tx model
INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type, value) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
ON CONFLICT (tx_hash) DO NOTHING
*/
-func (w *Writer) upsertTransactionCID(tx Tx, transaction models.TxModel) error {
- _, err := tx.Exec(w.db.Context(), w.db.InsertTxStm(),
+func (w *Writer) InsertTransactionCID(tx interfaces.Tx, transaction *models.TxModel) error {
+ _, err := tx.Exec(w.DB.Context(), w.DB.InsertTxStm(),
transaction.HeaderID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index,
transaction.MhKey, transaction.Data, transaction.Type, transaction.Value)
if err != nil {
- return fmt.Errorf("error upserting transaction_cids entry: %v", err)
+ return fmt.Errorf("error inserting transaction_cids entry: %v", err)
}
- indexerMetrics.transactions.Inc(1)
+ w.metrics.Transactions.Inc(1)
return nil
}
/*
+InsertAccessListElement inserts an access list element model
INSERT INTO eth.access_list_elements (tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4)
ON CONFLICT (tx_id, index) DO NOTHING
*/
-func (w *Writer) upsertAccessListElement(tx Tx, accessListElement models.AccessListElementModel) error {
- _, err := tx.Exec(w.db.Context(), w.db.InsertAccessListElementStm(),
+func (w *Writer) InsertAccessListElement(tx interfaces.Tx, accessListElement *models.AccessListElementModel) error {
+ _, err := tx.Exec(w.DB.Context(), w.DB.InsertAccessListElementStm(),
accessListElement.TxID, accessListElement.Index, accessListElement.Address, accessListElement.StorageKeys)
if err != nil {
- return fmt.Errorf("error upserting access_list_element entry: %v", err)
+ return fmt.Errorf("error inserting access_list_element entry: %v", err)
}
- indexerMetrics.accessListEntries.Inc(1)
+ w.metrics.AccessListEntries.Inc(1)
return nil
}
/*
+InsertReceiptCID inserts a receipt model
INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
ON CONFLICT (tx_id) DO NOTHING
*/
-func (w *Writer) upsertReceiptCID(tx Tx, rct *models.ReceiptModel) error {
- _, err := tx.Exec(w.db.Context(), w.db.InsertRctStm(),
+func (w *Writer) InsertReceiptCID(tx interfaces.Tx, rct *models.ReceiptModel) error {
+ _, err := tx.Exec(w.DB.Context(), w.DB.InsertRctStm(),
rct.TxID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey, rct.PostState, rct.PostStatus, rct.LogRoot)
if err != nil {
- return fmt.Errorf("error upserting receipt_cids entry: %w", err)
+ return fmt.Errorf("error inserting receipt_cids entry: %w", err)
}
- indexerMetrics.receipts.Inc(1)
+ w.metrics.Receipts.Inc(1)
return nil
}
/*
+InsertLogCID inserts a log model
INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, rct_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
ON CONFLICT (rct_id, index) DO NOTHING
*/
-func (w *Writer) upsertLogCID(tx Tx, logs []*models.LogsModel) error {
+func (w *Writer) InsertLogCID(tx interfaces.Tx, logs []*models.LogsModel) error {
for _, log := range logs {
- _, err := tx.Exec(w.db.Context(), w.db.InsertLogStm(),
+ _, err := tx.Exec(w.DB.Context(), w.DB.InsertLogStm(),
log.LeafCID, log.LeafMhKey, log.ReceiptID, log.Address, log.Index, log.Topic0, log.Topic1, log.Topic2,
log.Topic3, log.Data)
if err != nil {
- return fmt.Errorf("error upserting logs entry: %w", err)
+ return fmt.Errorf("error inserting logs entry: %w", err)
}
- indexerMetrics.logs.Inc(1)
+ w.metrics.Logs.Inc(1)
}
return nil
}
/*
+InsertStateCID inserts a state model
INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)
*/
-func (w *Writer) upsertStateCID(tx Tx, stateNode models.StateNodeModel) error {
+func (w *Writer) InsertStateCID(tx interfaces.Tx, stateNode *models.StateNodeModel) error {
var stateKey string
if stateNode.StateKey != nullHash.String() {
stateKey = stateNode.StateKey
}
- _, err := tx.Exec(w.db.Context(), w.db.InsertStateStm(),
+ _, err := tx.Exec(w.DB.Context(), w.DB.InsertStateStm(),
stateNode.HeaderID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey)
if err != nil {
- return fmt.Errorf("error upserting state_cids entry: %v", err)
+ return fmt.Errorf("error inserting state_cids entry: %v", err)
}
return nil
}
/*
+InsertStateAccount inserts a state account model
INSERT INTO eth.state_accounts (header_id, state_path, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5, $6)
ON CONFLICT (header_id, state_path) DO NOTHING
*/
-func (w *Writer) upsertStateAccount(tx Tx, stateAccount models.StateAccountModel) error {
- _, err := tx.Exec(w.db.Context(), w.db.InsertAccountStm(),
+func (w *Writer) InsertStateAccount(tx interfaces.Tx, stateAccount *models.StateAccountModel) error {
+ _, err := tx.Exec(w.DB.Context(), w.DB.InsertAccountStm(),
stateAccount.HeaderID, stateAccount.StatePath, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash,
stateAccount.StorageRoot)
if err != nil {
- return fmt.Errorf("error upserting state_accounts entry: %v", err)
+ return fmt.Errorf("error inserting state_accounts entry: %v", err)
}
return nil
}
/*
+InsertStorageCID inserts a storage model
INSERT INTO eth.storage_cids (header_id, state_path, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
ON CONFLICT (header_id, state_path, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($3, $4, $6, $7, $8)
*/
-func (w *Writer) upsertStorageCID(tx Tx, storageCID models.StorageNodeModel) error {
+func (w *Writer) InsertStorageCID(tx interfaces.Tx, storageCID *models.StorageNodeModel) error {
var storageKey string
if storageCID.StorageKey != nullHash.String() {
storageKey = storageCID.StorageKey
}
- _, err := tx.Exec(w.db.Context(), w.db.InsertStorageStm(),
+ _, err := tx.Exec(w.DB.Context(), w.DB.InsertStorageStm(),
storageCID.HeaderID, storageCID.StatePath, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType,
true, storageCID.MhKey)
if err != nil {
- return fmt.Errorf("error upserting storage_cids entry: %v", err)
+ return fmt.Errorf("error inserting storage_cids entry: %v", err)
}
return nil
}
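
For contrast, a similar sketch against the v3 writer (again assuming a populated header model and an interfaces.Database handle): v3 rows are keyed on natural keys, so InsertHeaderCID returns no generated id and state rows carry the block hash in HeaderID rather than a serial foreign key.

package example

import (
	v3writer "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/v3"
	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
	v3models "github.com/ethereum/go-ethereum/statediff/indexer/models/v3"
	"github.com/ethereum/go-ethereum/statediff/indexer/node"
)

// WriteHeaderAndState writes a header and one state node with the v3 writer;
// the state row references the header by block hash instead of a serial id.
func WriteHeaderAndState(db interfaces.Database, info node.Info, header v3models.HeaderModel, stateNode *v3models.StateNodeModel) error {
	w := v3writer.NewWriter(db)
	// In v3 the node_id written into header_cids is the string node id itself.
	if err := w.InsertNodeInfo(info); err != nil {
		return err
	}
	tx, err := db.Begin(db.Context())
	if err != nil {
		return err
	}
	if err := w.InsertHeaderCID(tx, header); err != nil {
		_ = tx.Rollback(db.Context())
		return err
	}
	// Key the state row on the header's block hash.
	stateNode.HeaderID = header.BlockHash
	if err := w.InsertStateCID(tx, stateNode); err != nil {
		_ = tx.Rollback(db.Context())
		return err
	}
	return tx.Commit(db.Context())
}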
diff --git a/statediff/indexer/interfaces/interfaces.go b/statediff/indexer/interfaces/interfaces.go
index 8f951230d..621dcd6e7 100644
--- a/statediff/indexer/interfaces/interfaces.go
+++ b/statediff/indexer/interfaces/interfaces.go
@@ -17,6 +17,7 @@
package interfaces
import (
+ "context"
"io"
"math/big"
"time"
@@ -28,10 +29,11 @@ import (
// StateDiffIndexer interface required to index statediff data
type StateDiffIndexer interface {
- PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (Batch, error)
- PushStateNode(tx Batch, stateNode sdtypes.StateNode, headerID string) error
+ PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (Batch, int64, error)
+ PushStateNode(tx Batch, stateNode sdtypes.StateNode, headerHash string, headerID int64) error
PushCodeAndCodeHash(tx Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error
- ReportDBMetrics(delay time.Duration, quit <-chan bool)
+ ReportOldDBMetrics(delay time.Duration, quit <-chan bool)
+ ReportNewDBMetrics(delay time.Duration, quit <-chan bool)
io.Closer
}
@@ -44,3 +46,68 @@ type Batch interface {
type Config interface {
Type() shared.DBType
}
+
+// Database interfaces required by the sql indexer
+type Database interface {
+ Driver
+ Statements
+ Version() uint
+}
+
+// Driver interface has all the methods required by a driver implementation to support the sql indexer
+type Driver interface {
+ QueryRow(ctx context.Context, sql string, args ...interface{}) ScannableRow
+ Exec(ctx context.Context, sql string, args ...interface{}) (Result, error)
+ Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error
+ Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error
+ Begin(ctx context.Context) (Tx, error)
+ Stats() Stats
+ Context() context.Context
+ io.Closer
+}
+
+// Statements interface to accommodate different SQL query syntax
+type Statements interface {
+ InsertNodeInfoStm() string
+ InsertHeaderStm() string
+ InsertUncleStm() string
+ InsertTxStm() string
+ InsertAccessListElementStm() string
+ InsertRctStm() string
+ InsertLogStm() string
+ InsertStateStm() string
+ InsertAccountStm() string
+ InsertStorageStm() string
+ InsertIPLDStm() string
+ InsertIPLDsStm() string
+}
+
+// Tx interface to accommodate different concrete SQL transaction types
+type Tx interface {
+ QueryRow(ctx context.Context, sql string, args ...interface{}) ScannableRow
+ Exec(ctx context.Context, sql string, args ...interface{}) (Result, error)
+ Commit(ctx context.Context) error
+ Rollback(ctx context.Context) error
+}
+
+// ScannableRow interface to accommodate different concrete row types
+type ScannableRow interface {
+ Scan(dest ...interface{}) error
+}
+
+// Result interface to accommodate different concrete result types
+type Result interface {
+ RowsAffected() (int64, error)
+}
+
+// Stats interface to accommodate different concrete sql stats types
+type Stats interface {
+ MaxOpen() int64
+ Open() int64
+ InUse() int64
+ Idle() int64
+ WaitCount() int64
+ WaitDuration() time.Duration
+ MaxIdleClosed() int64
+ MaxLifetimeClosed() int64
+}
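
Because the indexer now programs against these narrow interfaces rather than concrete driver types, cross-cutting behaviour can be layered in with small decorators. The wrapper below is illustrative only and not part of this change: it logs each Exec call and delegates everything else to the embedded Driver.

package example

import (
	"context"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
)

// loggingDriver decorates any interfaces.Driver with statement logging.
type loggingDriver struct {
	interfaces.Driver
}

// Exec logs the statement before delegating to the wrapped driver.
func (d loggingDriver) Exec(ctx context.Context, sql string, args ...interface{}) (interfaces.Result, error) {
	log.Debug("statediff exec", "sql", sql, "args", len(args))
	return d.Driver.Exec(ctx, sql, args...)
}

// WrapWithLogging returns a Driver that logs writes before delegating.
func WrapWithLogging(d interfaces.Driver) interfaces.Driver {
	return loggingDriver{Driver: d}
}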
diff --git a/statediff/indexer/mocks/test_data.go b/statediff/indexer/mocks/test_data.go
index e5d72e5ba..e10c7d977 100644
--- a/statediff/indexer/mocks/test_data.go
+++ b/statediff/indexer/mocks/test_data.go
@@ -28,7 +28,6 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/statediff/test_helpers"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ethereum/go-ethereum/trie"
@@ -105,11 +104,11 @@ var (
Address: AnotherAddress,
StorageKeys: []common.Hash{common.BytesToHash(StorageLeafKey), common.BytesToHash(MockStorageLeafKey)},
}
- AccessListEntry1Model = models.AccessListElementModel{
+ AccessListEntry1Model = v2.AccessListElementModel{
Index: 0,
Address: Address.Hex(),
}
- AccessListEntry2Model = models.AccessListElementModel{
+ AccessListEntry2Model = v2.AccessListElementModel{
Index: 1,
Address: AnotherAddress.Hex(),
StorageKeys: []string{common.BytesToHash(StorageLeafKey).Hex(), common.BytesToHash(MockStorageLeafKey).Hex()},
diff --git a/statediff/indexer/models/v2/batch.go b/statediff/indexer/models/v2/batch.go
index 585d59403..92f7e0be0 100644
--- a/statediff/indexer/models/v2/batch.go
+++ b/statediff/indexer/models/v2/batch.go
@@ -18,12 +18,6 @@ package models
import "github.com/lib/pq"
-// IPLDBatch holds the arguments for a batch insert of IPLD data
-type IPLDBatch struct {
- Keys []string
- Values [][]byte
-}
-
// UncleBatch is the db model for eth.uncle_cids
type UncleBatch struct {
IDs []int64
diff --git a/statediff/indexer/models/v2/models.go b/statediff/indexer/models/v2/models.go
index d303826b7..cbb7c55f2 100644
--- a/statediff/indexer/models/v2/models.go
+++ b/statediff/indexer/models/v2/models.go
@@ -20,23 +20,23 @@ import "github.com/lib/pq"
// HeaderModel is the db model for eth.header_cids
type HeaderModel struct {
- ID int64 `db:"id"`
- BlockNumber string `db:"block_number"`
- BlockHash string `db:"block_hash"`
- ParentHash string `db:"parent_hash"`
- CID string `db:"cid"`
- MhKey string `db:"mh_key"`
- TotalDifficulty string `db:"td"`
- NodeID int64 `db:"node_id"`
- Reward string `db:"reward"`
- StateRoot string `db:"state_root"`
- UncleRoot string `db:"uncle_root"`
- TxRoot string `db:"tx_root"`
- RctRoot string `db:"receipt_root"`
- Bloom []byte `db:"bloom"`
- Timestamp uint64 `db:"timestamp"`
- TimesValidated int64 `db:"times_validated"`
- BaseFee *int64 `db:"base_fee"`
+ ID int64 `db:"id"`
+ BlockNumber string `db:"block_number"`
+ BlockHash string `db:"block_hash"`
+ ParentHash string `db:"parent_hash"`
+ CID string `db:"cid"`
+ MhKey string `db:"mh_key"`
+ TotalDifficulty string `db:"td"`
+ NodeID int64 `db:"node_id"`
+ Reward string `db:"reward"`
+ StateRoot string `db:"state_root"`
+ UncleRoot string `db:"uncle_root"`
+ TxRoot string `db:"tx_root"`
+ RctRoot string `db:"receipt_root"`
+ Bloom []byte `db:"bloom"`
+ Timestamp uint64 `db:"timestamp"`
+ TimesValidated int64 `db:"times_validated"`
+ BaseFee *string `db:"base_fee"`
}
// UncleModel is the db model for eth.uncle_cids
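
The only semantic change in this model is base_fee moving from *int64 to *string: like td and reward, the value originates as a *big.Int on the header, so a decimal string keeps the column loss-free for arbitrarily large values. A hypothetical helper (not in this changeset) showing the intended conversion:

package example

import (
	"github.com/ethereum/go-ethereum/core/types"
)

// baseFeeString renders a header's base fee for the v2 HeaderModel,
// returning nil for pre-London blocks where BaseFee is unset.
func baseFeeString(h *types.Header) *string {
	if h.BaseFee == nil {
		return nil
	}
	s := h.BaseFee.String()
	return &s
}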
diff --git a/statediff/indexer/models/v3/batch.go b/statediff/indexer/models/v3/batch.go
index 16096f292..3df3244b3 100644
--- a/statediff/indexer/models/v3/batch.go
+++ b/statediff/indexer/models/v3/batch.go
@@ -18,12 +18,6 @@ package models
import "github.com/lib/pq"
-// IPLDBatch holds the arguments for a batch insert of IPLD data
-type IPLDBatch struct {
- Keys []string
- Values [][]byte
-}
-
// UncleBatch holds the arguments for a batch insert of uncle data
type UncleBatch struct {
HeaderID []string
diff --git a/statediff/service.go b/statediff/service.go
index c8c7649fd..88186b99d 100644
--- a/statediff/service.go
+++ b/statediff/service.go
@@ -164,7 +164,8 @@ func New(stack *node.Node, ethServ *eth.Ethereum, cfg *ethconfig.Config, params
if err != nil {
return err
}
- indexer.ReportDBMetrics(10*time.Second, quitCh)
+ indexer.ReportOldDBMetrics(10*time.Second, quitCh)
+ indexer.ReportNewDBMetrics(10*time.Second, quitCh)
}
workers := params.NumWorkers
if workers == 0 {
@@ -661,6 +662,7 @@ func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, p
// log.Info("Writing state diff", "block height", block.Number().Uint64())
var totalDifficulty *big.Int
var receipts types.Receipts
+ var headerID int64
var err error
var tx interfaces.Batch
if params.IncludeTD {
@@ -669,7 +671,7 @@ func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, p
if params.IncludeReceipts {
receipts = sds.BlockChain.GetReceiptsByHash(block.Hash())
}
- tx, err = sds.indexer.PushBlock(block, receipts, totalDifficulty)
+ tx, headerID, err = sds.indexer.PushBlock(block, receipts, totalDifficulty)
if err != nil {
return err
}
@@ -680,7 +682,7 @@ func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, p
}
}()
output := func(node types2.StateNode) error {
- return sds.indexer.PushStateNode(tx, node, block.Hash().String())
+ return sds.indexer.PushStateNode(tx, node, block.Hash().String(), headerID)
}
codeOutput := func(c types2.CodeAndCodeHash) error {
return sds.indexer.PushCodeAndCodeHash(tx, c)