From e227dbf18242efae3850681d3525edb48d78dfa3 Mon Sep 17 00:00:00 2001 From: i-norden Date: Mon, 24 Jan 2022 14:09:25 -0600 Subject: [PATCH 1/7] v2 and v3 db models and batches --- statediff/indexer/models/shared/batch.go | 23 +++ statediff/indexer/models/shared/models.go | 23 +++ statediff/indexer/models/v2/batch.go | 134 ++++++++++++++++++ statediff/indexer/models/v2/models.go | 149 ++++++++++++++++++++ statediff/indexer/models/{ => v3}/batch.go | 0 statediff/indexer/models/{ => v3}/models.go | 6 - 6 files changed, 329 insertions(+), 6 deletions(-) create mode 100644 statediff/indexer/models/shared/batch.go create mode 100644 statediff/indexer/models/shared/models.go create mode 100644 statediff/indexer/models/v2/batch.go create mode 100644 statediff/indexer/models/v2/models.go rename statediff/indexer/models/{ => v3}/batch.go (100%) rename statediff/indexer/models/{ => v3}/models.go (97%) diff --git a/statediff/indexer/models/shared/batch.go b/statediff/indexer/models/shared/batch.go new file mode 100644 index 000000000..ba3056753 --- /dev/null +++ b/statediff/indexer/models/shared/batch.go @@ -0,0 +1,23 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package models + +// IPLDBatch holds the arguments for a batch insert of IPLD data +type IPLDBatch struct { + Keys []string + Values [][]byte +} diff --git a/statediff/indexer/models/shared/models.go b/statediff/indexer/models/shared/models.go new file mode 100644 index 000000000..0c0b9be40 --- /dev/null +++ b/statediff/indexer/models/shared/models.go @@ -0,0 +1,23 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package models + +// IPLDModel is the db model for public.blocks +type IPLDModel struct { + Key string `db:"key"` + Data []byte `db:"data"` +} diff --git a/statediff/indexer/models/v2/batch.go b/statediff/indexer/models/v2/batch.go new file mode 100644 index 000000000..585d59403 --- /dev/null +++ b/statediff/indexer/models/v2/batch.go @@ -0,0 +1,134 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package models + +import "github.com/lib/pq" + +// IPLDBatch holds the arguments for a batch insert of IPLD data +type IPLDBatch struct { + Keys []string + Values [][]byte +} + +// UncleBatch is the db model for eth.uncle_cids +type UncleBatch struct { + IDs []int64 + HeaderIDs []int64 + BlockHashes []string + ParentHashes []string + CIDs []string + MhKeys []string + Rewards []string +} + +// TxBatch is the db model for eth.transaction_cids +type TxBatch struct { + IDs []int64 + HeaderIDs []int64 + Indexes []int64 + TxHashes []string + CIDs []string + MhKeys []string + Dsts []string + Srcs []string + Data [][]byte + Types []uint8 +} + +// AccessListElementBatch is the db model for eth.access_list_entry +type AccessListElementBatch struct { + IDs []int64 + Indexes []int64 + TxIDs []int64 + Addresses []string + StorageKeys []pq.StringArray +} + +// ReceiptBatch is the db model for eth.receipt_cids +type ReceiptBatch struct { + IDs []int64 + TxIDs []int64 + LeafCIDs []string + LeafMhKeys []string + PostStatuses []int64 + PostStates []string + Contracts []string + ContractHashes []string + LogRoots []string +} + +// StateNodeBatch is the db model for eth.state_cids +type StateNodeBatch struct { + IDs []int64 + HeaderIDs []int64 + Paths [][]byte + StateKeys []string + NodeTypes []int + CIDs []string + MhKeys []string + Diffs []bool +} + +// StorageNodeBatch is the db model for eth.storage_cids +type StorageNodeBatch struct { + IDs []int64 + StateIDs []int64 + Paths [][]byte + StorageKeys []string + NodeTypes []int + CIDs []string + MhKeys []string + Diffs []bool +} + +// StorageNodeWithStateKeyBatch is a db model for eth.storage_cids + eth.state_cids.state_key +type StorageNodeWithStateKeyBatch struct { + IDs []int64 + StateIDs []int64 + Paths [][]byte + StateKeys []string + StorageKeys []string + NodeTypes []int + CIDs []string + MhKeys []string + Diffs []bool +} + +// StateAccountBatch is a db model for an eth state account (decoded value of state leaf node) +type StateAccountBatch struct { + IDs []int64 + StateIDs []int64 + Balances []string + Nonces []int64 + CodeHashes [][]byte + StorageRoots []string +} + +// LogsBatch is the db model for eth.logs +type LogsBatch struct { + IDs []int64 + LeafCIDs []string + LeafMhKeys []string + ReceiptIDs []int64 + Addresses []string + Indexes []int64 + Data [][]byte + Topic0s []string + Topic1s []string + Topic2s []string + Topic3s []string +} diff --git a/statediff/indexer/models/v2/models.go 
b/statediff/indexer/models/v2/models.go new file mode 100644 index 000000000..d303826b7 --- /dev/null +++ b/statediff/indexer/models/v2/models.go @@ -0,0 +1,149 @@ +// VulcanizeDB +// Copyright © 2019 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package models + +import "github.com/lib/pq" + +// HeaderModel is the db model for eth.header_cids +type HeaderModel struct { + ID int64 `db:"id"` + BlockNumber string `db:"block_number"` + BlockHash string `db:"block_hash"` + ParentHash string `db:"parent_hash"` + CID string `db:"cid"` + MhKey string `db:"mh_key"` + TotalDifficulty string `db:"td"` + NodeID int64 `db:"node_id"` + Reward string `db:"reward"` + StateRoot string `db:"state_root"` + UncleRoot string `db:"uncle_root"` + TxRoot string `db:"tx_root"` + RctRoot string `db:"receipt_root"` + Bloom []byte `db:"bloom"` + Timestamp uint64 `db:"timestamp"` + TimesValidated int64 `db:"times_validated"` + BaseFee *int64 `db:"base_fee"` +} + +// UncleModel is the db model for eth.uncle_cids +type UncleModel struct { + ID int64 `db:"id"` + HeaderID int64 `db:"header_id"` + BlockHash string `db:"block_hash"` + ParentHash string `db:"parent_hash"` + CID string `db:"cid"` + MhKey string `db:"mh_key"` + Reward string `db:"reward"` +} + +// TxModel is the db model for eth.transaction_cids +type TxModel struct { + ID int64 `db:"id"` + HeaderID int64 `db:"header_id"` + Index int64 `db:"index"` + TxHash string `db:"tx_hash"` + CID string `db:"cid"` + MhKey string `db:"mh_key"` + Dst string `db:"dst"` + Src string `db:"src"` + Data []byte `db:"tx_data"` + Type uint8 `db:"tx_type"` +} + +// AccessListElementModel is the db model for eth.access_list_entry +type AccessListElementModel struct { + ID int64 `db:"id"` + Index int64 `db:"index"` + TxID int64 `db:"tx_id"` + Address string `db:"address"` + StorageKeys pq.StringArray `db:"storage_keys"` +} + +// ReceiptModel is the db model for eth.receipt_cids +type ReceiptModel struct { + ID int64 `db:"id"` + TxID int64 `db:"tx_id"` + LeafCID string `db:"leaf_cid"` + LeafMhKey string `db:"leaf_mh_key"` + PostStatus uint64 `db:"post_status"` + PostState string `db:"post_state"` + Contract string `db:"contract"` + ContractHash string `db:"contract_hash"` + LogRoot string `db:"log_root"` +} + +// StateNodeModel is the db model for eth.state_cids +type StateNodeModel struct { + ID int64 `db:"id"` + HeaderID int64 `db:"header_id"` + Path []byte `db:"state_path"` + StateKey string `db:"state_leaf_key"` + NodeType int `db:"node_type"` + CID string `db:"cid"` + MhKey string `db:"mh_key"` + Diff bool `db:"diff"` +} + +// StorageNodeModel is the db model for eth.storage_cids +type StorageNodeModel struct { + ID int64 `db:"id"` + StateID int64 `db:"state_id"` + Path []byte `db:"storage_path"` + StorageKey string `db:"storage_leaf_key"` + NodeType int `db:"node_type"` + CID string `db:"cid"` + MhKey string `db:"mh_key"` + Diff bool `db:"diff"` +} + +// 
StorageNodeWithStateKeyModel is a db model for eth.storage_cids + eth.state_cids.state_key +type StorageNodeWithStateKeyModel struct { + ID int64 `db:"id"` + StateID int64 `db:"state_id"` + Path []byte `db:"storage_path"` + StateKey string `db:"state_leaf_key"` + StorageKey string `db:"storage_leaf_key"` + NodeType int `db:"node_type"` + CID string `db:"cid"` + MhKey string `db:"mh_key"` + Diff bool `db:"diff"` +} + +// StateAccountModel is a db model for an eth state account (decoded value of state leaf node) +type StateAccountModel struct { + ID int64 `db:"id"` + StateID int64 `db:"state_id"` + Balance string `db:"balance"` + Nonce uint64 `db:"nonce"` + CodeHash []byte `db:"code_hash"` + StorageRoot string `db:"storage_root"` +} + +// LogsModel is the db model for eth.logs +type LogsModel struct { + ID int64 `db:"id"` + LeafCID string `db:"leaf_cid"` + LeafMhKey string `db:"leaf_mh_key"` + ReceiptID int64 `db:"receipt_id"` + Address string `db:"address"` + Index int64 `db:"index"` + Data []byte `db:"log_data"` + Topic0 string `db:"topic0"` + Topic1 string `db:"topic1"` + Topic2 string `db:"topic2"` + Topic3 string `db:"topic3"` +} diff --git a/statediff/indexer/models/batch.go b/statediff/indexer/models/v3/batch.go similarity index 100% rename from statediff/indexer/models/batch.go rename to statediff/indexer/models/v3/batch.go diff --git a/statediff/indexer/models/models.go b/statediff/indexer/models/v3/models.go similarity index 97% rename from statediff/indexer/models/models.go rename to statediff/indexer/models/v3/models.go index 2caed1bcb..4088930e3 100644 --- a/statediff/indexer/models/models.go +++ b/statediff/indexer/models/v3/models.go @@ -18,12 +18,6 @@ package models import "github.com/lib/pq" -// IPLDModel is the db model for public.blocks -type IPLDModel struct { - Key string `db:"key"` - Data []byte `db:"data"` -} - // HeaderModel is the db model for eth.header_cids type HeaderModel struct { BlockNumber string `db:"block_number"` -- 2.45.2 From 4be92cd48b37d04499d8be3347c3d922c6906baa Mon Sep 17 00:00:00 2001 From: i-norden Date: Sun, 30 Jan 2022 21:17:00 -0600 Subject: [PATCH 2/7] dual writing --- statediff/indexer/constructor.go | 30 +- statediff/indexer/database/dump/batch_tx.go | 15 +- statediff/indexer/database/dump/indexer.go | 71 ++-- statediff/indexer/database/file/indexer.go | 59 +-- .../indexer/database/file/indexer_test.go | 29 +- statediff/indexer/database/file/writer.go | 30 +- statediff/indexer/database/sql/batch_tx.go | 37 +- statediff/indexer/database/sql/indexer.go | 388 +++++++++++++----- .../database/sql/indexer_shared_test.go | 3 +- statediff/indexer/database/sql/interfaces.go | 87 ---- statediff/indexer/database/sql/metrics.go | 76 ++-- .../database/sql/pgx_indexer_legacy_test.go | 8 +- .../indexer/database/sql/pgx_indexer_test.go | 36 +- .../indexer/database/sql/postgres/config.go | 11 + .../indexer/database/sql/postgres/pgx.go | 48 +-- .../indexer/database/sql/postgres/sqlx.go | 48 +-- .../database/sql/postgres/test_helpers.go | 39 +- .../database/sql/postgres/v2/database.go | 115 ++++++ .../sql/postgres/{ => v3}/database.go | 70 ++-- .../database/sql/sqlx_indexer_legacy_test.go | 8 +- .../indexer/database/sql/sqlx_indexer_test.go | 36 +- statediff/indexer/database/sql/v2/writer.go | 222 ++++++++++ .../indexer/database/sql/{ => v3}/writer.go | 104 +++-- statediff/indexer/interfaces/interfaces.go | 73 +++- statediff/indexer/mocks/test_data.go | 5 +- statediff/indexer/models/v2/batch.go | 6 - statediff/indexer/models/v2/models.go | 34 +- 
statediff/indexer/models/v3/batch.go | 6 - statediff/service.go | 8 +- 29 files changed, 1131 insertions(+), 571 deletions(-) delete mode 100644 statediff/indexer/database/sql/interfaces.go create mode 100644 statediff/indexer/database/sql/postgres/v2/database.go rename statediff/indexer/database/sql/postgres/{ => v3}/database.go (59%) create mode 100644 statediff/indexer/database/sql/v2/writer.go rename statediff/indexer/database/sql/{ => v3}/writer.go (60%) diff --git a/statediff/indexer/constructor.go b/statediff/indexer/constructor.go index 9a66dba89..a8f2d5211 100644 --- a/statediff/indexer/constructor.go +++ b/statediff/indexer/constructor.go @@ -26,6 +26,8 @@ import ( "github.com/ethereum/go-ethereum/statediff/indexer/database/file" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" + v2 "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres/v2" + v3 "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres/v3" "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" "github.com/ethereum/go-ethereum/statediff/indexer/node" "github.com/ethereum/go-ethereum/statediff/indexer/shared" @@ -44,27 +46,41 @@ func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, n return file.NewStateDiffIndexer(ctx, chainConfig, fc) case shared.POSTGRES: log.Info("Starting statediff service in Postgres writing mode") - pgc, ok := config.(postgres.Config) + pgc, ok := config.(postgres.MultiConfig) if !ok { return nil, fmt.Errorf("postgres config is not the correct type: got %T, expected %T", config, postgres.Config{}) } var err error - var driver sql.Driver - switch pgc.Driver { + var oldDriver, newDriver interfaces.Driver + switch pgc.V2.Driver { case postgres.PGX: - driver, err = postgres.NewPGXDriver(ctx, pgc, nodeInfo) + oldDriver, err = postgres.NewPGXDriver(ctx, pgc.V2) if err != nil { return nil, err } case postgres.SQLX: - driver, err = postgres.NewSQLXDriver(ctx, pgc, nodeInfo) + oldDriver, err = postgres.NewSQLXDriver(ctx, pgc.V2) if err != nil { return nil, err } default: - return nil, fmt.Errorf("unrecongized Postgres driver type: %s", pgc.Driver) + return nil, fmt.Errorf("unrecognized Postgres driver type: %s", pgc.V2.Driver) } - return sql.NewStateDiffIndexer(ctx, chainConfig, postgres.NewPostgresDB(driver)) + switch pgc.V3.Driver { + case postgres.PGX: + newDriver, err = postgres.NewPGXDriver(ctx, pgc.V3) + if err != nil { + return nil, err + } + case postgres.SQLX: + newDriver, err = postgres.NewSQLXDriver(ctx, pgc.V3) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unrecognized Postgres driver type: %s", pgc.V3.Driver) + } + return sql.NewStateDiffIndexer(ctx, chainConfig, nodeInfo, v2.NewPostgresDB(oldDriver), v3.NewPostgresDB(newDriver)) case shared.DUMP: log.Info("Starting statediff service in data dump mode") dumpc, ok := config.(dump.Config) diff --git a/statediff/indexer/database/dump/batch_tx.go b/statediff/indexer/database/dump/batch_tx.go index f1754b907..9e001dbca 100644 --- a/statediff/indexer/database/dump/batch_tx.go +++ b/statediff/indexer/database/dump/batch_tx.go @@ -22,7 +22,6 @@ import ( "github.com/ethereum/go-ethereum/statediff/indexer/ipld" - "github.com/ethereum/go-ethereum/statediff/indexer/models" blockstore "github.com/ipfs/go-ipfs-blockstore" dshelp "github.com/ipfs/go-ipfs-ds-help" node "github.com/ipfs/go-ipld-format" @@ -33,8 +32,8 @@ type BatchTx struct { BlockNumber uint64 dump
io.Writer quit chan struct{} - iplds chan models.IPLDModel - ipldCache models.IPLDBatch + iplds chan v3.IPLDModel + ipldCache v3.IPLDBatch submit func(blockTx *BatchTx, err error) error } @@ -48,7 +47,7 @@ func (tx *BatchTx) flush() error { if _, err := fmt.Fprintf(tx.dump, "%+v\r\n", tx.ipldCache); err != nil { return err } - tx.ipldCache = models.IPLDBatch{} + tx.ipldCache = v3.IPLDBatch{} return nil } @@ -60,21 +59,21 @@ func (tx *BatchTx) cache() { tx.ipldCache.Keys = append(tx.ipldCache.Keys, i.Key) tx.ipldCache.Values = append(tx.ipldCache.Values, i.Data) case <-tx.quit: - tx.ipldCache = models.IPLDBatch{} + tx.ipldCache = v3.IPLDBatch{} return } } } func (tx *BatchTx) cacheDirect(key string, value []byte) { - tx.iplds <- models.IPLDModel{ + tx.iplds <- v3.IPLDModel{ Key: key, Data: value, } } func (tx *BatchTx) cacheIPLD(i node.Node) { - tx.iplds <- models.IPLDModel{ + tx.iplds <- v3.IPLDModel{ Key: blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(), Data: i.RawData(), } @@ -86,7 +85,7 @@ func (tx *BatchTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error return "", "", err } prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String() - tx.iplds <- models.IPLDModel{ + tx.iplds <- v3.IPLDModel{ Key: prefixedKey, Data: raw, } diff --git a/statediff/indexer/database/dump/indexer.go b/statediff/indexer/database/dump/indexer.go index e450f941a..21f9f347a 100644 --- a/statediff/indexer/database/dump/indexer.go +++ b/statediff/indexer/database/dump/indexer.go @@ -22,8 +22,6 @@ import ( "math/big" "time" - ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld" - "github.com/ipfs/go-cid" node "github.com/ipfs/go-ipld-format" "github.com/multiformats/go-multihash" @@ -36,7 +34,9 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" - "github.com/ethereum/go-ethereum/statediff/indexer/models" + ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld" + sharedModels "github.com/ethereum/go-ethereum/statediff/indexer/models/shared" + v3Models "github.com/ethereum/go-ethereum/statediff/indexer/models/v3" "github.com/ethereum/go-ethereum/statediff/indexer/shared" sdtypes "github.com/ethereum/go-ethereum/statediff/types" ) @@ -61,12 +61,15 @@ func NewStateDiffIndexer(chainConfig *params.ChainConfig, config Config) *StateD } } -// ReportDBMetrics has nothing to report for dump -func (sdi *StateDiffIndexer) ReportDBMetrics(time.Duration, <-chan bool) {} +// ReportOldDBMetrics has nothing to report for dump +func (sdi *StateDiffIndexer) ReportOldDBMetrics(time.Duration, <-chan bool) {} + +// ReportNewDBMetrics has nothing to report for dump +func (sdi *StateDiffIndexer) ReportNewDBMetrics(time.Duration, <-chan bool) {} // PushBlock pushes and indexes block data in sql, except state & storage nodes (includes header, uncles, transactions & receipts) // Returns an initiated DB transaction which must be Closed via defer to commit or rollback -func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, error) { +func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, int64, error) { start, t := time.Now(), time.Now() blockHash := block.Hash() blockHashStr := blockHash.String() @@ -75,20 +78,20 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip 
transactions := block.Transactions() // Derive any missing fields if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, transactions); err != nil { - return nil, err + return nil, 0, err } // Generate the block iplds headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts) if err != nil { - return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err) + return nil, 0, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err) } if len(txNodes) != len(rctNodes) || len(rctNodes) != len(rctLeafNodeCIDs) { - return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs)) + return nil, 0, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs)) } if len(txTrieNodes) != len(rctTrieNodes) { - return nil, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes)) + return nil, 0, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes)) } // Calculate reward @@ -104,9 +107,9 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip blockTx := &BatchTx{ BlockNumber: height, dump: sdi.dump, - iplds: make(chan models.IPLDModel), + iplds: make(chan sharedModels.IPLDModel), quit: make(chan struct{}), - ipldCache: models.IPLDBatch{}, + ipldCache: sharedModels.IPLDBatch{}, submit: func(self *BatchTx, err error) error { close(self.quit) close(self.iplds) @@ -139,7 +142,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip var headerID string headerID, err = sdi.processHeader(blockTx, block.Header(), headerNode, reward, totalDifficulty) if err != nil { - return nil, err + return nil, 0, err } tDiff = time.Since(t) indexerMetrics.tHeaderProcessing.Update(tDiff) @@ -148,7 +151,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip // Publish and index uncles err = sdi.processUncles(blockTx, headerID, height, uncleNodes) if err != nil { - return nil, err + return nil, 0, err } tDiff = time.Since(t) indexerMetrics.tUncleProcessing.Update(tDiff) @@ -169,14 +172,14 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip rctLeafNodeCIDs: rctLeafNodeCIDs, }) if err != nil { - return nil, err + return nil, 0, err } tDiff = time.Since(t) indexerMetrics.tTxAndRecProcessing.Update(tDiff) traceMsg += fmt.Sprintf("tx and receipt processing time: %s\r\n", tDiff.String()) t = time.Now() - return blockTx, err + return blockTx, 0, err } // processHeader publishes and indexes a header IPLD in Postgres @@ -185,7 +188,7 @@ func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, he tx.cacheIPLD(headerNode) headerID := header.Hash().String() - mod := models.HeaderModel{ + mod := v3Models.HeaderModel{ CID: headerNode.Cid().String(), MhKey: shared.MultihashKeyFromCID(headerNode.Cid()), ParentHash: header.ParentHash.String(), @@ -217,7 +220,7 @@ func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNu } else { uncleReward = shared.CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64()) } - uncle := models.UncleModel{ + uncle := v3Models.UncleModel{ HeaderID: headerID, CID: 
uncleNode.Cid().String(), MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()), @@ -273,7 +276,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs if err != nil { return fmt.Errorf("error deriving tx sender: %v", err) } - txModel := models.TxModel{ + txModel := v3Models.TxModel{ HeaderID: args.headerID, Dst: shared.HandleZeroAddrPointer(trx.To()), Src: shared.HandleZeroAddr(from), @@ -295,7 +298,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs for k, storageKey := range accessListElement.StorageKeys { storageKeys[k] = storageKey.Hex() } - accessListElementModel := models.AccessListElementModel{ + accessListElementModel := v3Models.AccessListElementModel{ TxID: trxID, Index: int64(j), Address: accessListElement.Address.Hex(), @@ -318,7 +321,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs return fmt.Errorf("invalid receipt leaf node cid") } - rctModel := &models.ReceiptModel{ + rctModel := &v3Models.ReceiptModel{ TxID: trxID, Contract: contract, ContractHash: contractHash, @@ -336,7 +339,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs return err } - logDataSet := make([]*models.LogsModel, len(receipt.Logs)) + logDataSet := make([]*v3Models.LogsModel, len(receipt.Logs)) for idx, l := range receipt.Logs { topicSet := make([]string, 4) for ti, topic := range l.Topics { @@ -347,7 +350,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs return fmt.Errorf("invalid log cid") } - logDataSet[idx] = &models.LogsModel{ + logDataSet[idx] = &v3Models.LogsModel{ ReceiptID: trxID, Address: l.Address.String(), Index: int64(l.Index), @@ -376,7 +379,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs } // PushStateNode publishes and indexes a state diff node object (including any child storage nodes) in the IPLD sql -func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerID string) error { +func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerHash string, headerID int64) error { tx, ok := batch.(*BatchTx) if !ok { return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch) @@ -385,8 +388,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt if stateNode.NodeType == sdtypes.Removed { // short circuit if it is a Removed node // this assumes the db has been initialized and a public.blocks entry for the Removed node is present - stateModel := models.StateNodeModel{ - HeaderID: headerID, + stateModel := v3Models.StateNodeModel{ + HeaderID: headerHash, Path: stateNode.Path, StateKey: common.BytesToHash(stateNode.LeafKey).String(), CID: shared.RemovedNodeStateCID, @@ -400,8 +403,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt if err != nil { return fmt.Errorf("error generating and cacheing state node IPLD: %v", err) } - stateModel := models.StateNodeModel{ - HeaderID: headerID, + stateModel := v3Models.StateNodeModel{ + HeaderID: headerHash, Path: stateNode.Path, StateKey: common.BytesToHash(stateNode.LeafKey).String(), CID: stateCIDStr, @@ -425,8 +428,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil { return fmt.Errorf("error decoding state account rlp: %s", err.Error()) } - accountModel := models.StateAccountModel{ - 
HeaderID: headerID, + accountModel := v3Models.StateAccountModel{ + HeaderID: headerHash, StatePath: stateNode.Path, Balance: account.Balance.String(), Nonce: account.Nonce, @@ -442,8 +445,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt if storageNode.NodeType == sdtypes.Removed { // short circuit if it is a Removed node // this assumes the db has been initialized and a public.blocks entry for the Removed node is present - storageModel := models.StorageNodeModel{ - HeaderID: headerID, + storageModel := v3Models.StorageNodeModel{ + HeaderID: headerHash, StatePath: stateNode.Path, Path: storageNode.Path, StorageKey: common.BytesToHash(storageNode.LeafKey).String(), @@ -460,8 +463,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt if err != nil { return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err) } - storageModel := models.StorageNodeModel{ - HeaderID: headerID, + storageModel := v3Models.StorageNodeModel{ + HeaderID: headerHash, StatePath: stateNode.Path, Path: storageNode.Path, StorageKey: common.BytesToHash(storageNode.LeafKey).String(), diff --git a/statediff/indexer/database/file/indexer.go b/statediff/indexer/database/file/indexer.go index 870c1f259..49da42493 100644 --- a/statediff/indexer/database/file/indexer.go +++ b/statediff/indexer/database/file/indexer.go @@ -38,7 +38,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld" - "github.com/ethereum/go-ethereum/statediff/indexer/models" + v3Models "github.com/ethereum/go-ethereum/statediff/indexer/models/v3" "github.com/ethereum/go-ethereum/statediff/indexer/shared" sdtypes "github.com/ethereum/go-ethereum/statediff/types" ) @@ -86,12 +86,15 @@ func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, c }, nil } -// ReportDBMetrics has nothing to report for dump -func (sdi *StateDiffIndexer) ReportDBMetrics(time.Duration, <-chan bool) {} +// ReportOldDBMetrics has nothing to report for dump +func (sdi *StateDiffIndexer) ReportOldDBMetrics(time.Duration, <-chan bool) {} + +// ReportNewDBMetrics has nothing to report for dump +func (sdi *StateDiffIndexer) ReportNewDBMetrics(time.Duration, <-chan bool) {} // PushBlock pushes and indexes block data in sql, except state & storage nodes (includes header, uncles, transactions & receipts) // Returns an initiated DB transaction which must be Closed via defer to commit or rollback -func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, error) { +func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, int64, error) { start, t := time.Now(), time.Now() blockHash := block.Hash() blockHashStr := blockHash.String() @@ -100,20 +103,20 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip transactions := block.Transactions() // Derive any missing fields if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, transactions); err != nil { - return nil, err + return nil, 0, err } // Generate the block iplds headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts) if err != nil { - return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err) + 
return nil, 0, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err) } if len(txNodes) != len(rctNodes) || len(rctNodes) != len(rctLeafNodeCIDs) { - return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs)) + return nil, 0, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs)) } if len(txTrieNodes) != len(rctTrieNodes) { - return nil, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes)) + return nil, 0, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes)) } // Calculate reward @@ -176,14 +179,14 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip rctLeafNodeCIDs: rctLeafNodeCIDs, }) if err != nil { - return nil, err + return nil, 0, err } tDiff = time.Since(t) indexerMetrics.tTxAndRecProcessing.Update(tDiff) traceMsg += fmt.Sprintf("tx and receipt processing time: %s\r\n", tDiff.String()) t = time.Now() - return blockTx, err + return blockTx, 0, err } // processHeader write a header IPLD insert SQL stmt to a file @@ -197,7 +200,7 @@ func (sdi *StateDiffIndexer) processHeader(header *types.Header, headerNode node *baseFee = header.BaseFee.String() } headerID := header.Hash().String() - sdi.fileWriter.upsertHeaderCID(models.HeaderModel{ + sdi.fileWriter.upsertHeaderCID(v3Models.HeaderModel{ NodeID: sdi.nodeID, CID: headerNode.Cid().String(), MhKey: shared.MultihashKeyFromCID(headerNode.Cid()), @@ -229,7 +232,7 @@ func (sdi *StateDiffIndexer) processUncles(headerID string, blockNumber uint64, } else { uncleReward = shared.CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64()) } - sdi.fileWriter.upsertUncleCID(models.UncleModel{ + sdi.fileWriter.upsertUncleCID(v3Models.UncleModel{ HeaderID: headerID, CID: uncleNode.Cid().String(), MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()), @@ -280,7 +283,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error { if err != nil { return fmt.Errorf("error deriving tx sender: %v", err) } - txModel := models.TxModel{ + txModel := v3Models.TxModel{ HeaderID: args.headerID, Dst: shared.HandleZeroAddrPointer(trx.To()), Src: shared.HandleZeroAddr(from), @@ -300,7 +303,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error { for k, storageKey := range accessListElement.StorageKeys { storageKeys[k] = storageKey.Hex() } - accessListElementModel := models.AccessListElementModel{ + accessListElementModel := v3Models.AccessListElementModel{ TxID: txID, Index: int64(j), Address: accessListElement.Address.Hex(), @@ -321,7 +324,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error { return fmt.Errorf("invalid receipt leaf node cid") } - rctModel := &models.ReceiptModel{ + rctModel := &v3Models.ReceiptModel{ TxID: txID, Contract: contract, ContractHash: contractHash, @@ -337,7 +340,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error { sdi.fileWriter.upsertReceiptCID(rctModel) // index logs - logDataSet := make([]*models.LogsModel, len(receipt.Logs)) + logDataSet := make([]*v3Models.LogsModel, len(receipt.Logs)) for idx, l := range receipt.Logs { topicSet := make([]string, 4) for ti, topic := range l.Topics { @@ -348,7 +351,7 @@ func (sdi *StateDiffIndexer) 
processReceiptsAndTxs(args processArgs) error { return fmt.Errorf("invalid log cid") } - logDataSet[idx] = &models.LogsModel{ + logDataSet[idx] = &v3Models.LogsModel{ ReceiptID: txID, Address: l.Address.String(), Index: int64(l.Index), @@ -374,13 +377,13 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error { } // PushStateNode writes a state diff node object (including any child storage nodes) IPLD insert SQL stmt to a file -func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerID string) error { +func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerHash string, headerID int64) error { // publish the state node if stateNode.NodeType == sdtypes.Removed { // short circuit if it is a Removed node // this assumes the db has been initialized and a public.blocks entry for the Removed node is present - stateModel := models.StateNodeModel{ - HeaderID: headerID, + stateModel := v3Models.StateNodeModel{ + HeaderID: headerHash, Path: stateNode.Path, StateKey: common.BytesToHash(stateNode.LeafKey).String(), CID: shared.RemovedNodeStateCID, @@ -394,8 +397,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt if err != nil { return fmt.Errorf("error generating and cacheing state node IPLD: %v", err) } - stateModel := models.StateNodeModel{ - HeaderID: headerID, + stateModel := v3Models.StateNodeModel{ + HeaderID: headerHash, Path: stateNode.Path, StateKey: common.BytesToHash(stateNode.LeafKey).String(), CID: stateCIDStr, @@ -417,8 +420,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil { return fmt.Errorf("error decoding state account rlp: %s", err.Error()) } - accountModel := models.StateAccountModel{ - HeaderID: headerID, + accountModel := v3Models.StateAccountModel{ + HeaderID: headerHash, StatePath: stateNode.Path, Balance: account.Balance.String(), Nonce: account.Nonce, @@ -432,8 +435,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt if storageNode.NodeType == sdtypes.Removed { // short circuit if it is a Removed node // this assumes the db has been initialized and a public.blocks entry for the Removed node is present - storageModel := models.StorageNodeModel{ - HeaderID: headerID, + storageModel := v3Models.StorageNodeModel{ + HeaderID: headerHash, StatePath: stateNode.Path, Path: storageNode.Path, StorageKey: common.BytesToHash(storageNode.LeafKey).String(), @@ -448,8 +451,8 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt if err != nil { return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err) } - storageModel := models.StorageNodeModel{ - HeaderID: headerID, + storageModel := v3Models.StorageNodeModel{ + HeaderID: headerHash, StatePath: stateNode.Path, Path: storageNode.Path, StorageKey: common.BytesToHash(storageNode.LeafKey).String(), diff --git a/statediff/indexer/database/file/indexer_test.go b/statediff/indexer/database/file/indexer_test.go index ef849e8e8..e5a030dcf 100644 --- a/statediff/indexer/database/file/indexer_test.go +++ b/statediff/indexer/database/file/indexer_test.go @@ -24,9 +24,10 @@ import ( "os" "testing" + "github.com/ethereum/go-ethereum/statediff/indexer/models/v2" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/statediff/indexer/models" 
"github.com/ethereum/go-ethereum/statediff/indexer/shared" "github.com/ipfs/go-cid" @@ -331,7 +332,7 @@ func TestFileIndexer(t *testing.T) { if txRes.Value != transactions[3].Value().String() { t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value) } - accessListElementModels := make([]models.AccessListElementModel, 0) + accessListElementModels := make([]v2.AccessListElementModel, 0) pgStr = `SELECT access_list_elements.* FROM eth.access_list_elements INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.tx_hash) WHERE cid = $1 ORDER BY access_list_elements.index ASC` err = sqlxdb.Select(&accessListElementModels, pgStr, c) if err != nil { @@ -340,11 +341,11 @@ func TestFileIndexer(t *testing.T) { if len(accessListElementModels) != 2 { t.Fatalf("expected two access list entries, got %d", len(accessListElementModels)) } - model1 := models.AccessListElementModel{ + model1 := v2.AccessListElementModel{ Index: accessListElementModels[0].Index, Address: accessListElementModels[0].Address, } - model2 := models.AccessListElementModel{ + model2 := v2.AccessListElementModel{ Index: accessListElementModels[1].Index, Address: accessListElementModels[1].Address, StorageKeys: accessListElementModels[1].StorageKeys, @@ -447,7 +448,7 @@ func TestFileIndexer(t *testing.T) { expectTrue(t, test_helpers.ListContainsString(rcts, rct5CID.String())) for idx, c := range rcts { - result := make([]models.IPLDModel, 0) + result := make([]v3.IPLDModel, 0) pgStr = `SELECT data FROM eth.receipt_cids INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key) @@ -531,7 +532,7 @@ func TestFileIndexer(t *testing.T) { defer tearDown(t) // check that state nodes were properly indexed and published - stateNodes := make([]models.StateNodeModel, 0) + stateNodes := make([]v2.StateNodeModel, 0) pgStr := `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) WHERE header_cids.block_number = $1 AND node_type != 3` @@ -553,7 +554,7 @@ func TestFileIndexer(t *testing.T) { t.Fatal(err) } pgStr = `SELECT * from eth.state_accounts WHERE header_id = $1 AND state_path = $2` - var account models.StateAccountModel + var account v2.StateAccountModel err = sqlxdb.Get(&account, pgStr, stateNode.HeaderID, stateNode.Path) if err != nil { t.Fatal(err) @@ -563,7 +564,7 @@ func TestFileIndexer(t *testing.T) { test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.ContractLeafKey).Hex()) test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x06'}) test_helpers.ExpectEqual(t, data, mocks.ContractLeafNode) - test_helpers.ExpectEqual(t, account, models.StateAccountModel{ + test_helpers.ExpectEqual(t, account, v2.StateAccountModel{ HeaderID: account.HeaderID, StatePath: stateNode.Path, Balance: "0", @@ -577,7 +578,7 @@ func TestFileIndexer(t *testing.T) { test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.AccountLeafKey).Hex()) test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x0c'}) test_helpers.ExpectEqual(t, data, mocks.AccountLeafNode) - test_helpers.ExpectEqual(t, account, models.StateAccountModel{ + test_helpers.ExpectEqual(t, account, v2.StateAccountModel{ HeaderID: account.HeaderID, StatePath: stateNode.Path, Balance: "1000", @@ -589,7 +590,7 @@ func TestFileIndexer(t *testing.T) { } // check that Removed state nodes were properly indexed and published - stateNodes = 
make([]models.StateNodeModel, 0) + stateNodes = make([]v2.StateNodeModel, 0) pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) WHERE header_cids.block_number = $1 AND node_type = 3` @@ -622,7 +623,7 @@ func TestFileIndexer(t *testing.T) { defer tearDown(t) // check that storage nodes were properly indexed - storageNodes := make([]models.StorageNodeWithStateKeyModel, 0) + storageNodes := make([]v2.StorageNodeWithStateKeyModel, 0) pgStr := `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path FROM eth.storage_cids, eth.state_cids, eth.header_cids WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id) @@ -634,7 +635,7 @@ func TestFileIndexer(t *testing.T) { t.Fatal(err) } test_helpers.ExpectEqual(t, len(storageNodes), 1) - test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{ + test_helpers.ExpectEqual(t, storageNodes[0], v2.StorageNodeWithStateKeyModel{ CID: storageCID.String(), NodeType: 2, StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(), @@ -655,7 +656,7 @@ func TestFileIndexer(t *testing.T) { test_helpers.ExpectEqual(t, data, mocks.StorageLeafNode) // check that Removed storage nodes were properly indexed - storageNodes = make([]models.StorageNodeWithStateKeyModel, 0) + storageNodes = make([]v2.StorageNodeWithStateKeyModel, 0) pgStr = `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path FROM eth.storage_cids, eth.state_cids, eth.header_cids WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id) @@ -667,7 +668,7 @@ func TestFileIndexer(t *testing.T) { t.Fatal(err) } test_helpers.ExpectEqual(t, len(storageNodes), 1) - test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{ + test_helpers.ExpectEqual(t, storageNodes[0], v2.StorageNodeWithStateKeyModel{ CID: shared.RemovedNodeStorageCID, NodeType: 3, StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(), diff --git a/statediff/indexer/database/file/writer.go b/statediff/indexer/database/file/writer.go index 48de0853d..520c2c641 100644 --- a/statediff/indexer/database/file/writer.go +++ b/statediff/indexer/database/file/writer.go @@ -20,13 +20,15 @@ import ( "fmt" "io" + sharedModels "github.com/ethereum/go-ethereum/statediff/indexer/models/shared" + blockstore "github.com/ipfs/go-ipfs-blockstore" dshelp "github.com/ipfs/go-ipfs-ds-help" node "github.com/ipfs/go-ipld-format" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/statediff/indexer/ipld" - "github.com/ethereum/go-ethereum/statediff/indexer/models" + v3Models "github.com/ethereum/go-ethereum/statediff/indexer/models/v3" nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node" ) @@ -161,19 +163,19 @@ func (sqw *SQLWriter) upsertNode(node nodeinfo.Info) { sqw.stmts <- []byte(fmt.Sprintf(nodeInsert, node.GenesisBlock, node.NetworkID, node.ID, node.ClientName, node.ChainID)) } -func (sqw *SQLWriter) upsertIPLD(ipld models.IPLDModel) { +func (sqw *SQLWriter) upsertIPLD(ipld sharedModels.IPLDModel) { sqw.stmts <- []byte(fmt.Sprintf(ipldInsert, ipld.Key, ipld.Data)) } func (sqw *SQLWriter) upsertIPLDDirect(key string, value []byte) { - 
sqw.upsertIPLD(models.IPLDModel{ + sqw.upsertIPLD(sharedModels.IPLDModel{ Key: key, Data: value, }) } func (sqw *SQLWriter) upsertIPLDNode(i node.Node) { - sqw.upsertIPLD(models.IPLDModel{ + sqw.upsertIPLD(sharedModels.IPLDModel{ Key: blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(), Data: i.RawData(), }) @@ -185,14 +187,14 @@ func (sqw *SQLWriter) upsertIPLDRaw(codec, mh uint64, raw []byte) (string, strin return "", "", err } prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String() - sqw.upsertIPLD(models.IPLDModel{ + sqw.upsertIPLD(sharedModels.IPLDModel{ Key: prefixedKey, Data: raw, }) return c.String(), prefixedKey, err } -func (sqw *SQLWriter) upsertHeaderCID(header models.HeaderModel) { +func (sqw *SQLWriter) upsertHeaderCID(header v3Models.HeaderModel) { stmt := fmt.Sprintf(headerInsert, header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, header.NodeID, header.Reward, header.StateRoot, header.TxRoot, header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, header.MhKey, 1, header.Coinbase) @@ -200,30 +202,30 @@ func (sqw *SQLWriter) upsertHeaderCID(header models.HeaderModel) { indexerMetrics.blocks.Inc(1) } -func (sqw *SQLWriter) upsertUncleCID(uncle models.UncleModel) { +func (sqw *SQLWriter) upsertUncleCID(uncle v3Models.UncleModel) { sqw.stmts <- []byte(fmt.Sprintf(uncleInsert, uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey)) } -func (sqw *SQLWriter) upsertTransactionCID(transaction models.TxModel) { +func (sqw *SQLWriter) upsertTransactionCID(transaction v3Models.TxModel) { sqw.stmts <- []byte(fmt.Sprintf(txInsert, transaction.HeaderID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, transaction.MhKey, transaction.Data, transaction.Type, transaction.Value)) indexerMetrics.transactions.Inc(1) } -func (sqw *SQLWriter) upsertAccessListElement(accessListElement models.AccessListElementModel) { +func (sqw *SQLWriter) upsertAccessListElement(accessListElement v3Models.AccessListElementModel) { sqw.stmts <- []byte(fmt.Sprintf(alInsert, accessListElement.TxID, accessListElement.Index, accessListElement.Address, formatPostgresStringArray(accessListElement.StorageKeys))) indexerMetrics.accessListEntries.Inc(1) } -func (sqw *SQLWriter) upsertReceiptCID(rct *models.ReceiptModel) { +func (sqw *SQLWriter) upsertReceiptCID(rct *v3Models.ReceiptModel) { sqw.stmts <- []byte(fmt.Sprintf(rctInsert, rct.TxID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey, rct.PostState, rct.PostStatus, rct.LogRoot)) indexerMetrics.receipts.Inc(1) } -func (sqw *SQLWriter) upsertLogCID(logs []*models.LogsModel) { +func (sqw *SQLWriter) upsertLogCID(logs []*v3Models.LogsModel) { for _, l := range logs { sqw.stmts <- []byte(fmt.Sprintf(logInsert, l.LeafCID, l.LeafMhKey, l.ReceiptID, l.Address, l.Index, l.Topic0, l.Topic1, l.Topic2, l.Topic3, l.Data)) @@ -231,7 +233,7 @@ func (sqw *SQLWriter) upsertLogCID(logs []*models.LogsModel) { } } -func (sqw *SQLWriter) upsertStateCID(stateNode models.StateNodeModel) { +func (sqw *SQLWriter) upsertStateCID(stateNode v3Models.StateNodeModel) { var stateKey string if stateNode.StateKey != nullHash.String() { stateKey = stateNode.StateKey @@ -240,12 +242,12 @@ func (sqw *SQLWriter) upsertStateCID(stateNode models.StateNodeModel) { stateNode.NodeType, true, stateNode.MhKey)) } -func (sqw *SQLWriter) upsertStateAccount(stateAccount models.StateAccountModel) { +func (sqw 
*SQLWriter) upsertStateAccount(stateAccount v3Models.StateAccountModel) { sqw.stmts <- []byte(fmt.Sprintf(accountInsert, stateAccount.HeaderID, stateAccount.StatePath, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot)) } -func (sqw *SQLWriter) upsertStorageCID(storageCID models.StorageNodeModel) { +func (sqw *SQLWriter) upsertStorageCID(storageCID v3Models.StorageNodeModel) { var storageKey string if storageCID.StorageKey != nullHash.String() { storageKey = storageCID.StorageKey diff --git a/statediff/indexer/database/sql/batch_tx.go b/statediff/indexer/database/sql/batch_tx.go index fb1b289a1..deec4f07b 100644 --- a/statediff/indexer/database/sql/batch_tx.go +++ b/statediff/indexer/database/sql/batch_tx.go @@ -19,6 +19,8 @@ package sql import ( "context" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" + blockstore "github.com/ipfs/go-ipfs-blockstore" dshelp "github.com/ipfs/go-ipfs-ds-help" node "github.com/ipfs/go-ipld-format" @@ -26,18 +28,19 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/statediff/indexer/ipld" - "github.com/ethereum/go-ethereum/statediff/indexer/models" + modelsShared "github.com/ethereum/go-ethereum/statediff/indexer/models/shared" ) // BatchTx wraps a sql tx with the state necessary for building the tx concurrently during trie difference iteration type BatchTx struct { - BlockNumber uint64 - ctx context.Context - dbtx Tx - stm string - quit chan struct{} - iplds chan models.IPLDModel - ipldCache models.IPLDBatch + BlockNumber uint64 + ctx context.Context + oldDBTx interfaces.Tx + newDBTx interfaces.Tx + oldStmt, newStmt string + quit chan struct{} + iplds chan modelsShared.IPLDModel + ipldCache modelsShared.IPLDBatch submit func(blockTx *BatchTx, err error) error } @@ -48,11 +51,15 @@ func (tx *BatchTx) Submit(err error) error { } func (tx *BatchTx) flush() error { - _, err := tx.dbtx.Exec(tx.ctx, tx.stm, pq.Array(tx.ipldCache.Keys), pq.Array(tx.ipldCache.Values)) + _, err := tx.oldDBTx.Exec(tx.ctx, tx.oldStmt, pq.Array(tx.ipldCache.Keys), pq.Array(tx.ipldCache.Values)) if err != nil { return err } - tx.ipldCache = models.IPLDBatch{} + _, err = tx.newDBTx.Exec(tx.ctx, tx.newStmt, pq.Array(tx.ipldCache.Keys), pq.Array(tx.ipldCache.Values)) + if err != nil { + return err + } + tx.ipldCache = modelsShared.IPLDBatch{} return nil } @@ -64,21 +71,21 @@ func (tx *BatchTx) cache() { tx.ipldCache.Keys = append(tx.ipldCache.Keys, i.Key) tx.ipldCache.Values = append(tx.ipldCache.Values, i.Data) case <-tx.quit: - tx.ipldCache = models.IPLDBatch{} + tx.ipldCache = modelsShared.IPLDBatch{} return } } } func (tx *BatchTx) cacheDirect(key string, value []byte) { - tx.iplds <- models.IPLDModel{ + tx.iplds <- modelsShared.IPLDModel{ Key: key, Data: value, } } func (tx *BatchTx) cacheIPLD(i node.Node) { - tx.iplds <- models.IPLDModel{ + tx.iplds <- modelsShared.IPLDModel{ Key: blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(), Data: i.RawData(), } @@ -90,7 +97,7 @@ func (tx *BatchTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error return "", "", err } prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String() - tx.iplds <- models.IPLDModel{ + tx.iplds <- modelsShared.IPLDModel{ Key: prefixedKey, Data: raw, } @@ -98,7 +105,7 @@ func (tx *BatchTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error } // rollback sql transaction and log any error -func rollback(ctx context.Context, tx Tx) { +func 
rollback(ctx context.Context, tx interfaces.Tx) { if err := tx.Rollback(ctx); err != nil { log.Error(err.Error()) } diff --git a/statediff/indexer/database/sql/indexer.go b/statediff/indexer/database/sql/indexer.go index 3e578a469..2b67832f1 100644 --- a/statediff/indexer/database/sql/indexer.go +++ b/statediff/indexer/database/sql/indexer.go @@ -21,12 +21,12 @@ package sql import ( "context" + "errors" "fmt" "math/big" + "strings" "time" - ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld" - "github.com/ipfs/go-cid" node "github.com/ipfs/go-ipld-format" "github.com/multiformats/go-multihash" @@ -38,8 +38,14 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + v2Writer "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/v2" + v3Writer "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/v3" "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" - "github.com/ethereum/go-ethereum/statediff/indexer/models" + "github.com/ethereum/go-ethereum/statediff/indexer/ipld" + sharedModels "github.com/ethereum/go-ethereum/statediff/indexer/models/shared" + v2Models "github.com/ethereum/go-ethereum/statediff/indexer/models/v2" + v3Models "github.com/ethereum/go-ethereum/statediff/indexer/models/v3" + nodeInfo "github.com/ethereum/go-ethereum/statediff/indexer/node" "github.com/ethereum/go-ethereum/statediff/indexer/shared" sdtypes "github.com/ethereum/go-ethereum/statediff/types" ) @@ -55,24 +61,38 @@ var ( type StateDiffIndexer struct { ctx context.Context chainConfig *params.ChainConfig - dbWriter *Writer + oldDBWriter *v2Writer.Writer + newDBWriter *v3Writer.Writer } // NewStateDiffIndexer creates a sql implementation of interfaces.StateDiffIndexer -func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, db Database) (*StateDiffIndexer, error) { +func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, info nodeInfo.Info, old, new interfaces.Database) (*StateDiffIndexer, error) { // Write the removed node to the db on init - if _, err := db.Exec(ctx, db.InsertIPLDStm(), shared.RemovedNodeMhKey, []byte{}); err != nil { + if _, err := old.Exec(ctx, old.InsertIPLDStm(), shared.RemovedNodeMhKey, []byte{}); err != nil { + return nil, err + } + if _, err := new.Exec(ctx, new.InsertIPLDStm(), shared.RemovedNodeMhKey, []byte{}); err != nil { + return nil, err + } + // Write node info to the db on init + oldWriter := v2Writer.NewWriter(old) + newWriter := v3Writer.NewWriter(new) + if err := oldWriter.InsertNodeInfo(info); err != nil { + return nil, err + } + if err := newWriter.InsertNodeInfo(info); err != nil { return nil, err } return &StateDiffIndexer{ ctx: ctx, chainConfig: chainConfig, - dbWriter: NewWriter(db), + oldDBWriter: oldWriter, + newDBWriter: newWriter, }, nil } -// ReportDBMetrics is a reporting function to run as goroutine -func (sdi *StateDiffIndexer) ReportDBMetrics(delay time.Duration, quit <-chan bool) { +// ReportOldDBMetrics is a reporting function to run as goroutine +func (sdi *StateDiffIndexer) ReportOldDBMetrics(delay time.Duration, quit <-chan bool) { if !metrics.Enabled { return } @@ -81,7 +101,26 @@ func (sdi *StateDiffIndexer) ReportDBMetrics(delay time.Duration, quit <-chan bo for { select { case <-ticker.C: - dbMetrics.Update(sdi.dbWriter.db.Stats()) + dbMetrics.Update(sdi.oldDBWriter.Stats()) + case <-quit: + ticker.Stop() + return + } + } + }() +} + +// ReportNewDBMetrics is a reporting function to run as 
goroutine +func (sdi *StateDiffIndexer) ReportNewDBMetrics(delay time.Duration, quit <-chan bool) { + if !metrics.Enabled { + return + } + ticker := time.NewTicker(delay) + go func() { + for { + select { + case <-ticker.C: + dbMetrics.Update(sdi.newDBWriter.DB.Stats()) case <-quit: ticker.Stop() return @@ -92,7 +131,7 @@ func (sdi *StateDiffIndexer) ReportDBMetrics(delay time.Duration, quit <-chan bo // PushBlock pushes and indexes block data in sql, except state & storage nodes (includes header, uncles, transactions & receipts) // Returns an initiated DB transaction which must be Closed via defer to commit or rollback -func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, error) { +func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, int64, error) { start, t := time.Now(), time.Now() blockHash := block.Hash() blockHashStr := blockHash.String() @@ -101,20 +140,20 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip transactions := block.Transactions() // Derive any missing fields if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, transactions); err != nil { - return nil, err + return nil, 0, err } // Generate the block iplds - headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld2.FromBlockAndReceipts(block, receipts) + headerNode, uncleNodes, txNodes, txTrieNodes, rctNodes, rctTrieNodes, logTrieNodes, logLeafNodeCIDs, rctLeafNodeCIDs, err := ipld.FromBlockAndReceipts(block, receipts) if err != nil { - return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err) + return nil, 0, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err) } if len(txNodes) != len(rctNodes) || len(rctNodes) != len(rctLeafNodeCIDs) { - return nil, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs)) + return nil, 0, fmt.Errorf("expected number of transactions (%d), receipts (%d), and receipt trie leaf nodes (%d) to be equal", len(txNodes), len(rctNodes), len(rctLeafNodeCIDs)) } if len(txTrieNodes) != len(rctTrieNodes) { - return nil, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes)) + return nil, 0, fmt.Errorf("expected number of tx trie (%d) and rct trie (%d) nodes to be equal", len(txTrieNodes), len(rctTrieNodes)) } // Calculate reward @@ -128,26 +167,40 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip t = time.Now() // Begin new db tx for everything - tx, err := sdi.dbWriter.db.Begin(sdi.ctx) + oldTx, err := sdi.oldDBWriter.DB.Begin(sdi.ctx) if err != nil { - return nil, err + return nil, 0, err } defer func() { if p := recover(); p != nil { - rollback(sdi.ctx, tx) + rollback(sdi.ctx, oldTx) panic(p) } else if err != nil { - rollback(sdi.ctx, tx) + rollback(sdi.ctx, oldTx) + } + }() + newTx, err := sdi.newDBWriter.DB.Begin(sdi.ctx) + if err != nil { + return nil, 0, err + } + defer func() { + if p := recover(); p != nil { + rollback(sdi.ctx, newTx) + panic(p) + } else if err != nil { + rollback(sdi.ctx, newTx) } }() blockTx := &BatchTx{ ctx: sdi.ctx, BlockNumber: height, - stm: sdi.dbWriter.db.InsertIPLDsStm(), - iplds: make(chan models.IPLDModel), + oldStmt: 
sdi.oldDBWriter.DB.InsertIPLDsStm(), + newStmt: sdi.newDBWriter.DB.InsertStateStm(), + iplds: make(chan sharedModels.IPLDModel), quit: make(chan struct{}), - ipldCache: models.IPLDBatch{}, - dbtx: tx, + ipldCache: sharedModels.IPLDBatch{}, + oldDBTx: oldTx, + newDBTx: newTx, // handle transaction commit or rollback for any return case submit: func(self *BatchTx, err error) error { defer func() { @@ -155,24 +208,38 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip close(self.iplds) }() if p := recover(); p != nil { - rollback(sdi.ctx, tx) + rollback(sdi.ctx, oldTx) + rollback(sdi.ctx, newTx) panic(p) } else if err != nil { - rollback(sdi.ctx, tx) + rollback(sdi.ctx, oldTx) + rollback(sdi.ctx, newTx) } else { tDiff := time.Since(t) - indexerMetrics.tStateStoreCodeProcessing.Update(tDiff) + indexerMetrics.TimeStateStoreCodeProcessing.Update(tDiff) traceMsg += fmt.Sprintf("state, storage, and code storage processing time: %s\r\n", tDiff.String()) t = time.Now() if err := self.flush(); err != nil { - rollback(sdi.ctx, tx) + rollback(sdi.ctx, oldTx) + rollback(sdi.ctx, newTx) traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Since(start).String()) log.Debug(traceMsg) return err } - err = tx.Commit(sdi.ctx) + errs := make([]string, 0, 2) + err = oldTx.Commit(sdi.ctx) + if err != nil { + errs = append(errs, fmt.Sprintf("old DB tx commit error: %s", err.Error())) + } + err = newTx.Commit(sdi.ctx) + if err != nil { + errs = append(errs, fmt.Sprintf("new DB tx commit error: %s", err.Error())) + } + if len(errs) > 0 { + err = errors.New(strings.Join(errs, " && ")) + } tDiff = time.Since(t) - indexerMetrics.tPostgresCommit.Update(tDiff) + indexerMetrics.TimePostgresCommit.Update(tDiff) traceMsg += fmt.Sprintf("postgres transaction commit duration: %s\r\n", tDiff.String()) } traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Since(start).String()) @@ -183,32 +250,33 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip go blockTx.cache() tDiff := time.Since(t) - indexerMetrics.tFreePostgres.Update(tDiff) + indexerMetrics.TimeFreePostgres.Update(tDiff) traceMsg += fmt.Sprintf("time spent waiting for free postgres tx: %s:\r\n", tDiff.String()) t = time.Now() // Publish and index header, collect headerID - var headerID string + var headerID int64 headerID, err = sdi.processHeader(blockTx, block.Header(), headerNode, reward, totalDifficulty) if err != nil { - return nil, err + return nil, 0, err } tDiff = time.Since(t) - indexerMetrics.tHeaderProcessing.Update(tDiff) + indexerMetrics.TimeHeaderProcessing.Update(tDiff) traceMsg += fmt.Sprintf("header processing time: %s\r\n", tDiff.String()) t = time.Now() // Publish and index uncles - err = sdi.processUncles(blockTx, headerID, height, uncleNodes) + err = sdi.processUncles(blockTx, blockHashStr, headerID, height, uncleNodes) if err != nil { - return nil, err + return nil, 0, err } tDiff = time.Since(t) - indexerMetrics.tUncleProcessing.Update(tDiff) + indexerMetrics.TimeUncleProcessing.Update(tDiff) traceMsg += fmt.Sprintf("uncle processing time: %s\r\n", tDiff.String()) t = time.Now() // Publish and index receipts and txs err = sdi.processReceiptsAndTxs(blockTx, processArgs{ + headerHash: blockHashStr, headerID: headerID, blockNumber: block.Number(), receipts: receipts, @@ -222,19 +290,19 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip rctLeafNodeCIDs: rctLeafNodeCIDs, }) if err != nil { - return nil, err + return nil, 0, err 
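The submit closure above commits the v2 and v3 transactions independently and folds both failures into a single error rather than aborting after the first. A minimal sketch of that commit step, assuming only that both transactions expose Commit(ctx) error (the names below are illustrative, not part of the patch):

package sketch

import (
	"context"
	"errors"
	"fmt"
	"strings"
)

// committer is the minimal surface this sketch assumes for either DB transaction.
type committer interface {
	Commit(ctx context.Context) error
}

// commitBoth attempts both commits and joins any failures, mirroring the
// errs slice + strings.Join approach used in the submit closure above.
func commitBoth(ctx context.Context, oldTx, newTx committer) error {
	errs := make([]string, 0, 2)
	if err := oldTx.Commit(ctx); err != nil {
		errs = append(errs, fmt.Sprintf("old DB tx commit error: %s", err))
	}
	if err := newTx.Commit(ctx); err != nil {
		errs = append(errs, fmt.Sprintf("new DB tx commit error: %s", err))
	}
	if len(errs) > 0 {
		return errors.New(strings.Join(errs, " && "))
	}
	return nil
}

Attempting both commits before returning keeps the second database from being left with a dangling transaction when only the first commit fails.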
} tDiff = time.Since(t) - indexerMetrics.tTxAndRecProcessing.Update(tDiff) + indexerMetrics.TimeTxAndRecProcessing.Update(tDiff) traceMsg += fmt.Sprintf("tx and receipt processing time: %s\r\n", tDiff.String()) t = time.Now() - return blockTx, err + return blockTx, headerID, err } // processHeader publishes and indexes a header IPLD in Postgres // it returns the headerID -func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, headerNode node.Node, reward, td *big.Int) (string, error) { +func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, headerNode node.Node, reward, td *big.Int) (int64, error) { tx.cacheIPLD(headerNode) var baseFee *string @@ -242,14 +310,32 @@ func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, he baseFee = new(string) *baseFee = header.BaseFee.String() } - headerID := header.Hash().String() // index header - return headerID, sdi.dbWriter.upsertHeaderCID(tx.dbtx, models.HeaderModel{ + headerID, err := sdi.oldDBWriter.InsertHeaderCID(tx.oldDBTx, &v2Models.HeaderModel{ CID: headerNode.Cid().String(), MhKey: shared.MultihashKeyFromCID(headerNode.Cid()), ParentHash: header.ParentHash.String(), BlockNumber: header.Number.String(), - BlockHash: headerID, + BlockHash: header.Hash().String(), + TotalDifficulty: td.String(), + Reward: reward.String(), + Bloom: header.Bloom.Bytes(), + StateRoot: header.Root.String(), + RctRoot: header.ReceiptHash.String(), + TxRoot: header.TxHash.String(), + UncleRoot: header.UncleHash.String(), + Timestamp: header.Time, + BaseFee: baseFee, + }) + if err != nil { + return 0, err + } + if err := sdi.newDBWriter.InsertHeaderCID(tx.newDBTx, v3Models.HeaderModel{ + CID: headerNode.Cid().String(), + MhKey: shared.MultihashKeyFromCID(headerNode.Cid()), + ParentHash: header.ParentHash.String(), + BlockNumber: header.Number.String(), + BlockHash: header.Hash().String(), TotalDifficulty: td.String(), Reward: reward.String(), Bloom: header.Bloom.Bytes(), @@ -259,11 +345,14 @@ func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, he UncleRoot: header.UncleHash.String(), Timestamp: header.Time, Coinbase: header.Coinbase.String(), - }) + }); err != nil { + return 0, err + } + return headerID, nil } // processUncles publishes and indexes uncle IPLDs in Postgres -func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNumber uint64, uncleNodes []*ipld2.EthHeader) error { +func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerHash string, headerID int64, blockNumber uint64, uncleNodes []*ipld.EthHeader) error { // publish and index uncles for _, uncleNode := range uncleNodes { tx.cacheIPLD(uncleNode) @@ -274,15 +363,24 @@ func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNu } else { uncleReward = shared.CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64()) } - uncle := models.UncleModel{ + if err := sdi.oldDBWriter.InsertUncleCID(tx.oldDBTx, &v2Models.UncleModel{ HeaderID: headerID, CID: uncleNode.Cid().String(), MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()), ParentHash: uncleNode.ParentHash.String(), BlockHash: uncleNode.Hash().String(), Reward: uncleReward.String(), + }); err != nil { + return err } - if err := sdi.dbWriter.upsertUncleCID(tx.dbtx, uncle); err != nil { + if err := sdi.newDBWriter.InsertUncleCID(tx.newDBTx, &v3Models.UncleModel{ + HeaderID: headerHash, + CID: uncleNode.Cid().String(), + MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()), + ParentHash: 
uncleNode.ParentHash.String(), + BlockHash: uncleNode.Hash().String(), + Reward: uncleReward.String(), + }); err != nil { return err } } @@ -291,14 +389,15 @@ func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNu // processArgs bundles arguments to processReceiptsAndTxs type processArgs struct { - headerID string + headerID int64 + headerHash string blockNumber *big.Int receipts types.Receipts txs types.Transactions - rctNodes []*ipld2.EthReceipt - rctTrieNodes []*ipld2.EthRctTrie - txNodes []*ipld2.EthTx - txTrieNodes []*ipld2.EthTxTrie + rctNodes []*ipld.EthReceipt + rctTrieNodes []*ipld.EthRctTrie + txNodes []*ipld.EthTx + txTrieNodes []*ipld.EthTxTrie logTrieNodes [][]node.Node logLeafNodeCIDs [][]cid.Cid rctLeafNodeCIDs []cid.Cid @@ -317,7 +416,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs // index tx trx := args.txs[i] - txID := trx.Hash().String() + txHash := trx.Hash().String() var val string if trx.Value() != nil { @@ -329,35 +428,54 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs if err != nil { return fmt.Errorf("error deriving tx sender: %v", err) } - txModel := models.TxModel{ + txID, err := sdi.oldDBWriter.InsertTransactionCID(tx.oldDBTx, &v2Models.TxModel{ HeaderID: args.headerID, Dst: shared.HandleZeroAddrPointer(trx.To()), Src: shared.HandleZeroAddr(from), - TxHash: txID, + TxHash: txHash, + Index: int64(i), + Data: trx.Data(), + CID: txNode.Cid().String(), + MhKey: shared.MultihashKeyFromCID(txNode.Cid()), + Type: trx.Type(), + }) + if err != nil { + return err + } + if err := sdi.newDBWriter.InsertTransactionCID(tx.newDBTx, &v3Models.TxModel{ + HeaderID: args.headerHash, + Dst: shared.HandleZeroAddrPointer(trx.To()), + Src: shared.HandleZeroAddr(from), + TxHash: txHash, Index: int64(i), Data: trx.Data(), CID: txNode.Cid().String(), MhKey: shared.MultihashKeyFromCID(txNode.Cid()), Type: trx.Type(), Value: val, - } - if err := sdi.dbWriter.upsertTransactionCID(tx.dbtx, txModel); err != nil { + }); err != nil { return err } - // index access list if this is one for j, accessListElement := range trx.AccessList() { storageKeys := make([]string, len(accessListElement.StorageKeys)) for k, storageKey := range accessListElement.StorageKeys { storageKeys[k] = storageKey.Hex() } - accessListElementModel := models.AccessListElementModel{ + if err := sdi.oldDBWriter.InsertAccessListElement(tx.oldDBTx, &v2Models.AccessListElementModel{ TxID: txID, Index: int64(j), Address: accessListElement.Address.Hex(), StorageKeys: storageKeys, + }); err != nil { + return err } - if err := sdi.dbWriter.upsertAccessListElement(tx.dbtx, accessListElementModel); err != nil { + if err := sdi.newDBWriter.InsertAccessListElement(tx.newDBTx, &v3Models.AccessListElementModel{ + TxID: txHash, + Index: int64(j), + Address: accessListElement.Address.Hex(), + StorageKeys: storageKeys, + }); err != nil { return err } } @@ -374,26 +492,44 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs return fmt.Errorf("invalid receipt leaf node cid") } - rctModel := &models.ReceiptModel{ + var postState string + var postStatus uint64 + if len(receipt.PostState) == 0 { + postStatus = receipt.Status + } else { + postState = common.Bytes2Hex(receipt.PostState) + } + + rctID, err := sdi.oldDBWriter.InsertReceiptCID(tx.oldDBTx, &v2Models.ReceiptModel{ TxID: txID, Contract: contract, ContractHash: contractHash, LeafCID: args.rctLeafNodeCIDs[i].String(), LeafMhKey: 
shared.MultihashKeyFromCID(args.rctLeafNodeCIDs[i]), LogRoot: args.rctNodes[i].LogRoot.String(), + PostState: postState, + PostStatus: postStatus, + }) + if err != nil { + return err } - if len(receipt.PostState) == 0 { - rctModel.PostStatus = receipt.Status - } else { - rctModel.PostState = common.Bytes2Hex(receipt.PostState) - } - - if err := sdi.dbWriter.upsertReceiptCID(tx.dbtx, rctModel); err != nil { + if err := sdi.newDBWriter.InsertReceiptCID(tx.newDBTx, &v3Models.ReceiptModel{ + TxID: txHash, + Contract: contract, + ContractHash: contractHash, + LeafCID: args.rctLeafNodeCIDs[i].String(), + LeafMhKey: shared.MultihashKeyFromCID(args.rctLeafNodeCIDs[i]), + LogRoot: args.rctNodes[i].LogRoot.String(), + PostState: postState, + PostStatus: postStatus, + }); err != nil { return err } // index logs - logDataSet := make([]*models.LogsModel, len(receipt.Logs)) + rctLen := len(receipt.Logs) + oldLogDataSet := make([]*v2Models.LogsModel, rctLen) + newLogDataSet := make([]*v3Models.LogsModel, rctLen) for idx, l := range receipt.Logs { topicSet := make([]string, 4) for ti, topic := range l.Topics { @@ -404,8 +540,20 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs return fmt.Errorf("invalid log cid") } - logDataSet[idx] = &models.LogsModel{ - ReceiptID: txID, + oldLogDataSet[idx] = &v2Models.LogsModel{ + ReceiptID: rctID, + Address: l.Address.String(), + Index: int64(l.Index), + Data: l.Data, + LeafCID: args.logLeafNodeCIDs[i][idx].String(), + LeafMhKey: shared.MultihashKeyFromCID(args.logLeafNodeCIDs[i][idx]), + Topic0: topicSet[0], + Topic1: topicSet[1], + Topic2: topicSet[2], + Topic3: topicSet[3], + } + newLogDataSet[idx] = &v3Models.LogsModel{ + ReceiptID: txHash, Address: l.Address.String(), Index: int64(l.Index), Data: l.Data, @@ -417,8 +565,10 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs Topic3: topicSet[3], } } - - if err := sdi.dbWriter.upsertLogCID(tx.dbtx, logDataSet); err != nil { + if err := sdi.oldDBWriter.InsertLogCID(tx.oldDBTx, oldLogDataSet); err != nil { + return err + } + if err := sdi.newDBWriter.InsertLogCID(tx.newDBTx, newLogDataSet); err != nil { return err } } @@ -433,7 +583,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs } // PushStateNode publishes and indexes a state diff node object (including any child storage nodes) in the IPLD sql -func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerID string) error { +func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdtypes.StateNode, headerHash string, headerID int64) error { tx, ok := batch.(*BatchTx) if !ok { return fmt.Errorf("sql batch is expected to be of type %T, got %T", &BatchTx{}, batch) @@ -442,30 +592,47 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt if stateNode.NodeType == sdtypes.Removed { // short circuit if it is a Removed node // this assumes the db has been initialized and a public.blocks entry for the Removed node is present - stateModel := models.StateNodeModel{ + _, err := sdi.oldDBWriter.InsertStateCID(tx.oldDBTx, &v2Models.StateNodeModel{ HeaderID: headerID, Path: stateNode.Path, StateKey: common.BytesToHash(stateNode.LeafKey).String(), CID: shared.RemovedNodeStateCID, MhKey: shared.RemovedNodeMhKey, NodeType: stateNode.NodeType.Int(), + }) + if err != nil { + return err } - return sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel) + return sdi.newDBWriter.InsertStateCID(tx.newDBTx, 
&v3Models.StateNodeModel{ + HeaderID: headerHash, + Path: stateNode.Path, + StateKey: common.BytesToHash(stateNode.LeafKey).String(), + CID: shared.RemovedNodeStateCID, + MhKey: shared.RemovedNodeMhKey, + NodeType: stateNode.NodeType.Int(), + }) } - stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld2.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue) + stateCIDStr, stateMhKey, err := tx.cacheRaw(ipld.MEthStateTrie, multihash.KECCAK_256, stateNode.NodeValue) if err != nil { return fmt.Errorf("error generating and cacheing state node IPLD: %v", err) } - stateModel := models.StateNodeModel{ + // index the state node + stateID, err := sdi.oldDBWriter.InsertStateCID(tx.oldDBTx, &v2Models.StateNodeModel{ HeaderID: headerID, Path: stateNode.Path, StateKey: common.BytesToHash(stateNode.LeafKey).String(), CID: stateCIDStr, MhKey: stateMhKey, NodeType: stateNode.NodeType.Int(), - } - // index the state node - if err := sdi.dbWriter.upsertStateCID(tx.dbtx, stateModel); err != nil { + }) + if err := sdi.newDBWriter.InsertStateCID(tx.newDBTx, &v3Models.StateNodeModel{ + HeaderID: headerHash, + Path: stateNode.Path, + StateKey: common.BytesToHash(stateNode.LeafKey).String(), + CID: stateCIDStr, + MhKey: stateMhKey, + NodeType: stateNode.NodeType.Int(), + }); err != nil { return err } // if we have a leaf, decode and index the account data @@ -481,15 +648,23 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt if err := rlp.DecodeBytes(i[1].([]byte), &account); err != nil { return fmt.Errorf("error decoding state account rlp: %s", err.Error()) } - accountModel := models.StateAccountModel{ - HeaderID: headerID, + if err := sdi.oldDBWriter.InsertStateAccount(tx.oldDBTx, &v2Models.StateAccountModel{ + StateID: stateID, + Balance: account.Balance.String(), + Nonce: account.Nonce, + CodeHash: account.CodeHash, + StorageRoot: account.Root.String(), + }); err != nil { + return err + } + if err := sdi.newDBWriter.InsertStateAccount(tx.newDBTx, &v3Models.StateAccountModel{ + HeaderID: headerHash, StatePath: stateNode.Path, Balance: account.Balance.String(), Nonce: account.Nonce, CodeHash: account.CodeHash, StorageRoot: account.Root.String(), - } - if err := sdi.dbWriter.upsertStateAccount(tx.dbtx, accountModel); err != nil { + }); err != nil { return err } } @@ -498,34 +673,52 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt if storageNode.NodeType == sdtypes.Removed { // short circuit if it is a Removed node // this assumes the db has been initialized and a public.blocks entry for the Removed node is present - storageModel := models.StorageNodeModel{ - HeaderID: headerID, + if err := sdi.oldDBWriter.InsertStorageCID(tx.oldDBTx, &v2Models.StorageNodeModel{ + StateID: stateID, + Path: storageNode.Path, + StorageKey: common.BytesToHash(storageNode.LeafKey).String(), + CID: shared.RemovedNodeStorageCID, + MhKey: shared.RemovedNodeMhKey, + NodeType: storageNode.NodeType.Int(), + }); err != nil { + return err + } + if err := sdi.newDBWriter.InsertStorageCID(tx.newDBTx, &v3Models.StorageNodeModel{ + HeaderID: headerHash, StatePath: stateNode.Path, Path: storageNode.Path, StorageKey: common.BytesToHash(storageNode.LeafKey).String(), CID: shared.RemovedNodeStorageCID, MhKey: shared.RemovedNodeMhKey, NodeType: storageNode.NodeType.Int(), - } - if err := sdi.dbWriter.upsertStorageCID(tx.dbtx, storageModel); err != nil { + }); err != nil { return err } continue } - storageCIDStr, storageMhKey, err := tx.cacheRaw(ipld2.MEthStorageTrie, 
multihash.KECCAK_256, storageNode.NodeValue) + storageCIDStr, storageMhKey, err := tx.cacheRaw(ipld.MEthStorageTrie, multihash.KECCAK_256, storageNode.NodeValue) if err != nil { return fmt.Errorf("error generating and cacheing storage node IPLD: %v", err) } - storageModel := models.StorageNodeModel{ - HeaderID: headerID, + if err := sdi.oldDBWriter.InsertStorageCID(tx.oldDBTx, &v2Models.StorageNodeModel{ + StateID: stateID, + Path: storageNode.Path, + StorageKey: common.BytesToHash(storageNode.LeafKey).String(), + CID: storageCIDStr, + MhKey: storageMhKey, + NodeType: storageNode.NodeType.Int(), + }); err != nil { + return err + } + if err := sdi.newDBWriter.InsertStorageCID(tx.newDBTx, &v3Models.StorageNodeModel{ + HeaderID: headerHash, StatePath: stateNode.Path, Path: storageNode.Path, StorageKey: common.BytesToHash(storageNode.LeafKey).String(), CID: storageCIDStr, MhKey: storageMhKey, NodeType: storageNode.NodeType.Int(), - } - if err := sdi.dbWriter.upsertStorageCID(tx.dbtx, storageModel); err != nil { + }); err != nil { return err } } @@ -550,5 +743,8 @@ func (sdi *StateDiffIndexer) PushCodeAndCodeHash(batch interfaces.Batch, codeAnd // Close satisfies io.Closer func (sdi *StateDiffIndexer) Close() error { - return sdi.dbWriter.Close() + if err := sdi.oldDBWriter.Close(); err != nil { + return err + } + return sdi.newDBWriter.Close() } diff --git a/statediff/indexer/database/sql/indexer_shared_test.go b/statediff/indexer/database/sql/indexer_shared_test.go index 8bbab22ba..997267c46 100644 --- a/statediff/indexer/database/sql/indexer_shared_test.go +++ b/statediff/indexer/database/sql/indexer_shared_test.go @@ -12,14 +12,13 @@ import ( "github.com/multiformats/go-multihash" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" "github.com/ethereum/go-ethereum/statediff/indexer/ipld" "github.com/ethereum/go-ethereum/statediff/indexer/mocks" ) var ( - db sql.Database + db interfaces.Database err error ind interfaces.StateDiffIndexer ipfsPgGet = `SELECT data FROM public.blocks diff --git a/statediff/indexer/database/sql/interfaces.go b/statediff/indexer/database/sql/interfaces.go deleted file mode 100644 index 445b35d9b..000000000 --- a/statediff/indexer/database/sql/interfaces.go +++ /dev/null @@ -1,87 +0,0 @@ -// VulcanizeDB -// Copyright © 2021 Vulcanize - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. - -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
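Every insert in this indexer now follows the same shape: the row is written to the v2 schema first, because its statements return an integer surrogate id that child rows (uncles and transactions under a header, receipts and access list elements under a transaction, accounts and storage under a state node) need as a foreign key, and is then mirrored into the v3 schema, which links rows by natural keys (block hash, tx hash, header_id plus state_path) and so returns nothing. A compact sketch of that recurring pattern, with the two callbacks standing in for the concrete writer methods (names here are illustrative only):

package sketch

// dualInsert writes a row to the v2 schema (which returns a surrogate id used
// to key child rows) and then mirrors it into the v3 schema (keyed by natural
// keys, so no id comes back). Either failure aborts the pair.
func dualInsert(insertV2 func() (int64, error), insertV3 func() error) (int64, error) {
	id, err := insertV2()
	if err != nil {
		return 0, err
	}
	if err := insertV3(); err != nil {
		return 0, err
	}
	return id, nil
}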
- -package sql - -import ( - "context" - "io" - "time" -) - -// Database interfaces required by the sql indexer -type Database interface { - Driver - Statements -} - -// Driver interface has all the methods required by a driver implementation to support the sql indexer -type Driver interface { - QueryRow(ctx context.Context, sql string, args ...interface{}) ScannableRow - Exec(ctx context.Context, sql string, args ...interface{}) (Result, error) - Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error - Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error - Begin(ctx context.Context) (Tx, error) - Stats() Stats - NodeID() string - Context() context.Context - io.Closer -} - -// Statements interface to accommodate different SQL query syntax -type Statements interface { - InsertHeaderStm() string - InsertUncleStm() string - InsertTxStm() string - InsertAccessListElementStm() string - InsertRctStm() string - InsertLogStm() string - InsertStateStm() string - InsertAccountStm() string - InsertStorageStm() string - InsertIPLDStm() string - InsertIPLDsStm() string -} - -// Tx interface to accommodate different concrete SQL transaction types -type Tx interface { - QueryRow(ctx context.Context, sql string, args ...interface{}) ScannableRow - Exec(ctx context.Context, sql string, args ...interface{}) (Result, error) - Commit(ctx context.Context) error - Rollback(ctx context.Context) error -} - -// ScannableRow interface to accommodate different concrete row types -type ScannableRow interface { - Scan(dest ...interface{}) error -} - -// Result interface to accommodate different concrete result types -type Result interface { - RowsAffected() (int64, error) -} - -// Stats interface to accommodate different concrete sql stats types -type Stats interface { - MaxOpen() int64 - Open() int64 - InUse() int64 - Idle() int64 - WaitCount() int64 - WaitDuration() time.Duration - MaxIdleClosed() int64 - MaxLifetimeClosed() int64 -} diff --git a/statediff/indexer/database/sql/metrics.go b/statediff/indexer/database/sql/metrics.go index b0946a722..f59edcf14 100644 --- a/statediff/indexer/database/sql/metrics.go +++ b/statediff/indexer/database/sql/metrics.go @@ -19,6 +19,8 @@ package sql import ( "strings" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" + "github.com/ethereum/go-ethereum/metrics" ) @@ -39,57 +41,57 @@ func metricName(subsystem, name string) string { return strings.Join(parts, "/") } -type indexerMetricsHandles struct { +type IndexerMetricsHandles struct { // The total number of processed blocks - blocks metrics.Counter + Blocks metrics.Counter // The total number of processed transactions - transactions metrics.Counter + Transactions metrics.Counter // The total number of processed receipts - receipts metrics.Counter + Receipts metrics.Counter // The total number of processed logs - logs metrics.Counter + Logs metrics.Counter // The total number of access list entries processed - accessListEntries metrics.Counter + AccessListEntries metrics.Counter // Time spent waiting for free postgres tx - tFreePostgres metrics.Timer + TimeFreePostgres metrics.Timer // Postgres transaction commit duration - tPostgresCommit metrics.Timer + TimePostgresCommit metrics.Timer // Header processing time - tHeaderProcessing metrics.Timer + TimeHeaderProcessing metrics.Timer // Uncle processing time - tUncleProcessing metrics.Timer + TimeUncleProcessing metrics.Timer // Tx and receipt processing time - tTxAndRecProcessing metrics.Timer + 
TimeTxAndRecProcessing metrics.Timer // State, storage, and code combined processing time - tStateStoreCodeProcessing metrics.Timer + TimeStateStoreCodeProcessing metrics.Timer } -func RegisterIndexerMetrics(reg metrics.Registry) indexerMetricsHandles { - ctx := indexerMetricsHandles{ - blocks: metrics.NewCounter(), - transactions: metrics.NewCounter(), - receipts: metrics.NewCounter(), - logs: metrics.NewCounter(), - accessListEntries: metrics.NewCounter(), - tFreePostgres: metrics.NewTimer(), - tPostgresCommit: metrics.NewTimer(), - tHeaderProcessing: metrics.NewTimer(), - tUncleProcessing: metrics.NewTimer(), - tTxAndRecProcessing: metrics.NewTimer(), - tStateStoreCodeProcessing: metrics.NewTimer(), +func RegisterIndexerMetrics(reg metrics.Registry) IndexerMetricsHandles { + ctx := IndexerMetricsHandles{ + Blocks: metrics.NewCounter(), + Transactions: metrics.NewCounter(), + Receipts: metrics.NewCounter(), + Logs: metrics.NewCounter(), + AccessListEntries: metrics.NewCounter(), + TimeFreePostgres: metrics.NewTimer(), + TimePostgresCommit: metrics.NewTimer(), + TimeHeaderProcessing: metrics.NewTimer(), + TimeUncleProcessing: metrics.NewTimer(), + TimeTxAndRecProcessing: metrics.NewTimer(), + TimeStateStoreCodeProcessing: metrics.NewTimer(), } subsys := "indexer" - reg.Register(metricName(subsys, "blocks"), ctx.blocks) - reg.Register(metricName(subsys, "transactions"), ctx.transactions) - reg.Register(metricName(subsys, "receipts"), ctx.receipts) - reg.Register(metricName(subsys, "logs"), ctx.logs) - reg.Register(metricName(subsys, "access_list_entries"), ctx.accessListEntries) - reg.Register(metricName(subsys, "t_free_postgres"), ctx.tFreePostgres) - reg.Register(metricName(subsys, "t_postgres_commit"), ctx.tPostgresCommit) - reg.Register(metricName(subsys, "t_header_processing"), ctx.tHeaderProcessing) - reg.Register(metricName(subsys, "t_uncle_processing"), ctx.tUncleProcessing) - reg.Register(metricName(subsys, "t_tx_receipt_processing"), ctx.tTxAndRecProcessing) - reg.Register(metricName(subsys, "t_state_store_code_processing"), ctx.tStateStoreCodeProcessing) + reg.Register(metricName(subsys, "blocks"), ctx.Blocks) + reg.Register(metricName(subsys, "transactions"), ctx.Transactions) + reg.Register(metricName(subsys, "receipts"), ctx.Receipts) + reg.Register(metricName(subsys, "logs"), ctx.Logs) + reg.Register(metricName(subsys, "access_list_entries"), ctx.AccessListEntries) + reg.Register(metricName(subsys, "t_free_postgres"), ctx.TimeFreePostgres) + reg.Register(metricName(subsys, "t_postgres_commit"), ctx.TimePostgresCommit) + reg.Register(metricName(subsys, "t_header_processing"), ctx.TimeHeaderProcessing) + reg.Register(metricName(subsys, "t_uncle_processing"), ctx.TimeUncleProcessing) + reg.Register(metricName(subsys, "t_tx_receipt_processing"), ctx.TimeTxAndRecProcessing) + reg.Register(metricName(subsys, "t_state_store_code_processing"), ctx.TimeStateStoreCodeProcessing) return ctx } @@ -135,7 +137,7 @@ func RegisterDBMetrics(reg metrics.Registry) dbMetricsHandles { return ctx } -func (met *dbMetricsHandles) Update(stats Stats) { +func (met *dbMetricsHandles) Update(stats interfaces.Stats) { met.maxOpen.Update(stats.MaxOpen()) met.open.Update(stats.Open()) met.inUse.Update(stats.InUse()) diff --git a/statediff/indexer/database/sql/pgx_indexer_legacy_test.go b/statediff/indexer/database/sql/pgx_indexer_legacy_test.go index 768652b46..37ea11881 100644 --- a/statediff/indexer/database/sql/pgx_indexer_legacy_test.go +++ b/statediff/indexer/database/sql/pgx_indexer_legacy_test.go 
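Renaming the handle struct to IndexerMetricsHandles and exporting its fields means the timers and counters are no longer private to the sql package; the v2/v3 writer packages or tests can update them directly once they hold the registered handle. A small illustration of what that usage could look like, with the struct stubbed down to the two fields the example touches (the real type carries all of the handles registered above):

package sketch

import "github.com/ethereum/go-ethereum/metrics"

// indexerMetrics stands in for sql.IndexerMetricsHandles, reduced to the
// fields this example needs.
type indexerMetrics struct {
	Blocks       metrics.Counter
	Transactions metrics.Counter
}

// recordBlock bumps the exported counters after a block has been indexed.
func recordBlock(m indexerMetrics, txCount int64) {
	m.Blocks.Inc(1)
	m.Transactions.Inc(txCount)
}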
@@ -20,6 +20,8 @@ import ( "context" "testing" + nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node" + "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" @@ -34,10 +36,10 @@ func setupLegacyPGX(t *testing.T) { mockLegacyBlock = legacyData.MockBlock legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256) - db, err = postgres.SetupPGXDB() + db, err = postgres.SetupV3PGXDB() require.NoError(t, err) - ind, err = sql.NewStateDiffIndexer(context.Background(), legacyData.Config, db) + ind, err = sql.NewStateDiffIndexer(context.Background(), legacyData.Config, nodeinfo.Info{}, db, nil) require.NoError(t, err) var tx interfaces.Batch tx, err = ind.PushBlock( @@ -52,7 +54,7 @@ func setupLegacyPGX(t *testing.T) { } }() for _, node := range legacyData.StateDiffs { - err = ind.PushStateNode(tx, node, legacyData.MockBlock.Hash().String()) + err = ind.PushStateNode(tx, node, legacyData.MockBlock.Hash().String(), 0) require.NoError(t, err) } diff --git a/statediff/indexer/database/sql/pgx_indexer_test.go b/statediff/indexer/database/sql/pgx_indexer_test.go index 110f5f223..deb4bd6c6 100644 --- a/statediff/indexer/database/sql/pgx_indexer_test.go +++ b/statediff/indexer/database/sql/pgx_indexer_test.go @@ -32,17 +32,19 @@ import ( "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" "github.com/ethereum/go-ethereum/statediff/indexer/mocks" - "github.com/ethereum/go-ethereum/statediff/indexer/models" + sharedModels "github.com/ethereum/go-ethereum/statediff/indexer/models/shared" + v3Models "github.com/ethereum/go-ethereum/statediff/indexer/models/v3" + nodeInfo "github.com/ethereum/go-ethereum/statediff/indexer/node" "github.com/ethereum/go-ethereum/statediff/indexer/shared" "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" ) func setupPGX(t *testing.T) { - db, err = postgres.SetupPGXDB() + db, err = postgres.SetupV3PGXDB() if err != nil { t.Fatal(err) } - ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestConfig, db) + ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestConfig, nodeInfo.Info{}, db, nil) require.NoError(t, err) var tx interfaces.Batch tx, err = ind.PushBlock( @@ -58,7 +60,7 @@ func setupPGX(t *testing.T) { } }() for _, node := range mocks.StateDiffs { - err = ind.PushStateNode(tx, node, mockBlock.Hash().String()) + err = ind.PushStateNode(tx, node, mockBlock.Hash().String(), 0) require.NoError(t, err) } @@ -197,7 +199,7 @@ func TestPGXIndexer(t *testing.T) { if txRes.Value != transactions[3].Value().String() { t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value) } - accessListElementModels := make([]models.AccessListElementModel, 0) + accessListElementModels := make([]v3Models.AccessListElementModel, 0) pgStr = `SELECT access_list_elements.* FROM eth.access_list_elements INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.tx_hash) WHERE cid = $1 ORDER BY access_list_elements.index ASC` err = db.Select(context.Background(), &accessListElementModels, pgStr, c) if err != nil { @@ -206,11 +208,11 @@ func TestPGXIndexer(t *testing.T) { if len(accessListElementModels) != 2 { t.Fatalf("expected two access list entries, got %d", len(accessListElementModels)) } - model1 := models.AccessListElementModel{ + model1 := v3Models.AccessListElementModel{ Index: accessListElementModels[0].Index, Address: accessListElementModels[0].Address, } - model2 
:= models.AccessListElementModel{ + model2 := v3Models.AccessListElementModel{ Index: accessListElementModels[1].Index, Address: accessListElementModels[1].Address, StorageKeys: accessListElementModels[1].StorageKeys, @@ -313,7 +315,7 @@ func TestPGXIndexer(t *testing.T) { expectTrue(t, test_helpers.ListContainsString(rcts, rct5CID.String())) for idx, c := range rcts { - result := make([]models.IPLDModel, 0) + result := make([]sharedModels.IPLDModel, 0) pgStr = `SELECT data FROM eth.receipt_cids INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key) @@ -396,7 +398,7 @@ func TestPGXIndexer(t *testing.T) { setupPGX(t) defer tearDown(t) // check that state nodes were properly indexed and published - stateNodes := make([]models.StateNodeModel, 0) + stateNodes := make([]v3Models.StateNodeModel, 0) pgStr := `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) WHERE header_cids.block_number = $1 AND node_type != 3` @@ -418,7 +420,7 @@ func TestPGXIndexer(t *testing.T) { t.Fatal(err) } pgStr = `SELECT header_id, state_path, cast(balance AS TEXT), nonce, code_hash, storage_root from eth.state_accounts WHERE header_id = $1 AND state_path = $2` - var account models.StateAccountModel + var account v3Models.StateAccountModel err = db.Get(context.Background(), &account, pgStr, stateNode.HeaderID, stateNode.Path) if err != nil { t.Fatal(err) @@ -428,7 +430,7 @@ func TestPGXIndexer(t *testing.T) { test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.ContractLeafKey).Hex()) test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x06'}) test_helpers.ExpectEqual(t, data, mocks.ContractLeafNode) - test_helpers.ExpectEqual(t, account, models.StateAccountModel{ + test_helpers.ExpectEqual(t, account, v3Models.StateAccountModel{ HeaderID: account.HeaderID, StatePath: stateNode.Path, Balance: "0", @@ -442,7 +444,7 @@ func TestPGXIndexer(t *testing.T) { test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.AccountLeafKey).Hex()) test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x0c'}) test_helpers.ExpectEqual(t, data, mocks.AccountLeafNode) - test_helpers.ExpectEqual(t, account, models.StateAccountModel{ + test_helpers.ExpectEqual(t, account, v3Models.StateAccountModel{ HeaderID: account.HeaderID, StatePath: stateNode.Path, Balance: "1000", @@ -454,7 +456,7 @@ func TestPGXIndexer(t *testing.T) { } // check that Removed state nodes were properly indexed and published - stateNodes = make([]models.StateNodeModel, 0) + stateNodes = make([]v3Models.StateNodeModel, 0) pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) WHERE header_cids.block_number = $1 AND node_type = 3` @@ -485,7 +487,7 @@ func TestPGXIndexer(t *testing.T) { setupPGX(t) defer tearDown(t) // check that storage nodes were properly indexed - storageNodes := make([]models.StorageNodeWithStateKeyModel, 0) + storageNodes := make([]v3Models.StorageNodeWithStateKeyModel, 0) pgStr := `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path FROM eth.storage_cids, eth.state_cids, eth.header_cids WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id) @@ 
-497,7 +499,7 @@ func TestPGXIndexer(t *testing.T) { t.Fatal(err) } test_helpers.ExpectEqual(t, len(storageNodes), 1) - test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{ + test_helpers.ExpectEqual(t, storageNodes[0], v3Models.StorageNodeWithStateKeyModel{ CID: storageCID.String(), NodeType: 2, StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(), @@ -518,7 +520,7 @@ func TestPGXIndexer(t *testing.T) { test_helpers.ExpectEqual(t, data, mocks.StorageLeafNode) // check that Removed storage nodes were properly indexed - storageNodes = make([]models.StorageNodeWithStateKeyModel, 0) + storageNodes = make([]v3Models.StorageNodeWithStateKeyModel, 0) pgStr = `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path FROM eth.storage_cids, eth.state_cids, eth.header_cids WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id) @@ -530,7 +532,7 @@ func TestPGXIndexer(t *testing.T) { t.Fatal(err) } test_helpers.ExpectEqual(t, len(storageNodes), 1) - test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{ + test_helpers.ExpectEqual(t, storageNodes[0], v3Models.StorageNodeWithStateKeyModel{ CID: shared.RemovedNodeStorageCID, NodeType: 3, StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(), diff --git a/statediff/indexer/database/sql/postgres/config.go b/statediff/indexer/database/sql/postgres/config.go index 095b4dd24..3ef5c71c9 100644 --- a/statediff/indexer/database/sql/postgres/config.go +++ b/statediff/indexer/database/sql/postgres/config.go @@ -54,6 +54,17 @@ var DefaultConfig = Config{ Password: "password", } +// MultiConfig holds multiple configs +type MultiConfig struct { + V2 Config + V3 Config +} + +// Type satisfies interfaces.Config +func (mc MultiConfig) Type() shared.DBType { + return shared.POSTGRES +} + // Config holds params for a Postgres db type Config struct { // conn string params diff --git a/statediff/indexer/database/sql/postgres/pgx.go b/statediff/indexer/database/sql/postgres/pgx.go index 936a3765d..b720f7fe5 100644 --- a/statediff/indexer/database/sql/postgres/pgx.go +++ b/statediff/indexer/database/sql/postgres/pgx.go @@ -25,21 +25,18 @@ import ( "github.com/jackc/pgx/v4" "github.com/jackc/pgx/v4/pgxpool" - "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" - "github.com/ethereum/go-ethereum/statediff/indexer/node" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" ) // PGXDriver driver, implements sql.Driver type PGXDriver struct { - ctx context.Context - pool *pgxpool.Pool - nodeInfo node.Info - nodeID string + ctx context.Context + pool *pgxpool.Pool } // NewPGXDriver returns a new pgx driver // it initializes the connection pool and creates the node info table -func NewPGXDriver(ctx context.Context, config Config, node node.Info) (*PGXDriver, error) { +func NewPGXDriver(ctx context.Context, config Config) (*PGXDriver, error) { pgConf, err := MakeConfig(config) if err != nil { return nil, err @@ -48,11 +45,7 @@ func NewPGXDriver(ctx context.Context, config Config, node node.Info) (*PGXDrive if err != nil { return nil, ErrDBConnectionFailed(err) } - pg := &PGXDriver{ctx: ctx, pool: dbPool, nodeInfo: node} - nodeErr := pg.createNode() - if nodeErr != nil { - return &PGXDriver{}, ErrUnableToSetNode(nodeErr) - } + pg := &PGXDriver{ctx: ctx, pool: dbPool} return pg, nil } @@ -88,27 +81,13 @@ func MakeConfig(config Config) (*pgxpool.Config, error) { return conf, 
nil } -func (pgx *PGXDriver) createNode() error { - _, err := pgx.pool.Exec( - pgx.ctx, - createNodeStm, - pgx.nodeInfo.GenesisBlock, pgx.nodeInfo.NetworkID, - pgx.nodeInfo.ID, pgx.nodeInfo.ClientName, - pgx.nodeInfo.ChainID) - if err != nil { - return ErrUnableToSetNode(err) - } - pgx.nodeID = pgx.nodeInfo.ID - return nil -} - // QueryRow satisfies sql.Database -func (pgx *PGXDriver) QueryRow(ctx context.Context, sql string, args ...interface{}) sql.ScannableRow { +func (pgx *PGXDriver) QueryRow(ctx context.Context, sql string, args ...interface{}) interfaces.ScannableRow { return pgx.pool.QueryRow(ctx, sql, args...) } // Exec satisfies sql.Database -func (pgx *PGXDriver) Exec(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) { +func (pgx *PGXDriver) Exec(ctx context.Context, sql string, args ...interface{}) (interfaces.Result, error) { res, err := pgx.pool.Exec(ctx, sql, args...) return resultWrapper{ct: res}, err } @@ -124,7 +103,7 @@ func (pgx *PGXDriver) Get(ctx context.Context, dest interface{}, query string, a } // Begin satisfies sql.Database -func (pgx *PGXDriver) Begin(ctx context.Context) (sql.Tx, error) { +func (pgx *PGXDriver) Begin(ctx context.Context) (interfaces.Tx, error) { tx, err := pgx.pool.Begin(ctx) if err != nil { return nil, err @@ -132,16 +111,11 @@ func (pgx *PGXDriver) Begin(ctx context.Context) (sql.Tx, error) { return pgxTxWrapper{tx: tx}, nil } -func (pgx *PGXDriver) Stats() sql.Stats { +func (pgx *PGXDriver) Stats() interfaces.Stats { stats := pgx.pool.Stat() return pgxStatsWrapper{stats: stats} } -// NodeID satisfies sql.Database -func (pgx *PGXDriver) NodeID() string { - return pgx.nodeID -} - // Close satisfies sql.Database/io.Closer func (pgx *PGXDriver) Close() error { pgx.pool.Close() @@ -212,12 +186,12 @@ type pgxTxWrapper struct { } // QueryRow satisfies sql.Tx -func (t pgxTxWrapper) QueryRow(ctx context.Context, sql string, args ...interface{}) sql.ScannableRow { +func (t pgxTxWrapper) QueryRow(ctx context.Context, sql string, args ...interface{}) interfaces.ScannableRow { return t.tx.QueryRow(ctx, sql, args...) } // Exec satisfies sql.Tx -func (t pgxTxWrapper) Exec(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) { +func (t pgxTxWrapper) Exec(ctx context.Context, sql string, args ...interface{}) (interfaces.Result, error) { res, err := t.tx.Exec(ctx, sql, args...) 
return resultWrapper{ct: res}, err } diff --git a/statediff/indexer/database/sql/postgres/sqlx.go b/statediff/indexer/database/sql/postgres/sqlx.go index 406b44a19..733c35734 100644 --- a/statediff/indexer/database/sql/postgres/sqlx.go +++ b/statediff/indexer/database/sql/postgres/sqlx.go @@ -21,23 +21,20 @@ import ( coresql "database/sql" "time" - "github.com/jmoiron/sqlx" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" - "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" - "github.com/ethereum/go-ethereum/statediff/indexer/node" + "github.com/jmoiron/sqlx" ) // SQLXDriver driver, implements sql.Driver type SQLXDriver struct { - ctx context.Context - db *sqlx.DB - nodeInfo node.Info - nodeID string + ctx context.Context + db *sqlx.DB } // NewSQLXDriver returns a new sqlx driver for Postgres // it initializes the connection pool and creates the node info table -func NewSQLXDriver(ctx context.Context, config Config, node node.Info) (*SQLXDriver, error) { +func NewSQLXDriver(ctx context.Context, config Config) (*SQLXDriver, error) { db, err := sqlx.ConnectContext(ctx, "postgres", config.DbConnectionString()) if err != nil { return &SQLXDriver{}, ErrDBConnectionFailed(err) @@ -52,33 +49,17 @@ func NewSQLXDriver(ctx context.Context, config Config, node node.Info) (*SQLXDri lifetime := config.MaxConnLifetime db.SetConnMaxLifetime(lifetime) } - driver := &SQLXDriver{ctx: ctx, db: db, nodeInfo: node} - if err := driver.createNode(); err != nil { - return &SQLXDriver{}, ErrUnableToSetNode(err) - } + driver := &SQLXDriver{ctx: ctx, db: db} return driver, nil } -func (driver *SQLXDriver) createNode() error { - _, err := driver.db.Exec( - createNodeStm, - driver.nodeInfo.GenesisBlock, driver.nodeInfo.NetworkID, - driver.nodeInfo.ID, driver.nodeInfo.ClientName, - driver.nodeInfo.ChainID) - if err != nil { - return ErrUnableToSetNode(err) - } - driver.nodeID = driver.nodeInfo.ID - return nil -} - // QueryRow satisfies sql.Database -func (driver *SQLXDriver) QueryRow(_ context.Context, sql string, args ...interface{}) sql.ScannableRow { +func (driver *SQLXDriver) QueryRow(_ context.Context, sql string, args ...interface{}) interfaces.ScannableRow { return driver.db.QueryRowx(sql, args...) } // Exec satisfies sql.Database -func (driver *SQLXDriver) Exec(_ context.Context, sql string, args ...interface{}) (sql.Result, error) { +func (driver *SQLXDriver) Exec(_ context.Context, sql string, args ...interface{}) (interfaces.Result, error) { return driver.db.Exec(sql, args...) 
} @@ -93,7 +74,7 @@ func (driver *SQLXDriver) Get(_ context.Context, dest interface{}, query string, } // Begin satisfies sql.Database -func (driver *SQLXDriver) Begin(_ context.Context) (sql.Tx, error) { +func (driver *SQLXDriver) Begin(_ context.Context) (interfaces.Tx, error) { tx, err := driver.db.Beginx() if err != nil { return nil, err @@ -101,16 +82,11 @@ func (driver *SQLXDriver) Begin(_ context.Context) (sql.Tx, error) { return sqlxTxWrapper{tx: tx}, nil } -func (driver *SQLXDriver) Stats() sql.Stats { +func (driver *SQLXDriver) Stats() interfaces.Stats { stats := driver.db.Stats() return sqlxStatsWrapper{stats: stats} } -// NodeID satisfies sql.Database -func (driver *SQLXDriver) NodeID() string { - return driver.nodeID -} - // Close satisfies sql.Database/io.Closer func (driver *SQLXDriver) Close() error { return driver.db.Close() @@ -170,12 +146,12 @@ type sqlxTxWrapper struct { } // QueryRow satisfies sql.Tx -func (t sqlxTxWrapper) QueryRow(ctx context.Context, sql string, args ...interface{}) sql.ScannableRow { +func (t sqlxTxWrapper) QueryRow(ctx context.Context, sql string, args ...interface{}) interfaces.ScannableRow { return t.tx.QueryRowx(sql, args...) } // Exec satisfies sql.Tx -func (t sqlxTxWrapper) Exec(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) { +func (t sqlxTxWrapper) Exec(ctx context.Context, sql string, args ...interface{}) (interfaces.Result, error) { return t.tx.Exec(sql, args...) } diff --git a/statediff/indexer/database/sql/postgres/test_helpers.go b/statediff/indexer/database/sql/postgres/test_helpers.go index 491701c4b..db29584b4 100644 --- a/statediff/indexer/database/sql/postgres/test_helpers.go +++ b/statediff/indexer/database/sql/postgres/test_helpers.go @@ -19,24 +19,43 @@ package postgres import ( "context" - "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" - "github.com/ethereum/go-ethereum/statediff/indexer/node" + v2 "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres/v2" + v3 "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres/v3" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" ) -// SetupSQLXDB is used to setup a sqlx db for tests -func SetupSQLXDB() (sql.Database, error) { - driver, err := NewSQLXDriver(context.Background(), DefaultConfig, node.Info{}) +// SetupV3SQLXDB is used to setup a sqlx db for tests +func SetupV3SQLXDB() (interfaces.Database, error) { + driver, err := NewSQLXDriver(context.Background(), DefaultConfig) if err != nil { return nil, err } - return NewPostgresDB(driver), nil + return v3.NewPostgresDB(driver), nil } -// SetupPGXDB is used to setup a pgx db for tests -func SetupPGXDB() (sql.Database, error) { - driver, err := NewPGXDriver(context.Background(), DefaultConfig, node.Info{}) +// SetupV3PGXDB is used to setup a pgx db for tests +func SetupV3PGXDB() (interfaces.Database, error) { + driver, err := NewPGXDriver(context.Background(), DefaultConfig) if err != nil { return nil, err } - return NewPostgresDB(driver), nil + return v3.NewPostgresDB(driver), nil +} + +// SetupV2SQLXDB is used to setup a sqlx db for tests +func SetupV2SQLXDB() (interfaces.Database, error) { + driver, err := NewSQLXDriver(context.Background(), DefaultConfig) + if err != nil { + return nil, err + } + return v2.NewPostgresDB(driver), nil +} + +// SetupV2PGXDB is used to setup a pgx db for tests +func SetupV2PGXDB() (interfaces.Database, error) { + driver, err := NewPGXDriver(context.Background(), DefaultConfig) + if err != nil { + return nil, 
err + } + return v2.NewPostgresDB(driver), nil } diff --git a/statediff/indexer/database/sql/postgres/v2/database.go b/statediff/indexer/database/sql/postgres/v2/database.go new file mode 100644 index 000000000..f632e09c5 --- /dev/null +++ b/statediff/indexer/database/sql/postgres/v2/database.go @@ -0,0 +1,115 @@ +// VulcanizeDB +// Copyright © 2021 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package postgres + +import ( + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" +) + +var _ interfaces.Database = &DB{} + +const version = 2 + +// NewPostgresDB returns a postgres.DB using the provided driver +func NewPostgresDB(driver interfaces.Driver) *DB { + return &DB{driver} +} + +// DB implements sql.Database using a configured driver and Postgres statement syntax +type DB struct { + interfaces.Driver +} + +// InsertNodeInfoStm satisfies interfaces.Statements +func (db *DB) InsertNodeInfoStm() string { + return `INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (genesis_block, network_id, node_id, chain_id) DO NOTHING` +} + +// InsertHeaderStm satisfies the interfaces.Statements +func (db *DB) InsertHeaderStm() string { + return `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) + ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16) + RETURNING id` +} + +// InsertUncleStm satisfies the interfaces.Statements +func (db *DB) InsertUncleStm() string { + return `INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT (header_id, block_hash) DO UPDATE SET (parent_hash, cid, reward, mh_key) = ($3, $4, $5, $6)` +} + +// InsertTxStm satisfies the interfaces.Statements +func (db *DB) InsertTxStm() string { + return `INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) + ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src, index, mh_key, tx_data, tx_type) = ($3, $4, $5, $6, $7, $8, $9) + RETURNING id` +} + +// InsertAccessListElementStm satisfies the interfaces.Statements +func (db *DB) InsertAccessListElementStm() string { + return `INSERT INTO eth.access_list_element (tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4) + ON CONFLICT (tx_id, index) DO UPDATE SET (address, storage_keys) = ($3, $4)` +} + +// InsertRctStm satisfies the interfaces.Statements +func (db *DB) 
InsertRctStm() string { + return `INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + ON CONFLICT (tx_id) DO UPDATE SET (leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) = ($2, $3, $4, $5, $6, $7, $8) + RETURNING id` +} + +// InsertLogStm satisfies the interfaces.Statements +func (db *DB) InsertLogStm() string { + return `INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, receipt_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) + ON CONFLICT (receipt_id, index) DO UPDATE SET (leaf_cid, leaf_mh_key ,address, topic0, topic1, topic2, topic3,log_data ) = ($1, $2, $4, $6, $7, $8, $9, $10)` +} + +// InsertStateStm satisfies the interfaces.Statements +func (db *DB) InsertStateStm() string { + return `INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7) + RETURNING id` +} + +// InsertAccountStm satisfies the interfaces.Statements +func (db *DB) InsertAccountStm() string { + return `INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (state_id) DO UPDATE SET (balance, nonce, code_hash, storage_root) = ($2, $3, $4, $5)` +} + +// InsertStorageStm satisfies the interfaces.Statements +func (db *DB) InsertStorageStm() string { + return `INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) + ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)` +} + +// InsertIPLDStm satisfies the interfaces.Statements +func (db *DB) InsertIPLDStm() string { + return `INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING` +} + +// InsertIPLDsStm satisfies the interfaces.Statements +func (db *DB) InsertIPLDsStm() string { + return `INSERT INTO public.blocks (key, data) VALUES (unnest($1::TEXT[]), unnest($2::BYTEA[])) ON CONFLICT (key) DO NOTHING` +} + +// Version satisfies the interfaces.Version +func (db *DB) Version() uint { + return version +} diff --git a/statediff/indexer/database/sql/postgres/database.go b/statediff/indexer/database/sql/postgres/v3/database.go similarity index 59% rename from statediff/indexer/database/sql/postgres/database.go rename to statediff/indexer/database/sql/postgres/v3/database.go index 4cff518a0..f214acc7f 100644 --- a/statediff/indexer/database/sql/postgres/database.go +++ b/statediff/indexer/database/sql/postgres/v3/database.go @@ -16,86 +16,96 @@ package postgres -import "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" - -var _ sql.Database = &DB{} - -const ( - createNodeStm = `INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) VALUES ($1, $2, $3, $4, $5) - ON CONFLICT (node_id) DO NOTHING` +import ( + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" ) +var _ interfaces.Database = &DB{} + +const version = 3 + // NewPostgresDB returns a postgres.DB using the provided driver -func NewPostgresDB(driver sql.Driver) *DB { +func NewPostgresDB(driver interfaces.Driver) *DB { return &DB{driver} } // DB implements sql.Database using a configured driver and Postgres statement syntax type 
DB struct { - sql.Driver + interfaces.Driver } -// InsertHeaderStm satisfies the sql.Statements interface +// InsertNodeInfoStm satisfies interfaces.Statements +func (db *DB) InsertNodeInfoStm() string { + return `INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (node_id) DO NOTHING` +} + +// InsertHeaderStm satisfies the interfaces.Statements func (db *DB) InsertHeaderStm() string { return `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) - ON CONFLICT (block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)` + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) + ON CONFLICT (block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16)` } -// InsertUncleStm satisfies the sql.Statements interface +// InsertUncleStm satisfies the interfaces.Statements func (db *DB) InsertUncleStm() string { return `INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6) - ON CONFLICT (block_hash) DO NOTHING` + ON CONFLICT (block_hash) DO NOTHING` } -// InsertTxStm satisfies the sql.Statements interface +// InsertTxStm satisfies the interfaces.Statements func (db *DB) InsertTxStm() string { return `INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type, value) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) - ON CONFLICT (tx_hash) DO NOTHING` + ON CONFLICT (tx_hash) DO NOTHING` } -// InsertAccessListElementStm satisfies the sql.Statements interface +// InsertAccessListElementStm satisfies the interfaces.Statements func (db *DB) InsertAccessListElementStm() string { return `INSERT INTO eth.access_list_elements (tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4) - ON CONFLICT (tx_id, index) DO NOTHING` + ON CONFLICT (tx_id, index) DO NOTHING` } -// InsertRctStm satisfies the sql.Statements interface +// InsertRctStm satisfies the interfaces.Statements func (db *DB) InsertRctStm() string { return `INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) - ON CONFLICT (tx_id) DO NOTHING` + ON CONFLICT (tx_id) DO NOTHING` } -// InsertLogStm satisfies the sql.Statements interface +// InsertLogStm satisfies the interfaces.Statements func (db *DB) InsertLogStm() string { return `INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, rct_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) - ON CONFLICT (rct_id, index) DO NOTHING` + ON CONFLICT (rct_id, index) DO NOTHING` } -// InsertStateStm satisfies the sql.Statements interface +// InsertStateStm satisfies the interfaces.Statements func (db *DB) InsertStateStm() string { return `INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) 
VALUES ($1, $2, $3, $4, $5, $6, $7) - ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)` + ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)` } -// InsertAccountStm satisfies the sql.Statements interface +// InsertAccountStm satisfies the interfaces.Statements func (db *DB) InsertAccountStm() string { return `INSERT INTO eth.state_accounts (header_id, state_path, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5, $6) - ON CONFLICT (header_id, state_path) DO NOTHING` + ON CONFLICT (header_id, state_path) DO NOTHING` } -// InsertStorageStm satisfies the sql.Statements interface +// InsertStorageStm satisfies the interfaces.Statements func (db *DB) InsertStorageStm() string { return `INSERT INTO eth.storage_cids (header_id, state_path, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) - ON CONFLICT (header_id, state_path, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($3, $4, $6, $7, $8)` + ON CONFLICT (header_id, state_path, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($3, $4, $6, $7, $8)` } -// InsertIPLDStm satisfies the sql.Statements interface +// InsertIPLDStm satisfies the interfaces.Statements func (db *DB) InsertIPLDStm() string { return `INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING` } -// InsertIPLDsStm satisfies the sql.Statements interface +// InsertIPLDsStm satisfies the interfaces.Statements func (db *DB) InsertIPLDsStm() string { return `INSERT INTO public.blocks (key, data) VALUES (unnest($1::TEXT[]), unnest($2::BYTEA[])) ON CONFLICT (key) DO NOTHING` } + +// Version satisfies the interfaces.Version +func (db *DB) Version() uint { + return version +} diff --git a/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go b/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go index 08f3f080e..95d03cb8e 100644 --- a/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go +++ b/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go @@ -20,6 +20,8 @@ import ( "context" "testing" + nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node" + "github.com/ipfs/go-cid" "github.com/jmoiron/sqlx" "github.com/multiformats/go-multihash" @@ -44,10 +46,10 @@ func setupLegacySQLX(t *testing.T) { mockLegacyBlock = legacyData.MockBlock legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256) - db, err = postgres.SetupSQLXDB() + db, err = postgres.SetupV3SQLXDB() require.NoError(t, err) - ind, err = sql.NewStateDiffIndexer(context.Background(), legacyData.Config, db) + ind, err = sql.NewStateDiffIndexer(context.Background(), legacyData.Config, nodeinfo.Info{}, db, nil) require.NoError(t, err) var tx interfaces.Batch tx, err = ind.PushBlock( @@ -62,7 +64,7 @@ func setupLegacySQLX(t *testing.T) { } }() for _, node := range legacyData.StateDiffs { - err = ind.PushStateNode(tx, node, mockLegacyBlock.Hash().String()) + err = ind.PushStateNode(tx, node, mockLegacyBlock.Hash().String(), 0) require.NoError(t, err) } diff --git a/statediff/indexer/database/sql/sqlx_indexer_test.go b/statediff/indexer/database/sql/sqlx_indexer_test.go index 5538a5f93..cd8ed4e33 100644 --- a/statediff/indexer/database/sql/sqlx_indexer_test.go +++ b/statediff/indexer/database/sql/sqlx_indexer_test.go @@ -33,17 +33,19 @@ import ( 
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" "github.com/ethereum/go-ethereum/statediff/indexer/mocks" - "github.com/ethereum/go-ethereum/statediff/indexer/models" + sharedModels "github.com/ethereum/go-ethereum/statediff/indexer/models/shared" + v3Models "github.com/ethereum/go-ethereum/statediff/indexer/models/v3" + nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node" "github.com/ethereum/go-ethereum/statediff/indexer/shared" "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" ) func setupSQLX(t *testing.T) { - db, err = postgres.SetupSQLXDB() + db, err = postgres.SetupV3SQLXDB() if err != nil { t.Fatal(err) } - ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestConfig, db) + ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestConfig, nodeinfo.Info{}, db, nil) require.NoError(t, err) var tx interfaces.Batch tx, err = ind.PushBlock( @@ -59,7 +61,7 @@ func setupSQLX(t *testing.T) { } }() for _, node := range mocks.StateDiffs { - err = ind.PushStateNode(tx, node, mockBlock.Hash().String()) + err = ind.PushStateNode(tx, node, mockBlock.Hash().String(), 0) require.NoError(t, err) } @@ -200,7 +202,7 @@ func TestSQLXIndexer(t *testing.T) { if txRes.Value != transactions[3].Value().String() { t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value) } - accessListElementModels := make([]models.AccessListElementModel, 0) + accessListElementModels := make([]v3Models.AccessListElementModel, 0) pgStr = `SELECT access_list_elements.* FROM eth.access_list_elements INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.tx_hash) WHERE cid = $1 ORDER BY access_list_elements.index ASC` err = db.Select(context.Background(), &accessListElementModels, pgStr, c) if err != nil { @@ -209,11 +211,11 @@ func TestSQLXIndexer(t *testing.T) { if len(accessListElementModels) != 2 { t.Fatalf("expected two access list entries, got %d", len(accessListElementModels)) } - model1 := models.AccessListElementModel{ + model1 := v3Models.AccessListElementModel{ Index: accessListElementModels[0].Index, Address: accessListElementModels[0].Address, } - model2 := models.AccessListElementModel{ + model2 := v3Models.AccessListElementModel{ Index: accessListElementModels[1].Index, Address: accessListElementModels[1].Address, StorageKeys: accessListElementModels[1].StorageKeys, @@ -314,7 +316,7 @@ func TestSQLXIndexer(t *testing.T) { expectTrue(t, test_helpers.ListContainsString(rcts, rct5CID.String())) for idx, c := range rcts { - result := make([]models.IPLDModel, 0) + result := make([]sharedModels.IPLDModel, 0) pgStr = `SELECT data FROM eth.receipt_cids INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key) @@ -396,7 +398,7 @@ func TestSQLXIndexer(t *testing.T) { setupSQLX(t) defer tearDown(t) // check that state nodes were properly indexed and published - stateNodes := make([]models.StateNodeModel, 0) + stateNodes := make([]v3Models.StateNodeModel, 0) pgStr := `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) WHERE header_cids.block_number = $1 AND node_type != 3` @@ -418,7 +420,7 @@ func TestSQLXIndexer(t *testing.T) { t.Fatal(err) } pgStr = `SELECT * from eth.state_accounts WHERE header_id = $1 AND state_path = $2` - var account models.StateAccountModel + var account 
v3Models.StateAccountModel err = db.Get(context.Background(), &account, pgStr, stateNode.HeaderID, stateNode.Path) if err != nil { t.Fatal(err) @@ -428,7 +430,7 @@ func TestSQLXIndexer(t *testing.T) { test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.ContractLeafKey).Hex()) test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x06'}) test_helpers.ExpectEqual(t, data, mocks.ContractLeafNode) - test_helpers.ExpectEqual(t, account, models.StateAccountModel{ + test_helpers.ExpectEqual(t, account, v3Models.StateAccountModel{ HeaderID: account.HeaderID, StatePath: stateNode.Path, Balance: "0", @@ -442,7 +444,7 @@ func TestSQLXIndexer(t *testing.T) { test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.AccountLeafKey).Hex()) test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x0c'}) test_helpers.ExpectEqual(t, data, mocks.AccountLeafNode) - test_helpers.ExpectEqual(t, account, models.StateAccountModel{ + test_helpers.ExpectEqual(t, account, v3Models.StateAccountModel{ HeaderID: account.HeaderID, StatePath: stateNode.Path, Balance: "1000", @@ -454,7 +456,7 @@ func TestSQLXIndexer(t *testing.T) { } // check that Removed state nodes were properly indexed and published - stateNodes = make([]models.StateNodeModel, 0) + stateNodes = make([]v3Models.StateNodeModel, 0) pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) WHERE header_cids.block_number = $1 AND node_type = 3` @@ -485,7 +487,7 @@ func TestSQLXIndexer(t *testing.T) { setupSQLX(t) defer tearDown(t) // check that storage nodes were properly indexed - storageNodes := make([]models.StorageNodeWithStateKeyModel, 0) + storageNodes := make([]v3Models.StorageNodeWithStateKeyModel, 0) pgStr := `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path FROM eth.storage_cids, eth.state_cids, eth.header_cids WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id) @@ -497,7 +499,7 @@ func TestSQLXIndexer(t *testing.T) { t.Fatal(err) } test_helpers.ExpectEqual(t, len(storageNodes), 1) - test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{ + test_helpers.ExpectEqual(t, storageNodes[0], v3Models.StorageNodeWithStateKeyModel{ CID: storageCID.String(), NodeType: 2, StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(), @@ -518,7 +520,7 @@ func TestSQLXIndexer(t *testing.T) { test_helpers.ExpectEqual(t, data, mocks.StorageLeafNode) // check that Removed storage nodes were properly indexed - storageNodes = make([]models.StorageNodeWithStateKeyModel, 0) + storageNodes = make([]v3Models.StorageNodeWithStateKeyModel, 0) pgStr = `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path FROM eth.storage_cids, eth.state_cids, eth.header_cids WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id) @@ -530,7 +532,7 @@ func TestSQLXIndexer(t *testing.T) { t.Fatal(err) } test_helpers.ExpectEqual(t, len(storageNodes), 1) - test_helpers.ExpectEqual(t, storageNodes[0], models.StorageNodeWithStateKeyModel{ + test_helpers.ExpectEqual(t, storageNodes[0], v3Models.StorageNodeWithStateKeyModel{ CID: shared.RemovedNodeStorageCID, NodeType: 3, StorageKey: 
common.BytesToHash(mocks.RemovedLeafKey).Hex(), diff --git a/statediff/indexer/database/sql/v2/writer.go b/statediff/indexer/database/sql/v2/writer.go new file mode 100644 index 000000000..feeb49c56 --- /dev/null +++ b/statediff/indexer/database/sql/v2/writer.go @@ -0,0 +1,222 @@ +// VulcanizeDB +// Copyright © 2019 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package sql + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" + "github.com/ethereum/go-ethereum/statediff/indexer/models/v2" + "github.com/ethereum/go-ethereum/statediff/indexer/node" +) + +var ( + nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") +) + +// Writer handles processing and writing of indexed IPLD objects to Postgres +type Writer struct { + DB interfaces.Database + metrics sql.IndexerMetricsHandles + nodeID int64 +} + +// NewWriter creates a new pointer to a Writer +func NewWriter(db interfaces.Database) *Writer { + return &Writer{ + DB: db, + } +} + +// Close satisfies io.Closer +func (w *Writer) Close() error { + return w.DB.Close() +} + +/* +InsertNodeInfo inserts a node info model +INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) VALUES ($1, $2, $3, $4, $5) +ON CONFLICT (genesis_block, network_id, node_id, chain_id) DO NOTHING +*/ +func (w *Writer) InsertNodeInfo(info node.Info) error { + var nodeID int64 + if err := w.DB.QueryRow(w.DB.Context(), w.DB.InsertNodeInfoStm(), info.GenesisBlock, info.NetworkID, info.ID, + info.ClientName, info.ChainID).Scan(&nodeID); err != nil { + return err + } + w.nodeID = nodeID + return nil +} + +/* +InsertHeaderCID inserts a header model +INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee) +VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) +ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, base_fee) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16) +*/ +func (w *Writer) InsertHeaderCID(tx interfaces.Tx, header *models.HeaderModel) (int64, error) { + var headerID int64 + err := tx.QueryRow(w.DB.Context(), w.DB.InsertHeaderStm(), + header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, w.nodeID, + header.Reward, header.StateRoot, header.TxRoot, header.RctRoot, header.UncleRoot, header.Bloom, + header.Timestamp, header.MhKey, 1, header.BaseFee).Scan(&headerID) + if err != nil { + return 0, fmt.Errorf("error inserting header_cids entry: %v", err) + } + w.metrics.Blocks.Inc(1) + 
return headerID, nil +} + +/* +InsertUncleCID inserts an uncle model +INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6) +ON CONFLICT (header_id, block_hash) DO NOTHING +*/ +func (w *Writer) InsertUncleCID(tx interfaces.Tx, uncle *models.UncleModel) error { + _, err := tx.Exec(w.DB.Context(), w.DB.InsertUncleStm(), + uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey) + if err != nil { + return fmt.Errorf("error inserting uncle_cids entry: %v", err) + } + return nil +} + +/* +InsertTransactionCID inserts a tx model +INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) +ON CONFLICT (header_id, tx_hash) DO NOTHING +*/ +func (w *Writer) InsertTransactionCID(tx interfaces.Tx, transaction *models.TxModel) (int64, error) { + var txID int64 + err := tx.QueryRow(w.DB.Context(), w.DB.InsertTxStm(), + transaction.HeaderID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, + transaction.MhKey, transaction.Data, transaction.Type).Scan(&txID) + if err != nil { + return 0, fmt.Errorf("error inserting transaction_cids entry: %v", err) + } + w.metrics.Transactions.Inc(1) + return txID, nil +} + +/* +InsertAccessListElement inserts an access list element model +INSERT INTO eth.access_list_elements (tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4) +ON CONFLICT (tx_id, index) DO NOTHING +*/ +func (w *Writer) InsertAccessListElement(tx interfaces.Tx, accessListElement *models.AccessListElementModel) error { + _, err := tx.Exec(w.DB.Context(), w.DB.InsertAccessListElementStm(), + accessListElement.TxID, accessListElement.Index, accessListElement.Address, accessListElement.StorageKeys) + if err != nil { + return fmt.Errorf("error inserting access_list_element entry: %v", err) + } + w.metrics.AccessListEntries.Inc(1) + return nil +} + +/* +InsertReceiptCID inserts a receipt model +INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) +ON CONFLICT (tx_id) DO NOTHING +*/ +func (w *Writer) InsertReceiptCID(tx interfaces.Tx, rct *models.ReceiptModel) (int64, error) { + var receiptID int64 + err := tx.QueryRow(w.DB.Context(), w.DB.InsertRctStm(), + rct.TxID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey, rct.PostState, rct.PostStatus, rct.LogRoot).Scan(&receiptID) + if err != nil { + return 0, fmt.Errorf("error inserting receipt_cids entry: %w", err) + } + w.metrics.Receipts.Inc(1) + return receiptID, nil +} + +/* +InsertLogCID inserts a log model +INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, rct_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) +ON CONFLICT (receipt_id, index) DO NOTHING +*/ +func (w *Writer) InsertLogCID(tx interfaces.Tx, logs []*models.LogsModel) error { + for _, log := range logs { + _, err := tx.Exec(w.DB.Context(), w.DB.InsertLogStm(), + log.LeafCID, log.LeafMhKey, log.ReceiptID, log.Address, log.Index, log.Topic0, log.Topic1, log.Topic2, + log.Topic3, log.Data) + if err != nil { + return fmt.Errorf("error inserting logs entry: %w", err) + } + w.metrics.Logs.Inc(1) + } + return nil +} + +/* +InsertStateCID inserts a state model +INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) +ON 
CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7) +*/ +func (w *Writer) InsertStateCID(tx interfaces.Tx, stateNode *models.StateNodeModel) (int64, error) { + var stateID int64 + var stateKey string + if stateNode.StateKey != nullHash.String() { + stateKey = stateNode.StateKey + } + err := tx.QueryRow(w.DB.Context(), w.DB.InsertStateStm(), + stateNode.HeaderID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey).Scan(&stateID) + if err != nil { + return 0, fmt.Errorf("error inserting state_cids entry: %v", err) + } + return stateID, nil +} + +/* +InsertStateAccount inserts a state account model +INSERT INTO eth.state_accounts (state_id, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5, $6) +ON CONFLICT (state_id) DO NOTHING +*/ +func (w *Writer) InsertStateAccount(tx interfaces.Tx, stateAccount *models.StateAccountModel) error { + _, err := tx.Exec(w.DB.Context(), w.DB.InsertAccountStm(), + stateAccount.StateID, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash, + stateAccount.StorageRoot) + if err != nil { + return fmt.Errorf("error inserting state_accounts entry: %v", err) + } + return nil +} + +/* +InsertStorageCID inserts a storage model +INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) +ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7) +*/ +func (w *Writer) InsertStorageCID(tx interfaces.Tx, storageCID *models.StorageNodeModel) error { + var storageKey string + if storageCID.StorageKey != nullHash.String() { + storageKey = storageCID.StorageKey + } + _, err := tx.Exec(w.DB.Context(), w.DB.InsertStorageStm(), + storageCID.StateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType, + true, storageCID.MhKey) + if err != nil { + return fmt.Errorf("error inserting storage_cids entry: %v", err) + } + return nil +} + +// Stats returns the stats for the underlying DB +func (w *Writer) Stats() interfaces.Stats { + return w.DB.Stats() +} diff --git a/statediff/indexer/database/sql/writer.go b/statediff/indexer/database/sql/v3/writer.go similarity index 60% rename from statediff/indexer/database/sql/writer.go rename to statediff/indexer/database/sql/v3/writer.go index 3f1dfc0b5..593cb3339 100644 --- a/statediff/indexer/database/sql/writer.go +++ b/statediff/indexer/database/sql/v3/writer.go @@ -20,7 +20,10 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/statediff/indexer/models" + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" + "github.com/ethereum/go-ethereum/statediff/indexer/models/v3" + "github.com/ethereum/go-ethereum/statediff/indexer/node" ) var ( @@ -29,156 +32,181 @@ var ( // Writer handles processing and writing of indexed IPLD objects to Postgres type Writer struct { - db Database + DB interfaces.Database + metrics sql.IndexerMetricsHandles + nodeID string } // NewWriter creates a new pointer to a Writer -func NewWriter(db Database) *Writer { +func NewWriter(db interfaces.Database) *Writer { return &Writer{ - db: db, + DB: db, } } // Close satisfies io.Closer func (w *Writer) Close() error { - return w.db.Close() + return w.DB.Close() } /* +InsertNodeInfo inserts a node info model +INSERT INTO nodes (genesis_block, network_id, node_id, client_name, 
chain_id) VALUES ($1, $2, $3, $4, $5) +ON CONFLICT (node_id) DO NOTHING +*/ +func (w *Writer) InsertNodeInfo(info node.Info) error { + if _, err := w.DB.Exec(w.DB.Context(), w.DB.InsertNodeInfoStm(), info.GenesisBlock, info.NetworkID, info.ID, + info.ClientName, info.ChainID); err != nil { + return err + } + w.nodeID = info.ID + return nil +} + +/* +InsertHeaderCID inserts a header model INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) ON CONFLICT (block_hash) DO UPDATE SET (block_number, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = ($1, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16) */ -func (w *Writer) upsertHeaderCID(tx Tx, header models.HeaderModel) error { - _, err := tx.Exec(w.db.Context(), w.db.InsertHeaderStm(), - header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, w.db.NodeID(), +func (w *Writer) InsertHeaderCID(tx interfaces.Tx, header models.HeaderModel) error { + _, err := tx.Exec(w.DB.Context(), w.DB.InsertHeaderStm(), + header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, w.nodeID, header.Reward, header.StateRoot, header.TxRoot, header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, header.MhKey, 1, header.Coinbase) if err != nil { - return fmt.Errorf("error upserting header_cids entry: %v", err) + return fmt.Errorf("error inserting header_cids entry: %v", err) } - indexerMetrics.blocks.Inc(1) + w.metrics.Blocks.Inc(1) return nil } /* +InsertUncleCID inserts an uncle model INSERT INTO eth.uncle_cids (block_hash, header_id, parent_hash, cid, reward, mh_key) VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (block_hash) DO NOTHING */ -func (w *Writer) upsertUncleCID(tx Tx, uncle models.UncleModel) error { - _, err := tx.Exec(w.db.Context(), w.db.InsertUncleStm(), +func (w *Writer) InsertUncleCID(tx interfaces.Tx, uncle *models.UncleModel) error { + _, err := tx.Exec(w.DB.Context(), w.DB.InsertUncleStm(), uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey) if err != nil { - return fmt.Errorf("error upserting uncle_cids entry: %v", err) + return fmt.Errorf("error inserting uncle_cids entry: %v", err) } return nil } /* +InsertTransactionCID inserts a tx model INSERT INTO eth.transaction_cids (header_id, tx_hash, cid, dst, src, index, mh_key, tx_data, tx_type, value) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) ON CONFLICT (tx_hash) DO NOTHING */ -func (w *Writer) upsertTransactionCID(tx Tx, transaction models.TxModel) error { - _, err := tx.Exec(w.db.Context(), w.db.InsertTxStm(), +func (w *Writer) InsertTransactionCID(tx interfaces.Tx, transaction *models.TxModel) error { + _, err := tx.Exec(w.DB.Context(), w.DB.InsertTxStm(), transaction.HeaderID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, transaction.MhKey, transaction.Data, transaction.Type, transaction.Value) if err != nil { - return fmt.Errorf("error upserting transaction_cids entry: %v", err) + return fmt.Errorf("error inserting transaction_cids entry: %v", err) } - indexerMetrics.transactions.Inc(1) + w.metrics.Transactions.Inc(1) return nil } /* +InsertAccessListElement inserts an access 
list element model INSERT INTO eth.access_list_elements (tx_id, index, address, storage_keys) VALUES ($1, $2, $3, $4) ON CONFLICT (tx_id, index) DO NOTHING */ -func (w *Writer) upsertAccessListElement(tx Tx, accessListElement models.AccessListElementModel) error { - _, err := tx.Exec(w.db.Context(), w.db.InsertAccessListElementStm(), +func (w *Writer) InsertAccessListElement(tx interfaces.Tx, accessListElement *models.AccessListElementModel) error { + _, err := tx.Exec(w.DB.Context(), w.DB.InsertAccessListElementStm(), accessListElement.TxID, accessListElement.Index, accessListElement.Address, accessListElement.StorageKeys) if err != nil { - return fmt.Errorf("error upserting access_list_element entry: %v", err) + return fmt.Errorf("error inserting access_list_element entry: %v", err) } - indexerMetrics.accessListEntries.Inc(1) + w.metrics.AccessListEntries.Inc(1) return nil } /* +InsertReceiptCID inserts a receipt model INSERT INTO eth.receipt_cids (tx_id, leaf_cid, contract, contract_hash, leaf_mh_key, post_state, post_status, log_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) ON CONFLICT (tx_id) DO NOTHING */ -func (w *Writer) upsertReceiptCID(tx Tx, rct *models.ReceiptModel) error { - _, err := tx.Exec(w.db.Context(), w.db.InsertRctStm(), +func (w *Writer) InsertReceiptCID(tx interfaces.Tx, rct *models.ReceiptModel) error { + _, err := tx.Exec(w.DB.Context(), w.DB.InsertRctStm(), rct.TxID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey, rct.PostState, rct.PostStatus, rct.LogRoot) if err != nil { - return fmt.Errorf("error upserting receipt_cids entry: %w", err) + return fmt.Errorf("error inserting receipt_cids entry: %w", err) } - indexerMetrics.receipts.Inc(1) + w.metrics.Receipts.Inc(1) return nil } /* +InsertLogCID inserts a log model INSERT INTO eth.log_cids (leaf_cid, leaf_mh_key, rct_id, address, index, topic0, topic1, topic2, topic3, log_data) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) ON CONFLICT (rct_id, index) DO NOTHING */ -func (w *Writer) upsertLogCID(tx Tx, logs []*models.LogsModel) error { +func (w *Writer) InsertLogCID(tx interfaces.Tx, logs []*models.LogsModel) error { for _, log := range logs { - _, err := tx.Exec(w.db.Context(), w.db.InsertLogStm(), + _, err := tx.Exec(w.DB.Context(), w.DB.InsertLogStm(), log.LeafCID, log.LeafMhKey, log.ReceiptID, log.Address, log.Index, log.Topic0, log.Topic1, log.Topic2, log.Topic3, log.Data) if err != nil { - return fmt.Errorf("error upserting logs entry: %w", err) + return fmt.Errorf("error inserting logs entry: %w", err) } - indexerMetrics.logs.Inc(1) + w.metrics.Logs.Inc(1) } return nil } /* +InsertStateCID inserts a state model INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7) ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7) */ -func (w *Writer) upsertStateCID(tx Tx, stateNode models.StateNodeModel) error { +func (w *Writer) InsertStateCID(tx interfaces.Tx, stateNode *models.StateNodeModel) error { var stateKey string if stateNode.StateKey != nullHash.String() { stateKey = stateNode.StateKey } - _, err := tx.Exec(w.db.Context(), w.db.InsertStateStm(), + _, err := tx.Exec(w.DB.Context(), w.DB.InsertStateStm(), stateNode.HeaderID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey) if err != nil { - return fmt.Errorf("error upserting state_cids entry: %v", err) + return fmt.Errorf("error inserting state_cids entry: %v", err) } 
return nil } /* +InsertStateAccount inserts a state account model INSERT INTO eth.state_accounts (header_id, state_path, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (header_id, state_path) DO NOTHING */ -func (w *Writer) upsertStateAccount(tx Tx, stateAccount models.StateAccountModel) error { - _, err := tx.Exec(w.db.Context(), w.db.InsertAccountStm(), +func (w *Writer) InsertStateAccount(tx interfaces.Tx, stateAccount *models.StateAccountModel) error { + _, err := tx.Exec(w.DB.Context(), w.DB.InsertAccountStm(), stateAccount.HeaderID, stateAccount.StatePath, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot) if err != nil { - return fmt.Errorf("error upserting state_accounts entry: %v", err) + return fmt.Errorf("error inserting state_accounts entry: %v", err) } return nil } /* +InsertStorageCID inserts a storage model INSERT INTO eth.storage_cids (header_id, state_path, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) ON CONFLICT (header_id, state_path, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($3, $4, $6, $7, $8) */ -func (w *Writer) upsertStorageCID(tx Tx, storageCID models.StorageNodeModel) error { +func (w *Writer) InsertStorageCID(tx interfaces.Tx, storageCID *models.StorageNodeModel) error { var storageKey string if storageCID.StorageKey != nullHash.String() { storageKey = storageCID.StorageKey } - _, err := tx.Exec(w.db.Context(), w.db.InsertStorageStm(), + _, err := tx.Exec(w.DB.Context(), w.DB.InsertStorageStm(), storageCID.HeaderID, storageCID.StatePath, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType, true, storageCID.MhKey) if err != nil { - return fmt.Errorf("error upserting storage_cids entry: %v", err) + return fmt.Errorf("error inserting storage_cids entry: %v", err) } return nil } diff --git a/statediff/indexer/interfaces/interfaces.go b/statediff/indexer/interfaces/interfaces.go index 8f951230d..621dcd6e7 100644 --- a/statediff/indexer/interfaces/interfaces.go +++ b/statediff/indexer/interfaces/interfaces.go @@ -17,6 +17,7 @@ package interfaces import ( + "context" "io" "math/big" "time" @@ -28,10 +29,11 @@ import ( // StateDiffIndexer interface required to index statediff data type StateDiffIndexer interface { - PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (Batch, error) - PushStateNode(tx Batch, stateNode sdtypes.StateNode, headerID string) error + PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (Batch, int64, error) + PushStateNode(tx Batch, stateNode sdtypes.StateNode, headerHash string, headerID int64) error PushCodeAndCodeHash(tx Batch, codeAndCodeHash sdtypes.CodeAndCodeHash) error - ReportDBMetrics(delay time.Duration, quit <-chan bool) + ReportOldDBMetrics(delay time.Duration, quit <-chan bool) + ReportNewDBMetrics(delay time.Duration, quit <-chan bool) io.Closer } @@ -44,3 +46,68 @@ type Batch interface { type Config interface { Type() shared.DBType } + +// Database interfaces required by the sql indexer +type Database interface { + Driver + Statements + Version() uint +} + +// Driver interface has all the methods required by a driver implementation to support the sql indexer +type Driver interface { + QueryRow(ctx context.Context, sql string, args ...interface{}) ScannableRow + Exec(ctx context.Context, sql string, args ...interface{}) (Result, error) + Select(ctx context.Context, dest interface{}, query 
string, args ...interface{}) error + Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error + Begin(ctx context.Context) (Tx, error) + Stats() Stats + Context() context.Context + io.Closer +} + +// Statements interface to accommodate different SQL query syntax +type Statements interface { + InsertNodeInfoStm() string + InsertHeaderStm() string + InsertUncleStm() string + InsertTxStm() string + InsertAccessListElementStm() string + InsertRctStm() string + InsertLogStm() string + InsertStateStm() string + InsertAccountStm() string + InsertStorageStm() string + InsertIPLDStm() string + InsertIPLDsStm() string +} + +// Tx interface to accommodate different concrete SQL transaction types +type Tx interface { + QueryRow(ctx context.Context, sql string, args ...interface{}) ScannableRow + Exec(ctx context.Context, sql string, args ...interface{}) (Result, error) + Commit(ctx context.Context) error + Rollback(ctx context.Context) error +} + +// ScannableRow interface to accommodate different concrete row types +type ScannableRow interface { + Scan(dest ...interface{}) error +} + +// Result interface to accommodate different concrete result types +type Result interface { + RowsAffected() (int64, error) +} + +// Stats interface to accommodate different concrete sql stats types +type Stats interface { + MaxOpen() int64 + Open() int64 + InUse() int64 + Idle() int64 + WaitCount() int64 + WaitDuration() time.Duration + MaxIdleClosed() int64 + MaxLifetimeClosed() int64 +} diff --git a/statediff/indexer/mocks/test_data.go b/statediff/indexer/mocks/test_data.go index e5d72e5ba..e10c7d977 100644 --- a/statediff/indexer/mocks/test_data.go +++ b/statediff/indexer/mocks/test_data.go @@ -28,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/statediff/indexer/models" "github.com/ethereum/go-ethereum/statediff/test_helpers" sdtypes "github.com/ethereum/go-ethereum/statediff/types" "github.com/ethereum/go-ethereum/trie" @@ -105,11 +104,11 @@ var ( Address: AnotherAddress, StorageKeys: []common.Hash{common.BytesToHash(StorageLeafKey), common.BytesToHash(MockStorageLeafKey)}, } - AccessListEntry1Model = models.AccessListElementModel{ + AccessListEntry1Model = v2.AccessListElementModel{ Index: 0, Address: Address.Hex(), } - AccessListEntry2Model = models.AccessListElementModel{ + AccessListEntry2Model = v2.AccessListElementModel{ Index: 1, Address: AnotherAddress.Hex(), StorageKeys: []string{common.BytesToHash(StorageLeafKey).Hex(), common.BytesToHash(MockStorageLeafKey).Hex()}, diff --git a/statediff/indexer/models/v2/batch.go b/statediff/indexer/models/v2/batch.go index 585d59403..92f7e0be0 100644 --- a/statediff/indexer/models/v2/batch.go +++ b/statediff/indexer/models/v2/batch.go @@ -18,12 +18,6 @@ package models import "github.com/lib/pq" -// IPLDBatch holds the arguments for a batch insert of IPLD data -type IPLDBatch struct { - Keys []string - Values [][]byte -} - // UncleBatch is the db model for eth.uncle_cids type UncleBatch struct { IDs []int64 diff --git a/statediff/indexer/models/v2/models.go b/statediff/indexer/models/v2/models.go index d303826b7..cbb7c55f2 100644 --- a/statediff/indexer/models/v2/models.go +++ b/statediff/indexer/models/v2/models.go @@ -20,23 +20,23 @@ import "github.com/lib/pq" // HeaderModel is the db model for eth.header_cids type HeaderModel struct { - ID int64 `db:"id"` - BlockNumber string `db:"block_number"` - BlockHash string 
`db:"block_hash"` - ParentHash string `db:"parent_hash"` - CID string `db:"cid"` - MhKey string `db:"mh_key"` - TotalDifficulty string `db:"td"` - NodeID int64 `db:"node_id"` - Reward string `db:"reward"` - StateRoot string `db:"state_root"` - UncleRoot string `db:"uncle_root"` - TxRoot string `db:"tx_root"` - RctRoot string `db:"receipt_root"` - Bloom []byte `db:"bloom"` - Timestamp uint64 `db:"timestamp"` - TimesValidated int64 `db:"times_validated"` - BaseFee *int64 `db:"base_fee"` + ID int64 `db:"id"` + BlockNumber string `db:"block_number"` + BlockHash string `db:"block_hash"` + ParentHash string `db:"parent_hash"` + CID string `db:"cid"` + MhKey string `db:"mh_key"` + TotalDifficulty string `db:"td"` + NodeID int64 `db:"node_id"` + Reward string `db:"reward"` + StateRoot string `db:"state_root"` + UncleRoot string `db:"uncle_root"` + TxRoot string `db:"tx_root"` + RctRoot string `db:"receipt_root"` + Bloom []byte `db:"bloom"` + Timestamp uint64 `db:"timestamp"` + TimesValidated int64 `db:"times_validated"` + BaseFee *string `db:"base_fee"` } // UncleModel is the db model for eth.uncle_cids diff --git a/statediff/indexer/models/v3/batch.go b/statediff/indexer/models/v3/batch.go index 16096f292..3df3244b3 100644 --- a/statediff/indexer/models/v3/batch.go +++ b/statediff/indexer/models/v3/batch.go @@ -18,12 +18,6 @@ package models import "github.com/lib/pq" -// IPLDBatch holds the arguments for a batch insert of IPLD data -type IPLDBatch struct { - Keys []string - Values [][]byte -} - // UncleBatch holds the arguments for a batch insert of uncle data type UncleBatch struct { HeaderID []string diff --git a/statediff/service.go b/statediff/service.go index c8c7649fd..88186b99d 100644 --- a/statediff/service.go +++ b/statediff/service.go @@ -164,7 +164,8 @@ func New(stack *node.Node, ethServ *eth.Ethereum, cfg *ethconfig.Config, params if err != nil { return err } - indexer.ReportDBMetrics(10*time.Second, quitCh) + indexer.ReportOldDBMetrics(10*time.Second, quitCh) + indexer.ReportNewDBMetrics(10*time.Second, quitCh) } workers := params.NumWorkers if workers == 0 { @@ -661,6 +662,7 @@ func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, p // log.Info("Writing state diff", "block height", block.Number().Uint64()) var totalDifficulty *big.Int var receipts types.Receipts + var headerID int64 var err error var tx interfaces.Batch if params.IncludeTD { @@ -669,7 +671,7 @@ func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, p if params.IncludeReceipts { receipts = sds.BlockChain.GetReceiptsByHash(block.Hash()) } - tx, err = sds.indexer.PushBlock(block, receipts, totalDifficulty) + tx, headerID, err = sds.indexer.PushBlock(block, receipts, totalDifficulty) if err != nil { return err } @@ -680,7 +682,7 @@ func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, p } }() output := func(node types2.StateNode) error { - return sds.indexer.PushStateNode(tx, node, block.Hash().String()) + return sds.indexer.PushStateNode(tx, node, block.Hash().String(), headerID) } codeOutput := func(c types2.CodeAndCodeHash) error { return sds.indexer.PushCodeAndCodeHash(tx, c) -- 2.45.2 From 4519813341e18abb63c93c4459ad0d8a020b18de Mon Sep 17 00:00:00 2001 From: i-norden Date: Sun, 30 Jan 2022 21:17:25 -0600 Subject: [PATCH 3/7] cli wiring --- cmd/geth/config.go | 82 ++++++++++++++++++++-------- cmd/utils/flags.go | 132 ++++++++++++++++++++++++++++++++------------- 2 files changed, 154 insertions(+), 60 deletions(-) diff --git 
a/cmd/geth/config.go b/cmd/geth/config.go index e84a0b6d4..35fce6ce8 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -201,40 +201,78 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { FilePath: ctx.GlobalString(utils.StateDiffFilePath.Name), } case shared.POSTGRES: - driverTypeStr := ctx.GlobalString(utils.StateDiffDBDriverTypeFlag.Name) - driverType, err := postgres.ResolveDriverType(driverTypeStr) + v2DriverTypeStr := ctx.GlobalString(utils.StateDiffV2DBDriverTypeFlag.Name) + v2DriverType, err := postgres.ResolveDriverType(v2DriverTypeStr) if err != nil { utils.Fatalf("%v", err) } - pgConfig := postgres.Config{ - Hostname: ctx.GlobalString(utils.StateDiffDBHostFlag.Name), - Port: ctx.GlobalInt(utils.StateDiffDBPortFlag.Name), - DatabaseName: ctx.GlobalString(utils.StateDiffDBNameFlag.Name), - Username: ctx.GlobalString(utils.StateDiffDBUserFlag.Name), - Password: ctx.GlobalString(utils.StateDiffDBPasswordFlag.Name), + v2PgConfig := postgres.Config{ + Hostname: ctx.GlobalString(utils.StateDiffV2DBHostFlag.Name), + Port: ctx.GlobalInt(utils.StateDiffV2DBPortFlag.Name), + DatabaseName: ctx.GlobalString(utils.StateDiffV2DBNameFlag.Name), + Username: ctx.GlobalString(utils.StateDiffV2DBUserFlag.Name), + Password: ctx.GlobalString(utils.StateDiffV2DBPasswordFlag.Name), ID: nodeID, ClientName: clientName, - Driver: driverType, + Driver: v2DriverType, } - if ctx.GlobalIsSet(utils.StateDiffDBMinConns.Name) { - pgConfig.MinConns = ctx.GlobalInt(utils.StateDiffDBMinConns.Name) + if ctx.GlobalIsSet(utils.StateDiffV2DBMinConns.Name) { + v2PgConfig.MinConns = ctx.GlobalInt(utils.StateDiffV2DBMinConns.Name) } - if ctx.GlobalIsSet(utils.StateDiffDBMaxConns.Name) { - pgConfig.MaxConns = ctx.GlobalInt(utils.StateDiffDBMaxConns.Name) + if ctx.GlobalIsSet(utils.StateDiffV2DBMaxConns.Name) { + v2PgConfig.MaxConns = ctx.GlobalInt(utils.StateDiffV2DBMaxConns.Name) } - if ctx.GlobalIsSet(utils.StateDiffDBMaxIdleConns.Name) { - pgConfig.MaxIdle = ctx.GlobalInt(utils.StateDiffDBMaxIdleConns.Name) + if ctx.GlobalIsSet(utils.StateDiffV2DBMaxIdleConns.Name) { + v2PgConfig.MaxIdle = ctx.GlobalInt(utils.StateDiffV2DBMaxIdleConns.Name) } - if ctx.GlobalIsSet(utils.StateDiffDBMaxConnLifetime.Name) { - pgConfig.MaxConnLifetime = ctx.GlobalDuration(utils.StateDiffDBMaxConnLifetime.Name) * time.Second + if ctx.GlobalIsSet(utils.StateDiffV2DBMaxConnLifetime.Name) { + v2PgConfig.MaxConnLifetime = ctx.GlobalDuration(utils.StateDiffV2DBMaxConnLifetime.Name) * time.Second } - if ctx.GlobalIsSet(utils.StateDiffDBMaxConnIdleTime.Name) { - pgConfig.MaxConnIdleTime = ctx.GlobalDuration(utils.StateDiffDBMaxConnIdleTime.Name) * time.Second + if ctx.GlobalIsSet(utils.StateDiffV2DBMaxConnIdleTime.Name) { + v2PgConfig.MaxConnIdleTime = ctx.GlobalDuration(utils.StateDiffV2DBMaxConnIdleTime.Name) * time.Second } - if ctx.GlobalIsSet(utils.StateDiffDBConnTimeout.Name) { - pgConfig.ConnTimeout = ctx.GlobalDuration(utils.StateDiffDBConnTimeout.Name) * time.Second + if ctx.GlobalIsSet(utils.StateDiffV2DBConnTimeout.Name) { + v2PgConfig.ConnTimeout = ctx.GlobalDuration(utils.StateDiffV2DBConnTimeout.Name) * time.Second + } + + v3DriverTypeStr := ctx.GlobalString(utils.StateDiffV2DBDriverTypeFlag.Name) + v3DriverType, err := postgres.ResolveDriverType(v3DriverTypeStr) + if err != nil { + utils.Fatalf("%v", err) + } + v3PgConfig := postgres.Config{ + Hostname: ctx.GlobalString(utils.StateDiffV3DBHostFlag.Name), + Port: ctx.GlobalInt(utils.StateDiffV3DBPortFlag.Name), + DatabaseName: 
ctx.GlobalString(utils.StateDiffV3DBNameFlag.Name), + Username: ctx.GlobalString(utils.StateDiffV3DBUserFlag.Name), + Password: ctx.GlobalString(utils.StateDiffV3DBPasswordFlag.Name), + ID: nodeID, + ClientName: clientName, + Driver: v3DriverType, + } + if ctx.GlobalIsSet(utils.StateDiffV3DBMinConns.Name) { + v3PgConfig.MinConns = ctx.GlobalInt(utils.StateDiffV3DBMinConns.Name) + } + if ctx.GlobalIsSet(utils.StateDiffV3DBMaxConns.Name) { + v3PgConfig.MaxConns = ctx.GlobalInt(utils.StateDiffV3DBMaxConns.Name) + } + if ctx.GlobalIsSet(utils.StateDiffV3DBMaxIdleConns.Name) { + v3PgConfig.MaxIdle = ctx.GlobalInt(utils.StateDiffV3DBMaxIdleConns.Name) + } + if ctx.GlobalIsSet(utils.StateDiffV3DBMaxConnLifetime.Name) { + v3PgConfig.MaxConnLifetime = ctx.GlobalDuration(utils.StateDiffV3DBMaxConnLifetime.Name) * time.Second + } + if ctx.GlobalIsSet(utils.StateDiffV3DBMaxConnIdleTime.Name) { + v3PgConfig.MaxConnIdleTime = ctx.GlobalDuration(utils.StateDiffV3DBMaxConnIdleTime.Name) * time.Second + } + if ctx.GlobalIsSet(utils.StateDiffV3DBConnTimeout.Name) { + v3PgConfig.ConnTimeout = ctx.GlobalDuration(utils.StateDiffV3DBConnTimeout.Name) * time.Second + } + + indexerConfig = postgres.MultiConfig{ + V2: v2PgConfig, + V3: v3PgConfig, } - indexerConfig = pgConfig case shared.DUMP: dumpTypeStr := ctx.GlobalString(utils.StateDiffDBDumpDst.Name) dumpType, err := dumpdb.ResolveDumpType(dumpTypeStr) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 3dfd2dad0..4b34c9ad4 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -796,6 +796,7 @@ var ( Name: "catalyst", Usage: "Catalyst mode (eth2 integration testing)", } + StateDiffFlag = cli.BoolFlag{ Name: "statediff", Usage: "Enables the processing of state diffs between each block", @@ -805,63 +806,118 @@ var ( Usage: "Statediff database type (current options: postgres, file, dump)", Value: "postgres", } - StateDiffDBDriverTypeFlag = cli.StringFlag{ - Name: "statediff.db.driver", - Usage: "Statediff database driver type", - Value: "pgx", - } StateDiffDBDumpDst = cli.StringFlag{ Name: "statediff.dump.dst", Usage: "Statediff database dump destination (default is stdout)", Value: "stdout", } - StateDiffDBHostFlag = cli.StringFlag{ - Name: "statediff.db.host", - Usage: "Statediff database hostname/ip", + + StateDiffV2DBDriverTypeFlag = cli.StringFlag{ + Name: "statediff.db.v2.driver", + Usage: "Statediff v2 database driver type", + Value: "pgx", + } + StateDiffV2DBHostFlag = cli.StringFlag{ + Name: "statediff.db.v2.host", + Usage: "Statediff v2 database hostname/ip", Value: "localhost", } - StateDiffDBPortFlag = cli.IntFlag{ - Name: "statediff.db.port", - Usage: "Statediff database port", + StateDiffV2DBPortFlag = cli.IntFlag{ + Name: "statediff.db.v2.port", + Usage: "Statediff v2 database port", Value: 5432, } - StateDiffDBNameFlag = cli.StringFlag{ - Name: "statediff.db.name", - Usage: "Statediff database name", + StateDiffV2DBNameFlag = cli.StringFlag{ + Name: "statediff.db.v2.name", + Usage: "Statediff v2 database name", } - StateDiffDBPasswordFlag = cli.StringFlag{ - Name: "statediff.db.password", - Usage: "Statediff database password", + StateDiffV2DBPasswordFlag = cli.StringFlag{ + Name: "statediff.db.v2.password", + Usage: "Statediff v2 database password", } - StateDiffDBUserFlag = cli.StringFlag{ - Name: "statediff.db.user", - Usage: "Statediff database username", + StateDiffV2DBUserFlag = cli.StringFlag{ + Name: "statediff.db.v2.user", + Usage: "Statediff v2 database username", Value: "postgres", } - StateDiffDBMaxConnLifetime = 
cli.DurationFlag{ - Name: "statediff.db.maxconnlifetime", - Usage: "Statediff database maximum connection lifetime (in seconds)", + StateDiffV2DBMaxConnLifetime = cli.DurationFlag{ + Name: "statediff.db.v2.maxconnlifetime", + Usage: "Statediff v2 database maximum connection lifetime (in seconds)", } - StateDiffDBMaxConnIdleTime = cli.DurationFlag{ - Name: "statediff.db.maxconnidletime", - Usage: "Statediff database maximum connection idle time (in seconds)", + StateDiffV2DBMaxConnIdleTime = cli.DurationFlag{ + Name: "statediff.db.v2.maxconnidletime", + Usage: "Statediff v2 database maximum connection idle time (in seconds)", } - StateDiffDBMaxConns = cli.IntFlag{ - Name: "statediff.db.maxconns", - Usage: "Statediff database maximum connections", + StateDiffV2DBMaxConns = cli.IntFlag{ + Name: "statediff.db.v2.maxconns", + Usage: "Statediff v2 database maximum connections", } - StateDiffDBMinConns = cli.IntFlag{ - Name: "statediff.db.minconns", - Usage: "Statediff database minimum connections", + StateDiffV2DBMinConns = cli.IntFlag{ + Name: "statediff.db.v2.minconns", + Usage: "Statediff v2 database minimum connections", } - StateDiffDBMaxIdleConns = cli.IntFlag{ - Name: "statediff.db.maxidleconns", - Usage: "Statediff database maximum idle connections", + StateDiffV2DBMaxIdleConns = cli.IntFlag{ + Name: "statediff.db.v2.maxidleconns", + Usage: "Statediff v2 database maximum idle connections", } - StateDiffDBConnTimeout = cli.DurationFlag{ - Name: "statediff.db.conntimeout", - Usage: "Statediff database connection timeout (in seconds)", + StateDiffV2DBConnTimeout = cli.DurationFlag{ + Name: "statediff.db.v2.conntimeout", + Usage: "Statediff v2 database connection timeout (in seconds)", } + + StateDiffV3DBDriverTypeFlag = cli.StringFlag{ + Name: "statediff.db.v3.driver", + Usage: "Statediff v3 database driver type", + Value: "pgx", + } + StateDiffV3DBHostFlag = cli.StringFlag{ + Name: "statediff.db.v3.host", + Usage: "Statediff v3 database hostname/ip", + Value: "localhost", + } + StateDiffV3DBPortFlag = cli.IntFlag{ + Name: "statediff.db.v3.port", + Usage: "Statediff v3 database port", + Value: 5432, + } + StateDiffV3DBNameFlag = cli.StringFlag{ + Name: "statediff.db.v3.name", + Usage: "Statediff v3 database name", + } + StateDiffV3DBPasswordFlag = cli.StringFlag{ + Name: "statediff.db.v3.password", + Usage: "Statediff v3 database password", + } + StateDiffV3DBUserFlag = cli.StringFlag{ + Name: "statediff.db.v3.user", + Usage: "Statediff v3 database username", + Value: "postgres", + } + StateDiffV3DBMaxConnLifetime = cli.DurationFlag{ + Name: "statediff.db.v3.maxconnlifetime", + Usage: "Statediff v3 database maximum connection lifetime (in seconds)", + } + StateDiffV3DBMaxConnIdleTime = cli.DurationFlag{ + Name: "statediff.db.v3.maxconnidletime", + Usage: "Statediff v3 database maximum connection idle time (in seconds)", + } + StateDiffV3DBMaxConns = cli.IntFlag{ + Name: "statediff.db.v3.maxconns", + Usage: "Statediff v3 database maximum connections", + } + StateDiffV3DBMinConns = cli.IntFlag{ + Name: "statediff.db.v3.minconns", + Usage: "Statediff v3 database minimum connections", + } + StateDiffV3DBMaxIdleConns = cli.IntFlag{ + Name: "statediff.db.v3.maxidleconns", + Usage: "Statediff v3 database maximum idle connections", + } + StateDiffV3DBConnTimeout = cli.DurationFlag{ + Name: "statediff.db.v3.conntimeout", + Usage: "Statediff v3 database connection timeout (in seconds)", + } + StateDiffDBNodeIDFlag = cli.StringFlag{ Name: "statediff.db.nodeid", Usage: "Node ID to use when writing 
state diffs to database", -- 2.45.2 From 27e96c4aedf502cd3ecb4b17070025c7dd05f3e8 Mon Sep 17 00:00:00 2001 From: i-norden Date: Mon, 31 Jan 2022 12:36:48 -0600 Subject: [PATCH 4/7] fixes --- cmd/geth/config.go | 4 +- cmd/geth/main.go | 36 +++++++---- cmd/geth/usage.go | 36 +++++++---- go.mod | 2 - go.sum | 2 - statediff/README.md | 47 +++++++++----- statediff/indexer/constructor.go | 2 +- statediff/indexer/database/dump/batch_tx.go | 16 +++-- .../indexer/database/file/indexer_test.go | 2 - statediff/indexer/database/sql/batch_tx.go | 5 +- statediff/indexer/database/sql/indexer.go | 64 ++++++++++--------- .../database/sql/{ => metrics}/metrics.go | 34 ++++++---- .../indexer/database/sql/postgres/pgx.go | 2 + .../indexer/database/sql/postgres/sqlx.go | 10 ++- .../database/sql/postgres/v2/database.go | 4 +- .../indexer/database/sql/test_helpers.go | 4 +- statediff/indexer/database/sql/v2/writer.go | 23 +++---- statediff/indexer/database/sql/v3/writer.go | 23 +++---- statediff/service.go | 7 +- 19 files changed, 193 insertions(+), 130 deletions(-) rename statediff/indexer/database/sql/{ => metrics}/metrics.go (86%) diff --git a/cmd/geth/config.go b/cmd/geth/config.go index 35fce6ce8..dcdd07768 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -216,6 +216,7 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { ClientName: clientName, Driver: v2DriverType, } + fmt.Printf("v2 config: %+v\r\n", v2PgConfig) if ctx.GlobalIsSet(utils.StateDiffV2DBMinConns.Name) { v2PgConfig.MinConns = ctx.GlobalInt(utils.StateDiffV2DBMinConns.Name) } @@ -235,7 +236,7 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { v2PgConfig.ConnTimeout = ctx.GlobalDuration(utils.StateDiffV2DBConnTimeout.Name) * time.Second } - v3DriverTypeStr := ctx.GlobalString(utils.StateDiffV2DBDriverTypeFlag.Name) + v3DriverTypeStr := ctx.GlobalString(utils.StateDiffV3DBDriverTypeFlag.Name) v3DriverType, err := postgres.ResolveDriverType(v3DriverTypeStr) if err != nil { utils.Fatalf("%v", err) @@ -250,6 +251,7 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { ClientName: clientName, Driver: v3DriverType, } + fmt.Printf("v3 config: %+v\r\n", v3PgConfig) if ctx.GlobalIsSet(utils.StateDiffV3DBMinConns.Name) { v3PgConfig.MinConns = ctx.GlobalInt(utils.StateDiffV3DBMinConns.Name) } diff --git a/cmd/geth/main.go b/cmd/geth/main.go index fc8202c11..476d39582 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -158,19 +158,31 @@ var ( utils.MinerNotifyFullFlag, utils.StateDiffFlag, utils.StateDiffDBTypeFlag, - utils.StateDiffDBDriverTypeFlag, utils.StateDiffDBDumpDst, - utils.StateDiffDBNameFlag, - utils.StateDiffDBPasswordFlag, - utils.StateDiffDBUserFlag, - utils.StateDiffDBHostFlag, - utils.StateDiffDBPortFlag, - utils.StateDiffDBMaxConnLifetime, - utils.StateDiffDBMaxConnIdleTime, - utils.StateDiffDBMaxConns, - utils.StateDiffDBMinConns, - utils.StateDiffDBMaxIdleConns, - utils.StateDiffDBConnTimeout, + utils.StateDiffV2DBDriverTypeFlag, + utils.StateDiffV2DBNameFlag, + utils.StateDiffV2DBPasswordFlag, + utils.StateDiffV2DBUserFlag, + utils.StateDiffV2DBHostFlag, + utils.StateDiffV2DBPortFlag, + utils.StateDiffV2DBMaxConnLifetime, + utils.StateDiffV2DBMaxConnIdleTime, + utils.StateDiffV2DBMaxConns, + utils.StateDiffV2DBMinConns, + utils.StateDiffV2DBMaxIdleConns, + utils.StateDiffV2DBConnTimeout, + utils.StateDiffV3DBDriverTypeFlag, + utils.StateDiffV3DBNameFlag, + utils.StateDiffV3DBPasswordFlag, + utils.StateDiffV3DBUserFlag, + utils.StateDiffV3DBHostFlag, + 
utils.StateDiffV3DBPortFlag, + utils.StateDiffV3DBMaxConnLifetime, + utils.StateDiffV3DBMaxConnIdleTime, + utils.StateDiffV3DBMaxConns, + utils.StateDiffV3DBMinConns, + utils.StateDiffV3DBMaxIdleConns, + utils.StateDiffV3DBConnTimeout, utils.StateDiffDBNodeIDFlag, utils.StateDiffDBClientNameFlag, utils.StateDiffWritingFlag, diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 51b17083b..52e82b60e 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -228,19 +228,31 @@ var AppHelpFlagGroups = []flags.FlagGroup{ Flags: []cli.Flag{ utils.StateDiffFlag, utils.StateDiffDBTypeFlag, - utils.StateDiffDBDriverTypeFlag, utils.StateDiffDBDumpDst, - utils.StateDiffDBNameFlag, - utils.StateDiffDBPasswordFlag, - utils.StateDiffDBUserFlag, - utils.StateDiffDBHostFlag, - utils.StateDiffDBPortFlag, - utils.StateDiffDBMaxConnLifetime, - utils.StateDiffDBMaxConnIdleTime, - utils.StateDiffDBMaxConns, - utils.StateDiffDBMinConns, - utils.StateDiffDBMaxIdleConns, - utils.StateDiffDBConnTimeout, + utils.StateDiffV2DBDriverTypeFlag, + utils.StateDiffV2DBNameFlag, + utils.StateDiffV2DBPasswordFlag, + utils.StateDiffV2DBUserFlag, + utils.StateDiffV2DBHostFlag, + utils.StateDiffV2DBPortFlag, + utils.StateDiffV2DBMaxConnLifetime, + utils.StateDiffV2DBMaxConnIdleTime, + utils.StateDiffV2DBMaxConns, + utils.StateDiffV2DBMinConns, + utils.StateDiffV2DBMaxIdleConns, + utils.StateDiffV2DBConnTimeout, + utils.StateDiffV3DBDriverTypeFlag, + utils.StateDiffV3DBNameFlag, + utils.StateDiffV3DBPasswordFlag, + utils.StateDiffV3DBUserFlag, + utils.StateDiffV3DBHostFlag, + utils.StateDiffV3DBPortFlag, + utils.StateDiffV3DBMaxConnLifetime, + utils.StateDiffV3DBMaxConnIdleTime, + utils.StateDiffV3DBMaxConns, + utils.StateDiffV3DBMinConns, + utils.StateDiffV3DBMaxIdleConns, + utils.StateDiffV3DBConnTimeout, utils.StateDiffDBNodeIDFlag, utils.StateDiffDBClientNameFlag, utils.StateDiffWritingFlag, diff --git a/go.mod b/go.mod index da726b7c4..86a7aaa2f 100644 --- a/go.mod +++ b/go.mod @@ -47,9 +47,7 @@ require ( github.com/ipfs/go-ipfs-blockstore v1.0.1 github.com/ipfs/go-ipfs-ds-help v1.0.0 github.com/ipfs/go-ipld-format v0.2.0 - github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect github.com/jackc/pgconn v1.10.0 - github.com/jackc/pgx v3.6.2+incompatible github.com/jackc/pgx/v4 v4.13.0 github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e diff --git a/go.sum b/go.sum index cf5e867db..df08fa841 100644 --- a/go.sum +++ b/go.sum @@ -305,8 +305,6 @@ github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9 github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= diff --git a/statediff/README.md b/statediff/README.md index 35d50e39d..897ad74a2 100644 
--- a/statediff/README.md +++ b/statediff/README.md @@ -33,7 +33,7 @@ type StateNode struct { NodeType NodeType `json:"nodeType" gencodec:"required"` Path []byte `json:"path" gencodec:"required"` NodeValue []byte `json:"value" gencodec:"required"` - StorageNodes []StorageNode `json:"storage"` + StorageNodes []StorageNode `json:"storage"` LeafKey []byte `json:"leafKey"` } @@ -80,29 +80,42 @@ This service introduces a CLI flag namespace `statediff` `--statediff.writing` is used to tell the service to write state diff objects it produces from synced ChainEvents directly to a configured Postgres database `--statediff.workers` is used to set the number of concurrent workers to process state diff objects and write them into the database `--statediff.db.type` is the type of database we write out to (current options: postgres, dump, file) +`--statediff.file.path` full path (including filename) to write statediff data out to when operating in file mode `--statediff.dump.dst` is the destination to write to when operating in database dump mode (stdout, stderr, discard) -`--statediff.db.driver` is the specific driver to use for the database (current options for postgres: pgx and sqlx) -`--statediff.db.host` is the hostname/ip to dial to connect to the database -`--statediff.db.port` is the port to dial to connect to the database -`--statediff.db.name` is the name of the database to connect to -`--statediff.db.user` is the user to connect to the database as -`--statediff.db.password` is the password to use to connect to the database -`--statediff.db.conntimeout` is the connection timeout (in seconds) -`--statediff.db.maxconns` is the maximum number of database connections -`--statediff.db.minconns` is the minimum number of database connections -`--statediff.db.maxidleconns` is the maximum number of idle connections -`--statediff.db.maxconnidletime` is the maximum lifetime for an idle connection (in seconds) -`--statediff.db.maxconnlifetime` is the maximum lifetime for a connection (in seconds) `--statediff.db.nodeid` is the node id to use in the Postgres database `--statediff.db.clientname` is the client name to use in the Postgres database -`--statediff.file.path` full path (including filename) to write statediff data out to when operating in file mode +`--statediff.db.v2.driver` is the specific driver to use for the v2 database (current options for postgres: pgx and sqlx) +`--statediff.db.v2.host` is the hostname/ip to dial to connect to the v2 database +`--statediff.db.v2.port` is the port to dial to connect to the v2 database +`--statediff.db.v2.name` is the name of the v2 database to connect to +`--statediff.db.v2.user` is the user to connect to the v2 database as +`--statediff.db.v2.password` is the password to use to connect to the v2 database +`--statediff.db.v2.conntimeout` is the connection timeout (in seconds) for v2 database +`--statediff.db.v2.maxconns` is the maximum number of database connections for v2 database +`--statediff.db.v2.minconns` is the minimum number of database connections for v2 database +`--statediff.db.v2.maxidleconns` is the maximum number of idle connections for v2 database +`--statediff.db.v2.maxconnidletime` is the maximum lifetime for an idle connection (in seconds) for v2 database +`--statediff.db.v2.maxconnlifetime` is the maximum lifetime for a connection (in seconds) for v2 database +`--statediff.db.v3.driver` is the specific driver to use for the v3 database (current options for postgres: pgx and sqlx) +`--statediff.db.v3.host` is the hostname/ip to dial to 
connect to the v3 database +`--statediff.db.v3.port` is the port to dial to connect to the v3 database +`--statediff.db.v3.name` is the name of the v3 database to connect to +`--statediff.db.v3.user` is the user to connect to the v3 database as +`--statediff.db.v3.password` is the password to use to connect to the v3 database +`--statediff.db.v3.conntimeout` is the connection timeout (in seconds) for v3 database +`--statediff.db.v3.maxconns` is the maximum number of database connections for v3 database +`--statediff.db.v3.minconns` is the minimum number of database connections for v3 database +`--statediff.db.v3.maxidleconns` is the maximum number of idle connections for v3 database +`--statediff.db.v3.maxconnidletime` is the maximum lifetime for an idle connection (in seconds) for v3 database +`--statediff.db.v3.maxconnlifetime` is the maximum lifetime for a connection (in seconds) for v3 database The service can only operate in full sync mode (`--syncmode=full`), but only the historical RPC endpoints require an archive node (`--gcmode=archive`) e.g. -` -./build/bin/geth --syncmode=full --gcmode=archive --statediff --statediff.writing --statediff.db.type=postgres --statediff.db.driver=sqlx --statediff.db.host=localhost --statediff.db.port=5432 --statediff.db.name=vulcanize_test --statediff.db.user=postgres --statediff.db.nodeid=nodeid --statediff.db.clientname=clientname -` +`./build/bin/geth --syncmode=full --gcmode=archive --statediff --statediff.writing --statediff.db.type=postgres --statediff.db.nodeid=nodeid --statediff.db.v2.driver=sqlx +--statediff.db.v3.driver=sqlx --statediff.db.v2.host=localhost --statediff.db.v3.host=localhost --statediff.db.v2.port=5432 --statediff.db.v3.port=5432 +--statediff.db.v2.name=vulcanize_dual_v2 --statediff.db.v3.name=vulcanize_dual_v3 --statediff.db.v2.user=postgres --statediff.db.v3.user=postgres +--statediff.db.clientname=clientname --statediff.workers=20` When operating in `--statediff.db.type=file` mode, the service will write SQL statements out to the file designated by `--statediff.file.path`. Please note that it writes out SQL statements with all `ON CONFLICT` constraint checks dropped. 
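As a minimal illustration of how the paired `--statediff.db.v2.*` / `--statediff.db.v3.*` flag values in the example command above map onto the configuration types in `statediff/indexer/database/sql/postgres/config.go`, the sketch below is not part of this patch: it assumes the `Config` and `MultiConfig` structs shown in that file, reuses the host, port, database, and user values from the example invocation, and omits any fields (such as driver selection) not spelled out here.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
)

func main() {
	// Hypothetical wiring of the dual-database example above:
	// one postgres.Config per schema version, grouped in MultiConfig.
	cfg := postgres.MultiConfig{
		V2: postgres.Config{
			Hostname:     "localhost",
			Port:         5432,
			DatabaseName: "vulcanize_dual_v2",
			Username:     "postgres",
		},
		V3: postgres.Config{
			Hostname:     "localhost",
			Port:         5432,
			DatabaseName: "vulcanize_dual_v3",
			Username:     "postgres",
		},
	}
	fmt.Printf("v2: %+v\nv3: %+v\n", cfg.V2, cfg.V3)
}
```

In practice these values are populated from the corresponding CLI flags documented above rather than hard-coded.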
diff --git a/statediff/indexer/constructor.go b/statediff/indexer/constructor.go index a8f2d5211..f00af67e1 100644 --- a/statediff/indexer/constructor.go +++ b/statediff/indexer/constructor.go @@ -66,7 +66,7 @@ func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, n default: return nil, fmt.Errorf("unrecongized Postgres driver type: %s", pgc.V2.Driver) } - switch pgc.V2.Driver { + switch pgc.V3.Driver { case postgres.PGX: newDriver, err = postgres.NewPGXDriver(ctx, pgc.V3) if err != nil { diff --git a/statediff/indexer/database/dump/batch_tx.go b/statediff/indexer/database/dump/batch_tx.go index 9e001dbca..0d7e5f3b3 100644 --- a/statediff/indexer/database/dump/batch_tx.go +++ b/statediff/indexer/database/dump/batch_tx.go @@ -20,6 +20,8 @@ import ( "fmt" "io" + sharedModels "github.com/ethereum/go-ethereum/statediff/indexer/models/shared" + "github.com/ethereum/go-ethereum/statediff/indexer/ipld" blockstore "github.com/ipfs/go-ipfs-blockstore" @@ -32,8 +34,8 @@ type BatchTx struct { BlockNumber uint64 dump io.Writer quit chan struct{} - iplds chan v3.IPLDModel - ipldCache v3.IPLDBatch + iplds chan sharedModels.IPLDModel + ipldCache sharedModels.IPLDBatch submit func(blockTx *BatchTx, err error) error } @@ -47,7 +49,7 @@ func (tx *BatchTx) flush() error { if _, err := fmt.Fprintf(tx.dump, "%+v\r\n", tx.ipldCache); err != nil { return err } - tx.ipldCache = v3.IPLDBatch{} + tx.ipldCache = sharedModels.IPLDBatch{} return nil } @@ -59,21 +61,21 @@ func (tx *BatchTx) cache() { tx.ipldCache.Keys = append(tx.ipldCache.Keys, i.Key) tx.ipldCache.Values = append(tx.ipldCache.Values, i.Data) case <-tx.quit: - tx.ipldCache = v3.IPLDBatch{} + tx.ipldCache = sharedModels.IPLDBatch{} return } } } func (tx *BatchTx) cacheDirect(key string, value []byte) { - tx.iplds <- v3.IPLDModel{ + tx.iplds <- sharedModels.IPLDModel{ Key: key, Data: value, } } func (tx *BatchTx) cacheIPLD(i node.Node) { - tx.iplds <- v3.IPLDModel{ + tx.iplds <- sharedModels.IPLDModel{ Key: blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(), Data: i.RawData(), } @@ -85,7 +87,7 @@ func (tx *BatchTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error return "", "", err } prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String() - tx.iplds <- v3.IPLDModel{ + tx.iplds <- sharedModels.IPLDModel{ Key: prefixedKey, Data: raw, } diff --git a/statediff/indexer/database/file/indexer_test.go b/statediff/indexer/database/file/indexer_test.go index e5a030dcf..52bcfea68 100644 --- a/statediff/indexer/database/file/indexer_test.go +++ b/statediff/indexer/database/file/indexer_test.go @@ -24,8 +24,6 @@ import ( "os" "testing" - "github.com/ethereum/go-ethereum/statediff/indexer/models/v2" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/statediff/indexer/shared" diff --git a/statediff/indexer/database/sql/batch_tx.go b/statediff/indexer/database/sql/batch_tx.go index deec4f07b..d72c7a72c 100644 --- a/statediff/indexer/database/sql/batch_tx.go +++ b/statediff/indexer/database/sql/batch_tx.go @@ -18,6 +18,7 @@ package sql import ( "context" + "fmt" "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" @@ -53,11 +54,11 @@ func (tx *BatchTx) Submit(err error) error { func (tx *BatchTx) flush() error { _, err := tx.oldDBTx.Exec(tx.ctx, tx.oldStmt, pq.Array(tx.ipldCache.Keys), pq.Array(tx.ipldCache.Values)) if err != nil { - return err + return fmt.Errorf("error flushing IPLD cache to 
old DB: %v", err) } _, err = tx.newDBTx.Exec(tx.ctx, tx.newStmt, pq.Array(tx.ipldCache.Keys), pq.Array(tx.ipldCache.Values)) if err != nil { - return err + return fmt.Errorf("error flushing IPLD cache to new DB: %v", err) } tx.ipldCache = modelsShared.IPLDBatch{} return nil diff --git a/statediff/indexer/database/sql/indexer.go b/statediff/indexer/database/sql/indexer.go index 2b67832f1..010a675bb 100644 --- a/statediff/indexer/database/sql/indexer.go +++ b/statediff/indexer/database/sql/indexer.go @@ -27,6 +27,8 @@ import ( "strings" "time" + metrics2 "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/metrics" + "github.com/ipfs/go-cid" node "github.com/ipfs/go-ipld-format" "github.com/multiformats/go-multihash" @@ -53,8 +55,8 @@ import ( var _ interfaces.StateDiffIndexer = &StateDiffIndexer{} var ( - indexerMetrics = RegisterIndexerMetrics(metrics.DefaultRegistry) - dbMetrics = RegisterDBMetrics(metrics.DefaultRegistry) + indexerMetrics = metrics2.RegisterIndexerMetrics(metrics.DefaultRegistry) + dbMetrics = metrics2.RegisterDBMetrics(metrics.DefaultRegistry) ) // StateDiffIndexer satisfies the indexer.StateDiffIndexer interface for ethereum statediff objects on top of an SQL sql @@ -69,19 +71,19 @@ type StateDiffIndexer struct { func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, info nodeInfo.Info, old, new interfaces.Database) (*StateDiffIndexer, error) { // Write the removed node to the db on init if _, err := old.Exec(ctx, old.InsertIPLDStm(), shared.RemovedNodeMhKey, []byte{}); err != nil { - return nil, err + return nil, fmt.Errorf("unable to write removed node IPLD to old DB: %v", err) } if _, err := new.Exec(ctx, new.InsertIPLDStm(), shared.RemovedNodeMhKey, []byte{}); err != nil { - return nil, err + return nil, fmt.Errorf("unable to write removed node IPLD to new DB: %v", err) } // Write node info to the db on init oldWriter := v2Writer.NewWriter(old) newWriter := v3Writer.NewWriter(new) if err := oldWriter.InsertNodeInfo(info); err != nil { - return nil, err + return nil, fmt.Errorf("unable to write node info to old DB: %v", err) } if err := newWriter.InsertNodeInfo(info); err != nil { - return nil, err + return nil, fmt.Errorf("unable to write node info to new DB: %v", err) } return &StateDiffIndexer{ ctx: ctx, @@ -171,31 +173,26 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip if err != nil { return nil, 0, err } - defer func() { - if p := recover(); p != nil { - rollback(sdi.ctx, oldTx) - panic(p) - } else if err != nil { - rollback(sdi.ctx, oldTx) - } - }() newTx, err := sdi.newDBWriter.DB.Begin(sdi.ctx) if err != nil { + rollback(sdi.ctx, oldTx) return nil, 0, err } defer func() { if p := recover(); p != nil { rollback(sdi.ctx, newTx) + rollback(sdi.ctx, oldTx) panic(p) } else if err != nil { rollback(sdi.ctx, newTx) + rollback(sdi.ctx, oldTx) } }() blockTx := &BatchTx{ ctx: sdi.ctx, BlockNumber: height, oldStmt: sdi.oldDBWriter.DB.InsertIPLDsStm(), - newStmt: sdi.newDBWriter.DB.InsertStateStm(), + newStmt: sdi.newDBWriter.DB.InsertIPLDsStm(), iplds: make(chan sharedModels.IPLDModel), quit: make(chan struct{}), ipldCache: sharedModels.IPLDBatch{}, @@ -310,10 +307,11 @@ func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, he baseFee = new(string) *baseFee = header.BaseFee.String() } + mhKey := shared.MultihashKeyFromCID(headerNode.Cid()) // index header headerID, err := sdi.oldDBWriter.InsertHeaderCID(tx.oldDBTx, &v2Models.HeaderModel{ CID: headerNode.Cid().String(), - 
MhKey: shared.MultihashKeyFromCID(headerNode.Cid()), + MhKey: mhKey, ParentHash: header.ParentHash.String(), BlockNumber: header.Number.String(), BlockHash: header.Hash().String(), @@ -330,9 +328,9 @@ func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, he if err != nil { return 0, err } - if err := sdi.newDBWriter.InsertHeaderCID(tx.newDBTx, v3Models.HeaderModel{ + if err := sdi.newDBWriter.InsertHeaderCID(tx.newDBTx, &v3Models.HeaderModel{ CID: headerNode.Cid().String(), - MhKey: shared.MultihashKeyFromCID(headerNode.Cid()), + MhKey: mhKey, ParentHash: header.ParentHash.String(), BlockNumber: header.Number.String(), BlockHash: header.Hash().String(), @@ -363,10 +361,11 @@ func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerHash string, heade } else { uncleReward = shared.CalcUncleMinerReward(blockNumber, uncleNode.Number.Uint64()) } + mhKey := shared.MultihashKeyFromCID(uncleNode.Cid()) if err := sdi.oldDBWriter.InsertUncleCID(tx.oldDBTx, &v2Models.UncleModel{ HeaderID: headerID, CID: uncleNode.Cid().String(), - MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()), + MhKey: mhKey, ParentHash: uncleNode.ParentHash.String(), BlockHash: uncleNode.Hash().String(), Reward: uncleReward.String(), @@ -376,7 +375,7 @@ func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerHash string, heade if err := sdi.newDBWriter.InsertUncleCID(tx.newDBTx, &v3Models.UncleModel{ HeaderID: headerHash, CID: uncleNode.Cid().String(), - MhKey: shared.MultihashKeyFromCID(uncleNode.Cid()), + MhKey: mhKey, ParentHash: uncleNode.ParentHash.String(), BlockHash: uncleNode.Hash().String(), Reward: uncleReward.String(), @@ -428,15 +427,18 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs if err != nil { return fmt.Errorf("error deriving tx sender: %v", err) } + mhKey := shared.MultihashKeyFromCID(txNode.Cid()) + dst := shared.HandleZeroAddrPointer(trx.To()) + src := shared.HandleZeroAddr(from) txID, err := sdi.oldDBWriter.InsertTransactionCID(tx.oldDBTx, &v2Models.TxModel{ HeaderID: args.headerID, - Dst: shared.HandleZeroAddrPointer(trx.To()), - Src: shared.HandleZeroAddr(from), + Dst: dst, + Src: src, TxHash: txHash, Index: int64(i), Data: trx.Data(), CID: txNode.Cid().String(), - MhKey: shared.MultihashKeyFromCID(txNode.Cid()), + MhKey: mhKey, Type: trx.Type(), }) if err != nil { @@ -444,13 +446,13 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs } if err := sdi.newDBWriter.InsertTransactionCID(tx.newDBTx, &v3Models.TxModel{ HeaderID: args.headerHash, - Dst: shared.HandleZeroAddrPointer(trx.To()), - Src: shared.HandleZeroAddr(from), + Dst: dst, + Src: src, TxHash: txHash, Index: int64(i), Data: trx.Data(), CID: txNode.Cid().String(), - MhKey: shared.MultihashKeyFromCID(txNode.Cid()), + MhKey: mhKey, Type: trx.Type(), Value: val, }); err != nil { @@ -500,12 +502,13 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs postState = common.Bytes2Hex(receipt.PostState) } + rctMhKey := shared.MultihashKeyFromCID(args.rctLeafNodeCIDs[i]) rctID, err := sdi.oldDBWriter.InsertReceiptCID(tx.oldDBTx, &v2Models.ReceiptModel{ TxID: txID, Contract: contract, ContractHash: contractHash, LeafCID: args.rctLeafNodeCIDs[i].String(), - LeafMhKey: shared.MultihashKeyFromCID(args.rctLeafNodeCIDs[i]), + LeafMhKey: rctMhKey, LogRoot: args.rctNodes[i].LogRoot.String(), PostState: postState, PostStatus: postStatus, @@ -518,7 +521,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args 
processArgs Contract: contract, ContractHash: contractHash, LeafCID: args.rctLeafNodeCIDs[i].String(), - LeafMhKey: shared.MultihashKeyFromCID(args.rctLeafNodeCIDs[i]), + LeafMhKey: rctMhKey, LogRoot: args.rctNodes[i].LogRoot.String(), PostState: postState, PostStatus: postStatus, @@ -540,13 +543,14 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs return fmt.Errorf("invalid log cid") } + logMhKey := shared.MultihashKeyFromCID(args.logLeafNodeCIDs[i][idx]) oldLogDataSet[idx] = &v2Models.LogsModel{ ReceiptID: rctID, Address: l.Address.String(), Index: int64(l.Index), Data: l.Data, LeafCID: args.logLeafNodeCIDs[i][idx].String(), - LeafMhKey: shared.MultihashKeyFromCID(args.logLeafNodeCIDs[i][idx]), + LeafMhKey: logMhKey, Topic0: topicSet[0], Topic1: topicSet[1], Topic2: topicSet[2], @@ -558,7 +562,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs Index: int64(l.Index), Data: l.Data, LeafCID: args.logLeafNodeCIDs[i][idx].String(), - LeafMhKey: shared.MultihashKeyFromCID(args.logLeafNodeCIDs[i][idx]), + LeafMhKey: logMhKey, Topic0: topicSet[0], Topic1: topicSet[1], Topic2: topicSet[2], diff --git a/statediff/indexer/database/sql/metrics.go b/statediff/indexer/database/sql/metrics/metrics.go similarity index 86% rename from statediff/indexer/database/sql/metrics.go rename to statediff/indexer/database/sql/metrics/metrics.go index f59edcf14..9afc5f4b1 100644 --- a/statediff/indexer/database/sql/metrics.go +++ b/statediff/indexer/database/sql/metrics/metrics.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -package sql +package metrics import ( "strings" @@ -41,7 +41,7 @@ func metricName(subsystem, name string) string { return strings.Join(parts, "/") } -type IndexerMetricsHandles struct { +type WriterMetricsHandles struct { // The total number of processed blocks Blocks metrics.Counter // The total number of processed transactions @@ -52,6 +52,9 @@ type IndexerMetricsHandles struct { Logs metrics.Counter // The total number of access list entries processed AccessListEntries metrics.Counter +} + +type IndexerMetricsHandles struct { // Time spent waiting for free postgres tx TimeFreePostgres metrics.Timer // Postgres transaction commit duration @@ -66,13 +69,25 @@ type IndexerMetricsHandles struct { TimeStateStoreCodeProcessing metrics.Timer } +func RegisterWriterMetrics(reg metrics.Registry, version string) WriterMetricsHandles { + ctx := WriterMetricsHandles{ + Blocks: metrics.NewCounter(), + Transactions: metrics.NewCounter(), + Receipts: metrics.NewCounter(), + Logs: metrics.NewCounter(), + AccessListEntries: metrics.NewCounter(), + } + subsys := "writer" + reg.Register(metricName(subsys, version+"/"+"blocks"), ctx.Blocks) + reg.Register(metricName(subsys, version+"/"+"transactions"), ctx.Transactions) + reg.Register(metricName(subsys, version+"/"+"receipts"), ctx.Receipts) + reg.Register(metricName(subsys, version+"/"+"logs"), ctx.Logs) + reg.Register(metricName(subsys, version+"/"+"access_list_entries"), ctx.AccessListEntries) + return ctx +} + func RegisterIndexerMetrics(reg metrics.Registry) IndexerMetricsHandles { ctx := IndexerMetricsHandles{ - Blocks: metrics.NewCounter(), - Transactions: metrics.NewCounter(), - Receipts: metrics.NewCounter(), - Logs: metrics.NewCounter(), - AccessListEntries: metrics.NewCounter(), TimeFreePostgres: metrics.NewTimer(), TimePostgresCommit: metrics.NewTimer(), TimeHeaderProcessing: 
metrics.NewTimer(), @@ -81,11 +96,6 @@ func RegisterIndexerMetrics(reg metrics.Registry) IndexerMetricsHandles { TimeStateStoreCodeProcessing: metrics.NewTimer(), } subsys := "indexer" - reg.Register(metricName(subsys, "blocks"), ctx.Blocks) - reg.Register(metricName(subsys, "transactions"), ctx.Transactions) - reg.Register(metricName(subsys, "receipts"), ctx.Receipts) - reg.Register(metricName(subsys, "logs"), ctx.Logs) - reg.Register(metricName(subsys, "access_list_entries"), ctx.AccessListEntries) reg.Register(metricName(subsys, "t_free_postgres"), ctx.TimeFreePostgres) reg.Register(metricName(subsys, "t_postgres_commit"), ctx.TimePostgresCommit) reg.Register(metricName(subsys, "t_header_processing"), ctx.TimeHeaderProcessing) diff --git a/statediff/indexer/database/sql/postgres/pgx.go b/statediff/indexer/database/sql/postgres/pgx.go index b720f7fe5..9911d45bf 100644 --- a/statediff/indexer/database/sql/postgres/pgx.go +++ b/statediff/indexer/database/sql/postgres/pgx.go @@ -25,6 +25,7 @@ import ( "github.com/jackc/pgx/v4" "github.com/jackc/pgx/v4/pgxpool" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" ) @@ -37,6 +38,7 @@ type PGXDriver struct { // NewPGXDriver returns a new pgx driver // it initializes the connection pool and creates the node info table func NewPGXDriver(ctx context.Context, config Config) (*PGXDriver, error) { + log.Info("connecting to database", "connection string", config.DbConnectionString()) pgConf, err := MakeConfig(config) if err != nil { return nil, err diff --git a/statediff/indexer/database/sql/postgres/sqlx.go b/statediff/indexer/database/sql/postgres/sqlx.go index 733c35734..6187c9f38 100644 --- a/statediff/indexer/database/sql/postgres/sqlx.go +++ b/statediff/indexer/database/sql/postgres/sqlx.go @@ -18,12 +18,14 @@ package postgres import ( "context" - coresql "database/sql" "time" - "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" + coresql "database/sql" "github.com/jmoiron/sqlx" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" ) // SQLXDriver driver, implements sql.Driver @@ -35,7 +37,9 @@ type SQLXDriver struct { // NewSQLXDriver returns a new sqlx driver for Postgres // it initializes the connection pool and creates the node info table func NewSQLXDriver(ctx context.Context, config Config) (*SQLXDriver, error) { - db, err := sqlx.ConnectContext(ctx, "postgres", config.DbConnectionString()) + connStr := config.DbConnectionString() + log.Info("connecting to database", "connection string", connStr) + db, err := sqlx.ConnectContext(ctx, "postgres", connStr) if err != nil { return &SQLXDriver{}, ErrDBConnectionFailed(err) } diff --git a/statediff/indexer/database/sql/postgres/v2/database.go b/statediff/indexer/database/sql/postgres/v2/database.go index f632e09c5..d4ca1efc8 100644 --- a/statediff/indexer/database/sql/postgres/v2/database.go +++ b/statediff/indexer/database/sql/postgres/v2/database.go @@ -37,7 +37,9 @@ type DB struct { // InsertNodeInfoStm satisfies interfaces.Statements func (db *DB) InsertNodeInfoStm() string { return `INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) VALUES ($1, $2, $3, $4, $5) - ON CONFLICT (genesis_block, network_id, node_id, chain_id) DO NOTHING` + ON CONFLICT (genesis_block, network_id, node_id, chain_id) + DO UPDATE SET client_name = $4 + RETURNING ID` } // InsertHeaderStm satisfies the interfaces.Statements diff --git 
a/statediff/indexer/database/sql/test_helpers.go b/statediff/indexer/database/sql/test_helpers.go index b1032f8ff..1f392b93f 100644 --- a/statediff/indexer/database/sql/test_helpers.go +++ b/statediff/indexer/database/sql/test_helpers.go @@ -19,10 +19,12 @@ package sql import ( "context" "testing" + + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" ) // TearDownDB is used to tear down the watcher dbs after tests -func TearDownDB(t *testing.T, db Database) { +func TearDownDB(t *testing.T, db interfaces.Database) { ctx := context.Background() tx, err := db.Begin(ctx) if err != nil { diff --git a/statediff/indexer/database/sql/v2/writer.go b/statediff/indexer/database/sql/v2/writer.go index feeb49c56..7b4e2a758 100644 --- a/statediff/indexer/database/sql/v2/writer.go +++ b/statediff/indexer/database/sql/v2/writer.go @@ -20,21 +20,22 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" + "github.com/ethereum/go-ethereum/metrics" + metrics2 "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/metrics" "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" "github.com/ethereum/go-ethereum/statediff/indexer/models/v2" "github.com/ethereum/go-ethereum/statediff/indexer/node" ) var ( - nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") + nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") + writerV2Metrics = metrics2.RegisterWriterMetrics(metrics.DefaultRegistry, "v2") ) // Writer handles processing and writing of indexed IPLD objects to Postgres type Writer struct { - DB interfaces.Database - metrics sql.IndexerMetricsHandles - nodeID int64 + DB interfaces.Database + nodeID int64 } // NewWriter creates a new pointer to a Writer @@ -52,7 +53,7 @@ func (w *Writer) Close() error { /* InsertNodeInfo inserts a node info model INSERT INTO nodes (genesis_block, network_id, node_id, client_name, chain_id) VALUES ($1, $2, $3, $4, $5) -ON CONFLICT (genesis_block, network_id, node_id, chain_id) DO NOTHING +ON CONFLICT (genesis_block, network_id, node_id, chain_id) DO NOTHING RETURNING ID */ func (w *Writer) InsertNodeInfo(info node.Info) error { var nodeID int64 @@ -79,7 +80,7 @@ func (w *Writer) InsertHeaderCID(tx interfaces.Tx, header *models.HeaderModel) ( if err != nil { return 0, fmt.Errorf("error inserting header_cids entry: %v", err) } - w.metrics.Blocks.Inc(1) + writerV2Metrics.Blocks.Inc(1) return headerID, nil } @@ -110,7 +111,7 @@ func (w *Writer) InsertTransactionCID(tx interfaces.Tx, transaction *models.TxMo if err != nil { return 0, fmt.Errorf("error inserting transaction_cids entry: %v", err) } - w.metrics.Transactions.Inc(1) + writerV2Metrics.Transactions.Inc(1) return txID, nil } @@ -125,7 +126,7 @@ func (w *Writer) InsertAccessListElement(tx interfaces.Tx, accessListElement *mo if err != nil { return fmt.Errorf("error inserting access_list_element entry: %v", err) } - w.metrics.AccessListEntries.Inc(1) + writerV2Metrics.AccessListEntries.Inc(1) return nil } @@ -141,7 +142,7 @@ func (w *Writer) InsertReceiptCID(tx interfaces.Tx, rct *models.ReceiptModel) (i if err != nil { return 0, fmt.Errorf("error inserting receipt_cids entry: %w", err) } - w.metrics.Receipts.Inc(1) + writerV2Metrics.Receipts.Inc(1) return receiptID, nil } @@ -158,7 +159,7 @@ func (w *Writer) InsertLogCID(tx interfaces.Tx, logs []*models.LogsModel) error if err != nil { return fmt.Errorf("error inserting logs entry: %w", err)
} - w.metrics.Logs.Inc(1) + writerV2Metrics.Logs.Inc(1) } return nil } diff --git a/statediff/indexer/database/sql/v3/writer.go b/statediff/indexer/database/sql/v3/writer.go index 593cb3339..2cda907ad 100644 --- a/statediff/indexer/database/sql/v3/writer.go +++ b/statediff/indexer/database/sql/v3/writer.go @@ -20,21 +20,22 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" + "github.com/ethereum/go-ethereum/metrics" + metrics2 "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/metrics" "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" "github.com/ethereum/go-ethereum/statediff/indexer/models/v3" "github.com/ethereum/go-ethereum/statediff/indexer/node" ) var ( - nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") + nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") + writerV3Metrics = metrics2.RegisterWriterMetrics(metrics.DefaultRegistry, "v3") ) // Writer handles processing and writing of indexed IPLD objects to Postgres type Writer struct { - DB interfaces.Database - metrics sql.IndexerMetricsHandles - nodeID string + DB interfaces.Database + nodeID string } // NewWriter creates a new pointer to a Writer @@ -69,7 +70,7 @@ INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, nod VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) ON CONFLICT (block_hash) DO UPDATE SET (block_number, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = ($1, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, eth.header_cids.times_validated + 1, $16) */ -func (w *Writer) InsertHeaderCID(tx interfaces.Tx, header models.HeaderModel) error { +func (w *Writer) InsertHeaderCID(tx interfaces.Tx, header *models.HeaderModel) error { _, err := tx.Exec(w.DB.Context(), w.DB.InsertHeaderStm(), header.BlockNumber, header.BlockHash, header.ParentHash, header.CID, header.TotalDifficulty, w.nodeID, header.Reward, header.StateRoot, header.TxRoot, header.RctRoot, header.UncleRoot, header.Bloom, @@ -77,7 +78,7 @@ func (w *Writer) InsertHeaderCID(tx interfaces.Tx, header models.HeaderModel) er if err != nil { return fmt.Errorf("error inserting header_cids entry: %v", err) } - w.metrics.Blocks.Inc(1) + writerV3Metrics.Blocks.Inc(1) return nil } @@ -107,7 +108,7 @@ func (w *Writer) InsertTransactionCID(tx interfaces.Tx, transaction *models.TxMo if err != nil { return fmt.Errorf("error inserting transaction_cids entry: %v", err) } - w.metrics.Transactions.Inc(1) + writerV3Metrics.Transactions.Inc(1) return nil } @@ -122,7 +123,7 @@ func (w *Writer) InsertAccessListElement(tx interfaces.Tx, accessListElement *mo if err != nil { return fmt.Errorf("error inserting access_list_element entry: %v", err) } - w.metrics.AccessListEntries.Inc(1) + writerV3Metrics.AccessListEntries.Inc(1) return nil } @@ -137,7 +138,7 @@ func (w *Writer) InsertReceiptCID(tx interfaces.Tx, rct *models.ReceiptModel) er if err != nil { return fmt.Errorf("error inserting receipt_cids entry: %w", err) } - w.metrics.Receipts.Inc(1) + writerV3Metrics.Receipts.Inc(1) return nil } @@ -154,7 +155,7 @@ func (w *Writer) InsertLogCID(tx interfaces.Tx, logs []*models.LogsModel) error if err != nil { return fmt.Errorf("error inserting logs entry: %w", err) } - w.metrics.Logs.Inc(1) + writerV3Metrics.Logs.Inc(1) } return nil } diff --git 
a/statediff/service.go b/statediff/service.go index 88186b99d..46bcb9213 100644 --- a/statediff/service.go +++ b/statediff/service.go @@ -18,6 +18,7 @@ package statediff import ( "bytes" + "fmt" "math/big" "strconv" "strings" @@ -162,7 +163,7 @@ func New(stack *node.Node, ethServ *eth.Ethereum, cfg *ethconfig.Config, params var err error indexer, err = ind.NewStateDiffIndexer(params.Context, blockChain.Config(), info, params.IndexerConfig) if err != nil { - return err + return fmt.Errorf("unable to initialize a new statediff indexer: %v", err) } indexer.ReportOldDBMetrics(10*time.Second, quitCh) indexer.ReportNewDBMetrics(10*time.Second, quitCh) @@ -235,7 +236,7 @@ func (sds *Service) WriteLoop(chainEventCh chan core.ChainEvent) { chainEventSub := sds.BlockChain.SubscribeChainEvent(chainEventCh) defer chainEventSub.Unsubscribe() errCh := chainEventSub.Err() - var wg sync.WaitGroup + wg := new(sync.WaitGroup) // Process metrics for chain events, then forward to workers chainEventFwd := make(chan core.ChainEvent, chainEventChanSize) wg.Add(1) @@ -266,7 +267,7 @@ func (sds *Service) WriteLoop(chainEventCh chan core.ChainEvent) { }() wg.Add(int(sds.numWorkers)) for worker := uint(0); worker < sds.numWorkers; worker++ { - params := workerParams{chainEventCh: chainEventFwd, wg: &wg, id: worker} + params := workerParams{chainEventCh: chainEventFwd, wg: wg, id: worker} go sds.writeLoopWorker(params) } wg.Wait() -- 2.45.2 From c4b397de2cfe6abdf99099c410bebb0c9d704aa7 Mon Sep 17 00:00:00 2001 From: i-norden Date: Mon, 31 Jan 2022 12:55:55 -0600 Subject: [PATCH 5/7] fix tests in CI --- .github/workflows/on-pr.yml | 9 +- docker-compose.yml | 18 +++- .../database/file/indexer_legacy_test.go | 5 +- .../indexer/database/file/indexer_test.go | 34 ++++--- .../file/mainnet_tests/indexer_test.go | 5 +- statediff/indexer/database/sql/indexer.go | 3 + .../sql/mainnet_tests/indexer_test.go | 17 +++- .../database/sql/pgx_indexer_legacy_test.go | 9 +- .../indexer/database/sql/pgx_indexer_test.go | 82 +++++----------- .../indexer/database/sql/postgres/config.go | 20 +++- .../indexer/database/sql/postgres/errors.go | 5 - .../indexer/database/sql/postgres/pgx_test.go | 17 +--- .../database/sql/postgres/sqlx_test.go | 18 +--- .../database/sql/postgres/test_helpers.go | 8 +- .../database/sql/sqlx_indexer_legacy_test.go | 10 +- .../indexer/database/sql/sqlx_indexer_test.go | 93 +++++++------------ statediff/indexer/mocks/test_data.go | 6 +- 17 files changed, 161 insertions(+), 198 deletions(-) diff --git a/.github/workflows/on-pr.yml b/.github/workflows/on-pr.yml index af20fb410..8c1a1e974 100644 --- a/.github/workflows/on-pr.yml +++ b/.github/workflows/on-pr.yml @@ -58,9 +58,12 @@ jobs: - name: Checkout code uses: actions/checkout@v2 - - name: Start database - run: docker-compose -f docker-compose.yml up -d ipld-eth-db + - name: Start database v2 + run: docker-compose -f docker-compose.yml up -d ipld-eth-db-v2 + + - name: Start database v3 + run: docker-compose -f docker-compose.yml up -d ipld-eth-db-v3 - name: Run unit tests run: - make statedifftest \ No newline at end of file + make statedifftest diff --git a/docker-compose.yml b/docker-compose.yml index 32dc46147..001c2650a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,9 +1,21 @@ version: '3.2' services: - ipld-eth-db: + ipld-eth-db-v2: restart: always - image: vulcanize/ipld-eth-db:v0.3.1 + image: vulcanize/ipld-eth-db:v2.0.0 + environment: + POSTGRES_USER: "vdbm" + POSTGRES_DB: "vulcanize_testing_v2" + POSTGRES_PASSWORD: "password" + volumes: + 
- geth_node:/var/lib/postgresql/data + ports: + - "127.0.0.1:5432:5432" + + ipld-eth-db-v3: + restart: always + image: vulcanize/ipld-eth-db:v3.0.6 environment: POSTGRES_USER: "vdbm" POSTGRES_DB: "vulcanize_testing_v3" @@ -11,7 +23,7 @@ services: volumes: - geth_node:/var/lib/postgresql/data ports: - - "127.0.0.1:5432:5432" + - "127.0.0.1:5433:5432" volumes: geth_node: diff --git a/statediff/indexer/database/file/indexer_legacy_test.go b/statediff/indexer/database/file/indexer_legacy_test.go index 56bca2683..95d968a22 100644 --- a/statediff/indexer/database/file/indexer_legacy_test.go +++ b/statediff/indexer/database/file/indexer_legacy_test.go @@ -52,7 +52,8 @@ func setupLegacy(t *testing.T) { ind, err := file.NewStateDiffIndexer(context.Background(), legacyData.Config, file.TestConfig) require.NoError(t, err) var tx interfaces.Batch - tx, err = ind.PushBlock( + var headerID int64 + tx, headerID, err = ind.PushBlock( mockLegacyBlock, legacyData.MockReceipts, legacyData.MockBlock.Difficulty()) @@ -67,7 +68,7 @@ func setupLegacy(t *testing.T) { } }() for _, node := range legacyData.StateDiffs { - err = ind.PushStateNode(tx, node, legacyData.MockBlock.Hash().String()) + err = ind.PushStateNode(tx, node, legacyData.MockBlock.Hash().String(), headerID) require.NoError(t, err) } diff --git a/statediff/indexer/database/file/indexer_test.go b/statediff/indexer/database/file/indexer_test.go index 52bcfea68..d842f53d3 100644 --- a/statediff/indexer/database/file/indexer_test.go +++ b/statediff/indexer/database/file/indexer_test.go @@ -24,6 +24,9 @@ import ( "os" "testing" + sharedModels "github.com/ethereum/go-ethereum/statediff/indexer/models/shared" + v3Models "github.com/ethereum/go-ethereum/statediff/indexer/models/v3" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/statediff/indexer/shared" @@ -170,7 +173,8 @@ func setup(t *testing.T) { ind, err = file.NewStateDiffIndexer(context.Background(), mocks.TestConfig, file.TestConfig) require.NoError(t, err) var tx interfaces.Batch - tx, err = ind.PushBlock( + var headerID int64 + tx, headerID, err = ind.PushBlock( mockBlock, mocks.MockReceipts, mocks.MockBlock.Difficulty()) @@ -186,7 +190,7 @@ func setup(t *testing.T) { } }() for _, node := range mocks.StateDiffs { - err = ind.PushStateNode(tx, node, mockBlock.Hash().String()) + err = ind.PushStateNode(tx, node, mockBlock.Hash().String(), headerID) require.NoError(t, err) } @@ -330,7 +334,7 @@ func TestFileIndexer(t *testing.T) { if txRes.Value != transactions[3].Value().String() { t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value) } - accessListElementModels := make([]v2.AccessListElementModel, 0) + accessListElementModels := make([]v3Models.AccessListElementModel, 0) pgStr = `SELECT access_list_elements.* FROM eth.access_list_elements INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.tx_hash) WHERE cid = $1 ORDER BY access_list_elements.index ASC` err = sqlxdb.Select(&accessListElementModels, pgStr, c) if err != nil { @@ -339,11 +343,11 @@ func TestFileIndexer(t *testing.T) { if len(accessListElementModels) != 2 { t.Fatalf("expected two access list entries, got %d", len(accessListElementModels)) } - model1 := v2.AccessListElementModel{ + model1 := v3Models.AccessListElementModel{ Index: accessListElementModels[0].Index, Address: accessListElementModels[0].Address, } - model2 := v2.AccessListElementModel{ + model2 := v3Models.AccessListElementModel{ Index: 
accessListElementModels[1].Index, Address: accessListElementModels[1].Address, StorageKeys: accessListElementModels[1].StorageKeys, @@ -446,7 +450,7 @@ func TestFileIndexer(t *testing.T) { expectTrue(t, test_helpers.ListContainsString(rcts, rct5CID.String())) for idx, c := range rcts { - result := make([]v3.IPLDModel, 0) + result := make([]sharedModels.IPLDModel, 0) pgStr = `SELECT data FROM eth.receipt_cids INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key) @@ -530,7 +534,7 @@ func TestFileIndexer(t *testing.T) { defer tearDown(t) // check that state nodes were properly indexed and published - stateNodes := make([]v2.StateNodeModel, 0) + stateNodes := make([]v3Models.StateNodeModel, 0) pgStr := `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) WHERE header_cids.block_number = $1 AND node_type != 3` @@ -552,7 +556,7 @@ func TestFileIndexer(t *testing.T) { t.Fatal(err) } pgStr = `SELECT * from eth.state_accounts WHERE header_id = $1 AND state_path = $2` - var account v2.StateAccountModel + var account v3Models.StateAccountModel err = sqlxdb.Get(&account, pgStr, stateNode.HeaderID, stateNode.Path) if err != nil { t.Fatal(err) @@ -562,7 +566,7 @@ func TestFileIndexer(t *testing.T) { test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.ContractLeafKey).Hex()) test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x06'}) test_helpers.ExpectEqual(t, data, mocks.ContractLeafNode) - test_helpers.ExpectEqual(t, account, v2.StateAccountModel{ + test_helpers.ExpectEqual(t, account, v3Models.StateAccountModel{ HeaderID: account.HeaderID, StatePath: stateNode.Path, Balance: "0", @@ -576,7 +580,7 @@ func TestFileIndexer(t *testing.T) { test_helpers.ExpectEqual(t, stateNode.StateKey, common.BytesToHash(mocks.AccountLeafKey).Hex()) test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x0c'}) test_helpers.ExpectEqual(t, data, mocks.AccountLeafNode) - test_helpers.ExpectEqual(t, account, v2.StateAccountModel{ + test_helpers.ExpectEqual(t, account, v3Models.StateAccountModel{ HeaderID: account.HeaderID, StatePath: stateNode.Path, Balance: "1000", @@ -588,7 +592,7 @@ func TestFileIndexer(t *testing.T) { } // check that Removed state nodes were properly indexed and published - stateNodes = make([]v2.StateNodeModel, 0) + stateNodes = make([]v3Models.StateNodeModel, 0) pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) WHERE header_cids.block_number = $1 AND node_type = 3` @@ -621,7 +625,7 @@ func TestFileIndexer(t *testing.T) { defer tearDown(t) // check that storage nodes were properly indexed - storageNodes := make([]v2.StorageNodeWithStateKeyModel, 0) + storageNodes := make([]v3Models.StorageNodeWithStateKeyModel, 0) pgStr := `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path FROM eth.storage_cids, eth.state_cids, eth.header_cids WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id) @@ -633,7 +637,7 @@ func TestFileIndexer(t *testing.T) { t.Fatal(err) } test_helpers.ExpectEqual(t, len(storageNodes), 1) - test_helpers.ExpectEqual(t, storageNodes[0], v2.StorageNodeWithStateKeyModel{ + 
test_helpers.ExpectEqual(t, storageNodes[0], v3Models.StorageNodeWithStateKeyModel{ CID: storageCID.String(), NodeType: 2, StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(), @@ -654,7 +658,7 @@ func TestFileIndexer(t *testing.T) { test_helpers.ExpectEqual(t, data, mocks.StorageLeafNode) // check that Removed storage nodes were properly indexed - storageNodes = make([]v2.StorageNodeWithStateKeyModel, 0) + storageNodes = make([]v3Models.StorageNodeWithStateKeyModel, 0) pgStr = `SELECT storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path FROM eth.storage_cids, eth.state_cids, eth.header_cids WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id) @@ -666,7 +670,7 @@ func TestFileIndexer(t *testing.T) { t.Fatal(err) } test_helpers.ExpectEqual(t, len(storageNodes), 1) - test_helpers.ExpectEqual(t, storageNodes[0], v2.StorageNodeWithStateKeyModel{ + test_helpers.ExpectEqual(t, storageNodes[0], v3Models.StorageNodeWithStateKeyModel{ CID: shared.RemovedNodeStorageCID, NodeType: 3, StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(), diff --git a/statediff/indexer/database/file/mainnet_tests/indexer_test.go b/statediff/indexer/database/file/mainnet_tests/indexer_test.go index b297b82d1..59104973c 100644 --- a/statediff/indexer/database/file/mainnet_tests/indexer_test.go +++ b/statediff/indexer/database/file/mainnet_tests/indexer_test.go @@ -88,7 +88,8 @@ func setup(t *testing.T, testBlock *types.Block, testReceipts types.Receipts) { ind, err := file.NewStateDiffIndexer(context.Background(), chainConf, file.TestConfig) require.NoError(t, err) var tx interfaces.Batch - tx, err = ind.PushBlock( + var headerID int64 + tx, headerID, err = ind.PushBlock( testBlock, testReceipts, testBlock.Difficulty()) @@ -103,7 +104,7 @@ func setup(t *testing.T, testBlock *types.Block, testReceipts types.Receipts) { } }() for _, node := range mocks.StateDiffs { - err = ind.PushStateNode(tx, node, testBlock.Hash().String()) + err = ind.PushStateNode(tx, node, testBlock.Hash().String(), headerID) require.NoError(t, err) } diff --git a/statediff/indexer/database/sql/indexer.go b/statediff/indexer/database/sql/indexer.go index 010a675bb..07f4e1ff4 100644 --- a/statediff/indexer/database/sql/indexer.go +++ b/statediff/indexer/database/sql/indexer.go @@ -629,6 +629,9 @@ func (sdi *StateDiffIndexer) PushStateNode(batch interfaces.Batch, stateNode sdt MhKey: stateMhKey, NodeType: stateNode.NodeType.Int(), }) + if err != nil { + return err + } if err := sdi.newDBWriter.InsertStateCID(tx.newDBTx, &v3Models.StateNodeModel{ HeaderID: headerHash, Path: stateNode.Path, diff --git a/statediff/indexer/database/sql/mainnet_tests/indexer_test.go b/statediff/indexer/database/sql/mainnet_tests/indexer_test.go index 68c9bc464..d841bc360 100644 --- a/statediff/indexer/database/sql/mainnet_tests/indexer_test.go +++ b/statediff/indexer/database/sql/mainnet_tests/indexer_test.go @@ -23,6 +23,8 @@ import ( "os" "testing" + nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node" + "github.com/stretchr/testify/require" "github.com/ethereum/go-ethereum/core/types" @@ -36,7 +38,7 @@ import ( var ( err error - db sql.Database + db interfaces.Database ind interfaces.StateDiffIndexer chainConf = params.MainnetChainConfig ) @@ -76,14 +78,19 @@ func testPushBlockAndState(t *testing.T, block *types.Block, receipts types.Rece } func setup(t *testing.T, testBlock *types.Block, testReceipts types.Receipts) { - db, 
err = postgres.SetupSQLXDB() + db, err = postgres.SetupV3SQLXDB() if err != nil { t.Fatal(err) } - ind, err = sql.NewStateDiffIndexer(context.Background(), chainConf, db) + dbV2, err := postgres.SetupV2SQLXDB() + if err != nil { + t.Fatal(err) + } + ind, err = sql.NewStateDiffIndexer(context.Background(), chainConf, nodeinfo.Info{}, dbV2, db) require.NoError(t, err) var tx interfaces.Batch - tx, err = ind.PushBlock( + var headerID int64 + tx, headerID, err = ind.PushBlock( testBlock, testReceipts, testBlock.Difficulty()) @@ -95,7 +102,7 @@ func setup(t *testing.T, testBlock *types.Block, testReceipts types.Receipts) { } }() for _, node := range mocks.StateDiffs { - err = ind.PushStateNode(tx, node, testBlock.Hash().String()) + err = ind.PushStateNode(tx, node, testBlock.Hash().String(), headerID) require.NoError(t, err) } diff --git a/statediff/indexer/database/sql/pgx_indexer_legacy_test.go b/statediff/indexer/database/sql/pgx_indexer_legacy_test.go index 37ea11881..717aa8591 100644 --- a/statediff/indexer/database/sql/pgx_indexer_legacy_test.go +++ b/statediff/indexer/database/sql/pgx_indexer_legacy_test.go @@ -39,10 +39,13 @@ func setupLegacyPGX(t *testing.T) { db, err = postgres.SetupV3PGXDB() require.NoError(t, err) - ind, err = sql.NewStateDiffIndexer(context.Background(), legacyData.Config, nodeinfo.Info{}, db, nil) + v2DB, err := postgres.SetupV2PGXDB() + require.NoError(t, err) + + ind, err = sql.NewStateDiffIndexer(context.Background(), legacyData.Config, nodeinfo.Info{}, v2DB, db) require.NoError(t, err) var tx interfaces.Batch - tx, err = ind.PushBlock( + tx, headerID, err = ind.PushBlock( mockLegacyBlock, legacyData.MockReceipts, legacyData.MockBlock.Difficulty()) @@ -54,7 +57,7 @@ func setupLegacyPGX(t *testing.T) { } }() for _, node := range legacyData.StateDiffs { - err = ind.PushStateNode(tx, node, legacyData.MockBlock.Hash().String(), 0) + err = ind.PushStateNode(tx, node, legacyData.MockBlock.Hash().String(), headerID) require.NoError(t, err) } diff --git a/statediff/indexer/database/sql/pgx_indexer_test.go b/statediff/indexer/database/sql/pgx_indexer_test.go index deb4bd6c6..393cd446e 100644 --- a/statediff/indexer/database/sql/pgx_indexer_test.go +++ b/statediff/indexer/database/sql/pgx_indexer_test.go @@ -41,26 +41,26 @@ import ( func setupPGX(t *testing.T) { db, err = postgres.SetupV3PGXDB() - if err != nil { - t.Fatal(err) - } - ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestConfig, nodeInfo.Info{}, db, nil) + require.NoError(t, err) + + v2DB, err := postgres.SetupV2PGXDB() + require.NoError(t, err) + + ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestConfig, nodeInfo.Info{}, v2DB, db) require.NoError(t, err) var tx interfaces.Batch - tx, err = ind.PushBlock( + tx, headerID, err = ind.PushBlock( mockBlock, mocks.MockReceipts, mocks.MockBlock.Difficulty()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer func() { if err := tx.Submit(err); err != nil { t.Fatal(err) } }() for _, node := range mocks.StateDiffs { - err = ind.PushStateNode(tx, node, mockBlock.Hash().String(), 0) + err = ind.PushStateNode(tx, node, mockBlock.Hash().String(), headerID) require.NoError(t, err) } @@ -89,24 +89,18 @@ func TestPGXIndexer(t *testing.T) { &header.Reward, &header.BlockHash, &header.Coinbase) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test_helpers.ExpectEqual(t, header.CID, headerCID.String()) test_helpers.ExpectEqual(t, header.TD, mocks.MockBlock.Difficulty().String()) test_helpers.ExpectEqual(t, 
header.Reward, "2000000000000021250") test_helpers.ExpectEqual(t, header.Coinbase, mocks.MockHeader.Coinbase.String()) dc, err := cid.Decode(header.CID) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) mhKey := dshelp.MultihashToDsKey(dc.Hash()) prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() var data []byte err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test_helpers.ExpectEqual(t, data, mocks.MockHeaderRlp) }) @@ -118,9 +112,7 @@ func TestPGXIndexer(t *testing.T) { pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash) WHERE header_cids.block_number = $1` err = db.Select(context.Background(), &trxs, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test_helpers.ExpectEqual(t, len(trxs), 5) expectTrue(t, test_helpers.ListContainsString(trxs, trx1CID.String())) expectTrue(t, test_helpers.ListContainsString(trxs, trx2CID.String())) @@ -251,9 +243,7 @@ func TestPGXIndexer(t *testing.T) { INNER JOIN public.blocks ON (log_cids.leaf_mh_key = blocks.key) WHERE receipt_cids.leaf_cid = $1 ORDER BY eth.log_cids.index ASC` err = db.Select(context.Background(), &rcts, rctsPgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(rcts) != len(mocks.MockReceipts) { t.Fatalf("expected %d receipts, got %d", len(mocks.MockReceipts), len(rcts)) } @@ -304,9 +294,7 @@ func TestPGXIndexer(t *testing.T) { AND transaction_cids.header_id = header_cids.block_hash AND header_cids.block_number = $1 order by transaction_cids.index` err = db.Select(context.Background(), &rcts, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test_helpers.ExpectEqual(t, len(rcts), 5) expectTrue(t, test_helpers.ListContainsString(rcts, rct1CID.String())) expectTrue(t, test_helpers.ListContainsString(rcts, rct2CID.String())) @@ -403,9 +391,7 @@ func TestPGXIndexer(t *testing.T) { FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) WHERE header_cids.block_number = $1 AND node_type != 3` err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test_helpers.ExpectEqual(t, len(stateNodes), 2) for _, stateNode := range stateNodes { var data []byte @@ -461,23 +447,17 @@ func TestPGXIndexer(t *testing.T) { FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) WHERE header_cids.block_number = $1 AND node_type = 3` err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test_helpers.ExpectEqual(t, len(stateNodes), 1) stateNode := stateNodes[0] var data []byte dc, err := cid.Decode(stateNode.CID) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) mhKey := dshelp.MultihashToDsKey(dc.Hash()) prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey) err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test_helpers.ExpectEqual(t, stateNode.CID, shared.RemovedNodeStateCID) test_helpers.ExpectEqual(t, stateNode.Path, []byte{'\x02'}) test_helpers.ExpectEqual(t, data, []byte{}) @@ 
-495,9 +475,7 @@ func TestPGXIndexer(t *testing.T) { AND header_cids.block_number = $1 AND storage_cids.node_type != 3` err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test_helpers.ExpectEqual(t, len(storageNodes), 1) test_helpers.ExpectEqual(t, storageNodes[0], v3Models.StorageNodeWithStateKeyModel{ CID: storageCID.String(), @@ -508,15 +486,11 @@ func TestPGXIndexer(t *testing.T) { }) var data []byte dc, err := cid.Decode(storageNodes[0].CID) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) mhKey := dshelp.MultihashToDsKey(dc.Hash()) prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test_helpers.ExpectEqual(t, data, mocks.StorageLeafNode) // check that Removed storage nodes were properly indexed @@ -528,9 +502,7 @@ func TestPGXIndexer(t *testing.T) { AND header_cids.block_number = $1 AND storage_cids.node_type = 3` err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test_helpers.ExpectEqual(t, len(storageNodes), 1) test_helpers.ExpectEqual(t, storageNodes[0], v3Models.StorageNodeWithStateKeyModel{ CID: shared.RemovedNodeStorageCID, @@ -540,16 +512,12 @@ func TestPGXIndexer(t *testing.T) { Path: []byte{'\x03'}, }) dc, err = cid.Decode(storageNodes[0].CID) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) mhKey = dshelp.MultihashToDsKey(dc.Hash()) prefixedKey = blockstore.BlockPrefix.String() + mhKey.String() test_helpers.ExpectEqual(t, prefixedKey, shared.RemovedNodeMhKey) err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test_helpers.ExpectEqual(t, data, []byte{}) }) } diff --git a/statediff/indexer/database/sql/postgres/config.go b/statediff/indexer/database/sql/postgres/config.go index 3ef5c71c9..bcaf4e0bf 100644 --- a/statediff/indexer/database/sql/postgres/config.go +++ b/statediff/indexer/database/sql/postgres/config.go @@ -45,7 +45,7 @@ func ResolveDriverType(str string) (DriverType, error) { } } -// DefaultConfig are default parameters for connecting to a Postgres sql +// DefaultConfig are default parameters for connecting to a Postgres DB var DefaultConfig = Config{ Hostname: "localhost", Port: 5432, @@ -54,6 +54,24 @@ var DefaultConfig = Config{ Password: "password", } +// DefaultV2Config are default parameters for connecting to a v3 Postgres DB +var DefaultV2Config = Config{ + Hostname: "localhost", + Port: 5432, + DatabaseName: "vulcanize_testing_v2", + Username: "vdbm", + Password: "password", +} + +// DefaultV3Config are default parameters for connecting to a v3 Postgres DB +var DefaultV3Config = Config{ + Hostname: "localhost", + Port: 5433, + DatabaseName: "vulcanize_testing_v3", + Username: "vdbm", + Password: "password", +} + // MultiConfig holds multiple configs type MultiConfig struct { V2 Config diff --git a/statediff/indexer/database/sql/postgres/errors.go b/statediff/indexer/database/sql/postgres/errors.go index effa74aa1..a77cdcb8e 100644 --- a/statediff/indexer/database/sql/postgres/errors.go +++ b/statediff/indexer/database/sql/postgres/errors.go @@ -22,17 +22,12 @@ import ( const ( DbConnectionFailedMsg = "db connection failed" - SettingNodeFailedMsg = "unable to set db node" ) func ErrDBConnectionFailed(connectErr 
error) error { return formatError(DbConnectionFailedMsg, connectErr.Error()) } -func ErrUnableToSetNode(setErr error) error { - return formatError(SettingNodeFailedMsg, setErr.Error()) -} - func formatError(msg, err string) error { return fmt.Errorf("%s: %s", msg, err) } diff --git a/statediff/indexer/database/sql/postgres/pgx_test.go b/statediff/indexer/database/sql/postgres/pgx_test.go index 64616e356..750acfcbc 100644 --- a/statediff/indexer/database/sql/postgres/pgx_test.go +++ b/statediff/indexer/database/sql/postgres/pgx_test.go @@ -18,7 +18,6 @@ package postgres_test import ( "context" - "fmt" "math/big" "strings" "testing" @@ -26,7 +25,6 @@ import ( "github.com/jackc/pgx/v4/pgxpool" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" - "github.com/ethereum/go-ethereum/statediff/indexer/node" "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" ) @@ -98,24 +96,11 @@ func TestPostgresPGX(t *testing.T) { }) t.Run("throws error when can't connect to the database", func(t *testing.T) { - goodInfo := node.Info{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"} - _, err := postgres.NewPGXDriver(ctx, postgres.Config{}, goodInfo) + _, err := postgres.NewPGXDriver(ctx, postgres.Config{}) if err == nil { t.Fatal("Expected an error") } expectContainsSubstring(t, err.Error(), postgres.DbConnectionFailedMsg) }) - - t.Run("throws error when can't create node", func(t *testing.T) { - badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100)) - badInfo := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"} - - _, err := postgres.NewPGXDriver(ctx, postgres.DefaultConfig, badInfo) - if err == nil { - t.Fatal("Expected an error") - } - - expectContainsSubstring(t, err.Error(), postgres.SettingNodeFailedMsg) - }) } diff --git a/statediff/indexer/database/sql/postgres/sqlx_test.go b/statediff/indexer/database/sql/postgres/sqlx_test.go index 03f24e9f5..4124a5cdd 100644 --- a/statediff/indexer/database/sql/postgres/sqlx_test.go +++ b/statediff/indexer/database/sql/postgres/sqlx_test.go @@ -17,16 +17,13 @@ package postgres_test import ( - "fmt" "math/big" - "strings" "testing" "github.com/jmoiron/sqlx" _ "github.com/lib/pq" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" - "github.com/ethereum/go-ethereum/statediff/indexer/node" "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" ) @@ -96,24 +93,11 @@ func TestPostgresSQLX(t *testing.T) { }) t.Run("throws error when can't connect to the database", func(t *testing.T) { - goodInfo := node.Info{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"} - _, err := postgres.NewSQLXDriver(ctx, postgres.Config{}, goodInfo) + _, err := postgres.NewSQLXDriver(ctx, postgres.Config{}) if err == nil { t.Fatal("Expected an error") } expectContainsSubstring(t, err.Error(), postgres.DbConnectionFailedMsg) }) - - t.Run("throws error when can't create node", func(t *testing.T) { - badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100)) - badInfo := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"} - - _, err := postgres.NewSQLXDriver(ctx, postgres.DefaultConfig, badInfo) - if err == nil { - t.Fatal("Expected an error") - } - - expectContainsSubstring(t, err.Error(), postgres.SettingNodeFailedMsg) - }) } diff --git a/statediff/indexer/database/sql/postgres/test_helpers.go b/statediff/indexer/database/sql/postgres/test_helpers.go index db29584b4..8a0ea021e 100644 --- 
a/statediff/indexer/database/sql/postgres/test_helpers.go +++ b/statediff/indexer/database/sql/postgres/test_helpers.go @@ -26,7 +26,7 @@ import ( // SetupV3SQLXDB is used to setup a sqlx db for tests func SetupV3SQLXDB() (interfaces.Database, error) { - driver, err := NewSQLXDriver(context.Background(), DefaultConfig) + driver, err := NewSQLXDriver(context.Background(), DefaultV3Config) if err != nil { return nil, err } @@ -35,7 +35,7 @@ func SetupV3SQLXDB() (interfaces.Database, error) { // SetupV3PGXDB is used to setup a pgx db for tests func SetupV3PGXDB() (interfaces.Database, error) { - driver, err := NewPGXDriver(context.Background(), DefaultConfig) + driver, err := NewPGXDriver(context.Background(), DefaultV3Config) if err != nil { return nil, err } @@ -44,7 +44,7 @@ func SetupV3PGXDB() (interfaces.Database, error) { // SetupV2SQLXDB is used to setup a sqlx db for tests func SetupV2SQLXDB() (interfaces.Database, error) { - driver, err := NewSQLXDriver(context.Background(), DefaultConfig) + driver, err := NewSQLXDriver(context.Background(), DefaultV2Config) if err != nil { return nil, err } @@ -53,7 +53,7 @@ func SetupV2SQLXDB() (interfaces.Database, error) { // SetupV2PGXDB is used to setup a pgx db for tests func SetupV2PGXDB() (interfaces.Database, error) { - driver, err := NewPGXDriver(context.Background(), DefaultConfig) + driver, err := NewPGXDriver(context.Background(), DefaultV2Config) if err != nil { return nil, err } diff --git a/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go b/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go index 95d03cb8e..6c14ab3ba 100644 --- a/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go +++ b/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go @@ -40,6 +40,7 @@ var ( legacyData = mocks.NewLegacyData() mockLegacyBlock *types.Block legacyHeaderCID cid.Cid + headerID int64 ) func setupLegacySQLX(t *testing.T) { @@ -49,10 +50,13 @@ func setupLegacySQLX(t *testing.T) { db, err = postgres.SetupV3SQLXDB() require.NoError(t, err) - ind, err = sql.NewStateDiffIndexer(context.Background(), legacyData.Config, nodeinfo.Info{}, db, nil) + v2DB, err := postgres.SetupV2PGXDB() + require.NoError(t, err) + + ind, err = sql.NewStateDiffIndexer(context.Background(), legacyData.Config, nodeinfo.Info{}, v2DB, db) require.NoError(t, err) var tx interfaces.Batch - tx, err = ind.PushBlock( + tx, headerID, err = ind.PushBlock( mockLegacyBlock, legacyData.MockReceipts, legacyData.MockBlock.Difficulty()) @@ -64,7 +68,7 @@ func setupLegacySQLX(t *testing.T) { } }() for _, node := range legacyData.StateDiffs { - err = ind.PushStateNode(tx, node, mockLegacyBlock.Hash().String(), 0) + err = ind.PushStateNode(tx, node, mockLegacyBlock.Hash().String(), headerID) require.NoError(t, err) } diff --git a/statediff/indexer/database/sql/sqlx_indexer_test.go b/statediff/indexer/database/sql/sqlx_indexer_test.go index cd8ed4e33..9d86b83fd 100644 --- a/statediff/indexer/database/sql/sqlx_indexer_test.go +++ b/statediff/indexer/database/sql/sqlx_indexer_test.go @@ -42,26 +42,26 @@ import ( func setupSQLX(t *testing.T) { db, err = postgres.SetupV3SQLXDB() - if err != nil { - t.Fatal(err) - } - ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestConfig, nodeinfo.Info{}, db, nil) + require.NoError(t, err) + + v2DB, err := postgres.SetupV2PGXDB() + require.NoError(t, err) + + ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestConfig, nodeinfo.Info{}, v2DB, db) require.NoError(t, err) var tx interfaces.Batch - tx, err = 
ind.PushBlock( + tx, headerID, err = ind.PushBlock( mockBlock, mocks.MockReceipts, mocks.MockBlock.Difficulty()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer func() { if err := tx.Submit(err); err != nil { t.Fatal(err) } }() for _, node := range mocks.StateDiffs { - err = ind.PushStateNode(tx, node, mockBlock.Hash().String(), 0) + err = ind.PushStateNode(tx, node, mockBlock.Hash().String(), headerID) require.NoError(t, err) } @@ -92,24 +92,21 @@ func TestSQLXIndexer(t *testing.T) { } header := new(res) err = db.QueryRow(context.Background(), pgStr, mocks.BlockNumber.Uint64()).(*sqlx.Row).StructScan(header) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + test_helpers.ExpectEqual(t, header.CID, headerCID.String()) test_helpers.ExpectEqual(t, header.TD, mocks.MockBlock.Difficulty().String()) test_helpers.ExpectEqual(t, header.Reward, "2000000000000021250") test_helpers.ExpectEqual(t, header.Coinbase, mocks.MockHeader.Coinbase.String()) dc, err := cid.Decode(header.CID) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + mhKey := dshelp.MultihashToDsKey(dc.Hash()) prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() var data []byte err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + test_helpers.ExpectEqual(t, data, mocks.MockHeaderRlp) }) @@ -121,9 +118,8 @@ func TestSQLXIndexer(t *testing.T) { pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash) WHERE header_cids.block_number = $1` err = db.Select(context.Background(), &trxs, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + test_helpers.ExpectEqual(t, len(trxs), 5) expectTrue(t, test_helpers.ListContainsString(trxs, trx1CID.String())) expectTrue(t, test_helpers.ListContainsString(trxs, trx2CID.String())) @@ -138,25 +134,22 @@ func TestSQLXIndexer(t *testing.T) { } for _, c := range trxs { dc, err := cid.Decode(c) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + mhKey := dshelp.MultihashToDsKey(dc.Hash()) prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() var data []byte err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + txTypeAndValueStr := `SELECT tx_type, value FROM eth.transaction_cids WHERE cid = $1` switch c { case trx1CID.String(): test_helpers.ExpectEqual(t, data, tx1) txRes := new(txResult) err = db.QueryRow(context.Background(), txTypeAndValueStr, c).(*sqlx.Row).StructScan(txRes) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + if txRes.TxType != 0 { t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType) } @@ -167,9 +160,7 @@ func TestSQLXIndexer(t *testing.T) { test_helpers.ExpectEqual(t, data, tx2) txRes := new(txResult) err = db.QueryRow(context.Background(), txTypeAndValueStr, c).(*sqlx.Row).StructScan(txRes) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if txRes.TxType != 0 { t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType) } @@ -180,9 +171,7 @@ func TestSQLXIndexer(t *testing.T) { test_helpers.ExpectEqual(t, data, tx3) txRes := new(txResult) err = db.QueryRow(context.Background(), txTypeAndValueStr, c).(*sqlx.Row).StructScan(txRes) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if txRes.TxType != 0 { t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType) } @@ -193,9 
+182,7 @@ func TestSQLXIndexer(t *testing.T) { test_helpers.ExpectEqual(t, data, tx4) txRes := new(txResult) err = db.QueryRow(context.Background(), txTypeAndValueStr, c).(*sqlx.Row).StructScan(txRes) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if txRes.TxType != types.AccessListTxType { t.Fatalf("expected AccessListTxType (1), got %d", txRes.TxType) } @@ -205,9 +192,7 @@ func TestSQLXIndexer(t *testing.T) { accessListElementModels := make([]v3Models.AccessListElementModel, 0) pgStr = `SELECT access_list_elements.* FROM eth.access_list_elements INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.tx_hash) WHERE cid = $1 ORDER BY access_list_elements.index ASC` err = db.Select(context.Background(), &accessListElementModels, pgStr, c) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if len(accessListElementModels) != 2 { t.Fatalf("expected two access list entries, got %d", len(accessListElementModels)) } @@ -226,9 +211,7 @@ func TestSQLXIndexer(t *testing.T) { test_helpers.ExpectEqual(t, data, tx5) txRes := new(txResult) err = db.QueryRow(context.Background(), txTypeAndValueStr, c).(*sqlx.Row).StructScan(txRes) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if txRes.TxType != types.DynamicFeeTxType { t.Fatalf("expected DynamicFeeTxType (2), got %d", txRes.TxType) } @@ -354,41 +337,31 @@ func TestSQLXIndexer(t *testing.T) { var postStatus uint64 pgStr = `SELECT post_status FROM eth.receipt_cids WHERE leaf_cid = $1` err = db.Get(context.Background(), &postStatus, pgStr, c) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test_helpers.ExpectEqual(t, postStatus, mocks.ExpectedPostStatus) case rct2CID.String(): test_helpers.ExpectEqual(t, data, rctLeaf2) var postState string err = db.Get(context.Background(), &postState, postStatePgStr, c) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState1) case rct3CID.String(): test_helpers.ExpectEqual(t, data, rctLeaf3) var postState string err = db.Get(context.Background(), &postState, postStatePgStr, c) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState2) case rct4CID.String(): test_helpers.ExpectEqual(t, data, rctLeaf4) var postState string err = db.Get(context.Background(), &postState, postStatePgStr, c) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState3) case rct5CID.String(): test_helpers.ExpectEqual(t, data, rctLeaf5) var postState string err = db.Get(context.Background(), &postState, postStatePgStr, c) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test_helpers.ExpectEqual(t, postState, mocks.ExpectedPostState3) } } diff --git a/statediff/indexer/mocks/test_data.go b/statediff/indexer/mocks/test_data.go index e10c7d977..f302acc70 100644 --- a/statediff/indexer/mocks/test_data.go +++ b/statediff/indexer/mocks/test_data.go @@ -22,6 +22,8 @@ import ( "crypto/rand" "math/big" + v3Models "github.com/ethereum/go-ethereum/statediff/indexer/models/v3" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -104,11 +106,11 @@ var ( Address: AnotherAddress, StorageKeys: []common.Hash{common.BytesToHash(StorageLeafKey), common.BytesToHash(MockStorageLeafKey)}, } - AccessListEntry1Model = v2.AccessListElementModel{ + AccessListEntry1Model = v3Models.AccessListElementModel{ Index: 
0, Address: Address.Hex(), } - AccessListEntry2Model = v2.AccessListElementModel{ + AccessListEntry2Model = v3Models.AccessListElementModel{ Index: 1, Address: AnotherAddress.Hex(), StorageKeys: []string{common.BytesToHash(StorageLeafKey).Hex(), common.BytesToHash(MockStorageLeafKey).Hex()}, -- 2.45.2 From 844d1719dbd33406e938ebe8aef9609bdb75a4b6 Mon Sep 17 00:00:00 2001 From: i-norden Date: Mon, 31 Jan 2022 13:22:28 -0600 Subject: [PATCH 6/7] bump version meta --- params/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/version.go b/params/version.go index 6a5ab4b1d..ef0c8a94e 100644 --- a/params/version.go +++ b/params/version.go @@ -24,7 +24,7 @@ const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 10 // Minor version component of the current release VersionPatch = 15 // Patch version component of the current release - VersionMeta = "statediff-3.0.1" // Version metadata to append to the version string + VersionMeta = "statediff-3.1.0" // Version metadata to append to the version string ) // Version holds the textual version string. -- 2.45.2 From f749c8abc48f27e25fc070d7870421dbd83ebd37 Mon Sep 17 00:00:00 2001 From: i-norden Date: Mon, 31 Jan 2022 14:06:45 -0600 Subject: [PATCH 7/7] fix CI --- docker-compose.yml | 11 +++++----- .../indexer/database/sql/postgres/config.go | 4 ++-- statediff/indexer/database/sql/v2/writer.go | 20 +++++++++---------- statediff/indexer/database/sql/v3/writer.go | 14 ++++++------- 4 files changed, 25 insertions(+), 24 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 001c2650a..a7276b7bc 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -9,9 +9,9 @@ services: POSTGRES_DB: "vulcanize_testing_v2" POSTGRES_PASSWORD: "password" volumes: - - geth_node:/var/lib/postgresql/data + - v2_data:/var/lib/postgresql/data ports: - - "127.0.0.1:5432:5432" + - "127.0.0.1:5433:5432" ipld-eth-db-v3: restart: always @@ -21,9 +21,10 @@ services: POSTGRES_DB: "vulcanize_testing_v3" POSTGRES_PASSWORD: "password" volumes: - - geth_node:/var/lib/postgresql/data + - v3_data:/var/lib/postgresql/data ports: - - "127.0.0.1:5433:5432" + - "127.0.0.1:5432:5432" volumes: - geth_node: + v2_data: + v3_data: diff --git a/statediff/indexer/database/sql/postgres/config.go b/statediff/indexer/database/sql/postgres/config.go index bcaf4e0bf..32259a11b 100644 --- a/statediff/indexer/database/sql/postgres/config.go +++ b/statediff/indexer/database/sql/postgres/config.go @@ -57,7 +57,7 @@ var DefaultConfig = Config{ // DefaultV2Config are default parameters for connecting to a v3 Postgres DB var DefaultV2Config = Config{ Hostname: "localhost", - Port: 5432, + Port: 5433, DatabaseName: "vulcanize_testing_v2", Username: "vdbm", Password: "password", @@ -66,7 +66,7 @@ var DefaultV2Config = Config{ // DefaultV3Config are default parameters for connecting to a v3 Postgres DB var DefaultV3Config = Config{ Hostname: "localhost", - Port: 5433, + Port: 5432, DatabaseName: "vulcanize_testing_v3", Username: "vdbm", Password: "password", diff --git a/statediff/indexer/database/sql/v2/writer.go b/statediff/indexer/database/sql/v2/writer.go index 7b4e2a758..a88a02044 100644 --- a/statediff/indexer/database/sql/v2/writer.go +++ b/statediff/indexer/database/sql/v2/writer.go @@ -78,7 +78,7 @@ func (w *Writer) InsertHeaderCID(tx interfaces.Tx, header *models.HeaderModel) ( header.Reward, header.StateRoot, header.TxRoot, header.RctRoot, header.UncleRoot, header.Bloom, header.Timestamp, header.MhKey, 1, 
header.BaseFee).Scan(&headerID) if err != nil { - return 0, fmt.Errorf("error inserting header_cids entry: %v", err) + return 0, fmt.Errorf("v2 error inserting header_cids entry: %v", err) } writerV2Metrics.Blocks.Inc(1) return headerID, nil @@ -93,7 +93,7 @@ func (w *Writer) InsertUncleCID(tx interfaces.Tx, uncle *models.UncleModel) erro _, err := tx.Exec(w.DB.Context(), w.DB.InsertUncleStm(), uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID, uncle.Reward, uncle.MhKey) if err != nil { - return fmt.Errorf("error inserting uncle_cids entry: %v", err) + return fmt.Errorf("v2 error inserting uncle_cids entry: %v", err) } return nil } @@ -107,9 +107,9 @@ func (w *Writer) InsertTransactionCID(tx interfaces.Tx, transaction *models.TxMo var txID int64 err := tx.QueryRow(w.DB.Context(), w.DB.InsertTxStm(), transaction.HeaderID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, - transaction.MhKey, transaction.Data, transaction.Type).Scan(&txID) + transaction.MhKey, transaction.Data, []byte{transaction.Type}).Scan(&txID) if err != nil { - return 0, fmt.Errorf("error inserting transaction_cids entry: %v", err) + return 0, fmt.Errorf("v2 error inserting transaction_cids entry: %v", err) } writerV2Metrics.Transactions.Inc(1) return txID, nil @@ -124,7 +124,7 @@ func (w *Writer) InsertAccessListElement(tx interfaces.Tx, accessListElement *mo _, err := tx.Exec(w.DB.Context(), w.DB.InsertAccessListElementStm(), accessListElement.TxID, accessListElement.Index, accessListElement.Address, accessListElement.StorageKeys) if err != nil { - return fmt.Errorf("error inserting access_list_element entry: %v", err) + return fmt.Errorf("v2 error inserting access_list_element entry: %v", err) } writerV2Metrics.AccessListEntries.Inc(1) return nil @@ -140,7 +140,7 @@ func (w *Writer) InsertReceiptCID(tx interfaces.Tx, rct *models.ReceiptModel) (i err := tx.QueryRow(w.DB.Context(), w.DB.InsertRctStm(), rct.TxID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey, rct.PostState, rct.PostStatus, rct.LogRoot).Scan(&receiptID) if err != nil { - return 0, fmt.Errorf("error inserting receipt_cids entry: %w", err) + return 0, fmt.Errorf("v2 error inserting receipt_cids entry: %w", err) } writerV2Metrics.Receipts.Inc(1) return receiptID, nil @@ -157,7 +157,7 @@ func (w *Writer) InsertLogCID(tx interfaces.Tx, logs []*models.LogsModel) error log.LeafCID, log.LeafMhKey, log.ReceiptID, log.Address, log.Index, log.Topic0, log.Topic1, log.Topic2, log.Topic3, log.Data) if err != nil { - return fmt.Errorf("error inserting logs entry: %w", err) + return fmt.Errorf("v2 error inserting logs entry: %w", err) } writerV2Metrics.Logs.Inc(1) } @@ -178,7 +178,7 @@ func (w *Writer) InsertStateCID(tx interfaces.Tx, stateNode *models.StateNodeMod err := tx.QueryRow(w.DB.Context(), w.DB.InsertStateStm(), stateNode.HeaderID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey).Scan(&stateID) if err != nil { - return 0, fmt.Errorf("error inserting state_cids entry: %v", err) + return 0, fmt.Errorf("v2 error inserting state_cids entry: %v", err) } return stateID, nil } @@ -193,7 +193,7 @@ func (w *Writer) InsertStateAccount(tx interfaces.Tx, stateAccount *models.State stateAccount.StateID, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot) if err != nil { - return fmt.Errorf("error inserting state_accounts entry: %v", err) + return fmt.Errorf("v2 error inserting state_accounts entry: %v", err) } return nil } @@ -212,7 
+212,7 @@ func (w *Writer) InsertStorageCID(tx interfaces.Tx, storageCID *models.StorageNo storageCID.StateID, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType, true, storageCID.MhKey) if err != nil { - return fmt.Errorf("error inserting storage_cids entry: %v", err) + return fmt.Errorf("v2 error inserting storage_cids entry: %v", err) } return nil } diff --git a/statediff/indexer/database/sql/v3/writer.go b/statediff/indexer/database/sql/v3/writer.go index 2cda907ad..4cf4a5581 100644 --- a/statediff/indexer/database/sql/v3/writer.go +++ b/statediff/indexer/database/sql/v3/writer.go @@ -106,7 +106,7 @@ func (w *Writer) InsertTransactionCID(tx interfaces.Tx, transaction *models.TxMo transaction.HeaderID, transaction.TxHash, transaction.CID, transaction.Dst, transaction.Src, transaction.Index, transaction.MhKey, transaction.Data, transaction.Type, transaction.Value) if err != nil { - return fmt.Errorf("error inserting transaction_cids entry: %v", err) + return fmt.Errorf("v3 error inserting transaction_cids entry: %v", err) } writerV3Metrics.Transactions.Inc(1) return nil @@ -121,7 +121,7 @@ func (w *Writer) InsertAccessListElement(tx interfaces.Tx, accessListElement *mo _, err := tx.Exec(w.DB.Context(), w.DB.InsertAccessListElementStm(), accessListElement.TxID, accessListElement.Index, accessListElement.Address, accessListElement.StorageKeys) if err != nil { - return fmt.Errorf("error inserting access_list_element entry: %v", err) + return fmt.Errorf("v3 error inserting access_list_element entry: %v", err) } writerV3Metrics.AccessListEntries.Inc(1) return nil @@ -136,7 +136,7 @@ func (w *Writer) InsertReceiptCID(tx interfaces.Tx, rct *models.ReceiptModel) er _, err := tx.Exec(w.DB.Context(), w.DB.InsertRctStm(), rct.TxID, rct.LeafCID, rct.Contract, rct.ContractHash, rct.LeafMhKey, rct.PostState, rct.PostStatus, rct.LogRoot) if err != nil { - return fmt.Errorf("error inserting receipt_cids entry: %w", err) + return fmt.Errorf("v3 error inserting receipt_cids entry: %w", err) } writerV3Metrics.Receipts.Inc(1) return nil @@ -153,7 +153,7 @@ func (w *Writer) InsertLogCID(tx interfaces.Tx, logs []*models.LogsModel) error log.LeafCID, log.LeafMhKey, log.ReceiptID, log.Address, log.Index, log.Topic0, log.Topic1, log.Topic2, log.Topic3, log.Data) if err != nil { - return fmt.Errorf("error inserting logs entry: %w", err) + return fmt.Errorf("v3 error inserting logs entry: %w", err) } writerV3Metrics.Logs.Inc(1) } @@ -173,7 +173,7 @@ func (w *Writer) InsertStateCID(tx interfaces.Tx, stateNode *models.StateNodeMod _, err := tx.Exec(w.DB.Context(), w.DB.InsertStateStm(), stateNode.HeaderID, stateKey, stateNode.CID, stateNode.Path, stateNode.NodeType, true, stateNode.MhKey) if err != nil { - return fmt.Errorf("error inserting state_cids entry: %v", err) + return fmt.Errorf("v3 error inserting state_cids entry: %v", err) } return nil } @@ -188,7 +188,7 @@ func (w *Writer) InsertStateAccount(tx interfaces.Tx, stateAccount *models.State stateAccount.HeaderID, stateAccount.StatePath, stateAccount.Balance, stateAccount.Nonce, stateAccount.CodeHash, stateAccount.StorageRoot) if err != nil { - return fmt.Errorf("error inserting state_accounts entry: %v", err) + return fmt.Errorf("v3 error inserting state_accounts entry: %v", err) } return nil } @@ -207,7 +207,7 @@ func (w *Writer) InsertStorageCID(tx interfaces.Tx, storageCID *models.StorageNo storageCID.HeaderID, storageCID.StatePath, storageKey, storageCID.CID, storageCID.Path, storageCID.NodeType, true, storageCID.MhKey) if err != nil { - 
return fmt.Errorf("error inserting storage_cids entry: %v", err) + return fmt.Errorf("v3 error inserting storage_cids entry: %v", err) } return nil } -- 2.45.2