From be1757b9fd884cb20c8d7faac8fa81fc49bb7216 Mon Sep 17 00:00:00 2001 From: prathamesh0 <42446521+prathamesh0@users.noreply.github.com> Date: Tue, 16 Aug 2022 17:24:46 +0530 Subject: [PATCH] Refactor indexer tests to avoid duplicate code (#270) * Refactor indexer tests to avoid duplicate code * Refactor file mode indexer tests * Fix expected db stats for sqlx after tx closure * Refactor indexer tests for legacy block * Refactor mainnet indexer tests * Refactor tests for watched addressess methods * Fix query in legacy indexer test --- .../database/file/csv_indexer_legacy_test.go | 77 +- .../indexer/database/file/csv_indexer_test.go | 187 ++- .../database/file/indexer_shared_test.go | 1067 -------------- .../file/mainnet_tests/indexer_test.go | 72 +- .../database/file/sql_indexer_legacy_test.go | 75 +- .../indexer/database/file/sql_indexer_test.go | 163 ++- .../indexer/database/file/test_helpers.go | 68 - .../database/sql/indexer_shared_test.go | 952 +----------- .../sql/mainnet_tests/indexer_test.go | 57 +- .../database/sql/pgx_indexer_legacy_test.go | 59 +- .../indexer/database/sql/pgx_indexer_test.go | 824 +---------- .../database/sql/sqlx_indexer_legacy_test.go | 69 +- .../indexer/database/sql/sqlx_indexer_test.go | 831 +---------- .../indexer/database/sql/test_helpers.go | 84 -- statediff/indexer/mocks/test_data.go | 105 +- statediff/indexer/test/test.go | 1274 +++++++++++++++++ statediff/indexer/test/test_init.go | 248 ++++ statediff/indexer/test/test_legacy.go | 96 ++ statediff/indexer/test/test_mainnet.go | 53 + .../indexer/test/test_watched_addresses.go | 258 ++++ .../test_helpers/mainnet_test_helpers.go | 12 + .../indexer/test_helpers/test_helpers.go | 104 ++ 22 files changed, 2604 insertions(+), 4131 deletions(-) delete mode 100644 statediff/indexer/database/file/indexer_shared_test.go delete mode 100644 statediff/indexer/database/file/test_helpers.go delete mode 100644 statediff/indexer/database/sql/test_helpers.go create mode 100644 
statediff/indexer/test/test.go create mode 100644 statediff/indexer/test/test_init.go create mode 100644 statediff/indexer/test/test_legacy.go create mode 100644 statediff/indexer/test/test_mainnet.go create mode 100644 statediff/indexer/test/test_watched_addresses.go diff --git a/statediff/indexer/database/file/csv_indexer_legacy_test.go b/statediff/indexer/database/file/csv_indexer_legacy_test.go index 3f628a91c..55350a912 100644 --- a/statediff/indexer/database/file/csv_indexer_legacy_test.go +++ b/statediff/indexer/database/file/csv_indexer_legacy_test.go @@ -25,66 +25,49 @@ import ( "strings" "testing" - "github.com/jmoiron/sqlx" - "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" "github.com/ethereum/go-ethereum/statediff/indexer/database/file" "github.com/ethereum/go-ethereum/statediff/indexer/database/file/types" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" - "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" - "github.com/ethereum/go-ethereum/statediff/indexer/ipld" + "github.com/ethereum/go-ethereum/statediff/indexer/test" + "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" ) const dbDirectory = "/file_indexer" const pgCopyStatement = `COPY %s FROM '%s' CSV` -func setupCSVLegacy(t *testing.T) { - mockLegacyBlock = legacyData.MockBlock - legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256) - file.CSVTestConfig.OutputDir = "./statediffing_legacy_test" - +func setupLegacyCSVIndexer(t *testing.T) { if _, err := os.Stat(file.CSVTestConfig.OutputDir); !errors.Is(err, os.ErrNotExist) { err := os.RemoveAll(file.CSVTestConfig.OutputDir) require.NoError(t, err) } - ind, err := file.NewStateDiffIndexer(context.Background(), legacyData.Config, file.CSVTestConfig) - require.NoError(t, err) - var tx interfaces.Batch - tx, err = ind.PushBlock( - mockLegacyBlock, - legacyData.MockReceipts, - legacyData.MockBlock.Difficulty()) + ind, err = 
file.NewStateDiffIndexer(context.Background(), test.LegacyConfig, file.CSVTestConfig) require.NoError(t, err) - defer func() { - if err := tx.Submit(err); err != nil { - t.Fatal(err) - } - if err := ind.Close(); err != nil { - t.Fatal(err) - } - }() - - for _, node := range legacyData.StateDiffs { - err = ind.PushStateNode(tx, node, legacyData.MockBlock.Hash().String()) - require.NoError(t, err) - } - - require.Equal(t, legacyData.BlockNumber.String(), tx.(*file.BatchTx).BlockNumber) - - connStr := postgres.DefaultConfig.DbConnectionString() - sqlxdb, err = sqlx.Connect("postgres", connStr) + db, err = postgres.SetupSQLXDB() if err != nil { - t.Fatalf("failed to connect to db with connection string: %s err: %v", connStr, err) + t.Fatal(err) } } +func setupLegacyCSV(t *testing.T) { + setupLegacyCSVIndexer(t) + test.SetupLegacyTestData(t, ind) +} + func dumpCSVFileData(t *testing.T) { outputDir := filepath.Join(dbDirectory, file.CSVTestConfig.OutputDir) + workingDir, err := os.Getwd() + require.NoError(t, err) + + localOutputDir := filepath.Join(workingDir, file.CSVTestConfig.OutputDir) for _, tbl := range file.Tables { + err := test_helpers.DedupFile(file.TableFilePath(localOutputDir, tbl.Name)) + require.NoError(t, err) + var stmt string varcharColumns := tbl.VarcharColumns() if len(varcharColumns) > 0 { @@ -98,38 +81,38 @@ func dumpCSVFileData(t *testing.T) { stmt = fmt.Sprintf(pgCopyStatement, tbl.Name, file.TableFilePath(outputDir, tbl.Name)) } - _, err = sqlxdb.Exec(stmt) + _, err = db.Exec(context.Background(), stmt) require.NoError(t, err) } } -func dumpWatchedAddressesCSVFileData(t *testing.T) { +func resetAndDumpWatchedAddressesCSVFileData(t *testing.T) { + test_helpers.TearDownDB(t, db) + outputFilePath := filepath.Join(dbDirectory, file.CSVTestConfig.WatchedAddressesFilePath) stmt := fmt.Sprintf(pgCopyStatement, types.TableWatchedAddresses.Name, outputFilePath) - _, err = sqlxdb.Exec(stmt) + _, err = db.Exec(context.Background(), stmt) require.NoError(t, 
err) } func tearDownCSV(t *testing.T) { - file.TearDownDB(t, sqlxdb) + test_helpers.TearDownDB(t, db) + require.NoError(t, db.Close()) - err := os.RemoveAll(file.CSVTestConfig.OutputDir) - require.NoError(t, err) + require.NoError(t, os.RemoveAll(file.CSVTestConfig.OutputDir)) if err := os.Remove(file.CSVTestConfig.WatchedAddressesFilePath); !errors.Is(err, os.ErrNotExist) { require.NoError(t, err) } - - err = sqlxdb.Close() - require.NoError(t, err) } -func TestCSVFileIndexerLegacy(t *testing.T) { +func TestLegacyCSVFileIndexer(t *testing.T) { t.Run("Publish and index header IPLDs", func(t *testing.T) { - setupCSVLegacy(t) + setupLegacyCSV(t) dumpCSVFileData(t) defer tearDownCSV(t) - testLegacyPublishAndIndexHeaderIPLDs(t) + + test.TestLegacyIndexer(t, db) }) } diff --git a/statediff/indexer/database/file/csv_indexer_test.go b/statediff/indexer/database/file/csv_indexer_test.go index 761a30f9d..81f425acb 100644 --- a/statediff/indexer/database/file/csv_indexer_test.go +++ b/statediff/indexer/database/file/csv_indexer_test.go @@ -19,16 +19,16 @@ package file_test import ( "context" "errors" + "math/big" "os" "testing" - "github.com/jmoiron/sqlx" "github.com/stretchr/testify/require" "github.com/ethereum/go-ethereum/statediff/indexer/database/file" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" - "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" "github.com/ethereum/go-ethereum/statediff/indexer/mocks" + "github.com/ethereum/go-ethereum/statediff/indexer/test" ) func setupCSVIndexer(t *testing.T) { @@ -47,37 +47,20 @@ func setupCSVIndexer(t *testing.T) { ind, err = file.NewStateDiffIndexer(context.Background(), mocks.TestConfig, file.CSVTestConfig) require.NoError(t, err) - connStr := postgres.DefaultConfig.DbConnectionString() - sqlxdb, err = sqlx.Connect("postgres", connStr) + db, err = postgres.SetupSQLXDB() if err != nil { - t.Fatalf("failed to connect to db with connection string: %s err: %v", connStr, err) + t.Fatal(err) 
} } func setupCSV(t *testing.T) { setupCSVIndexer(t) - var tx interfaces.Batch - tx, err = ind.PushBlock( - mockBlock, - mocks.MockReceipts, - mocks.MockBlock.Difficulty()) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := tx.Submit(err); err != nil { - t.Fatal(err) - } - if err := ind.Close(); err != nil { - t.Fatal(err) - } - }() - for _, node := range mocks.StateDiffs { - err = ind.PushStateNode(tx, node, mockBlock.Hash().String()) - require.NoError(t, err) - } + test.SetupTestData(t, ind) +} - require.Equal(t, mocks.BlockNumber.String(), tx.(*file.BatchTx).BlockNumber) +func setupCSVNonCanonical(t *testing.T) { + setupCSVIndexer(t) + test.SetupTestDataNonCanonical(t, ind) } func TestCSVFileIndexer(t *testing.T) { @@ -86,7 +69,7 @@ func TestCSVFileIndexer(t *testing.T) { dumpCSVFileData(t) defer tearDownCSV(t) - testPublishAndIndexHeaderIPLDs(t) + test.TestPublishAndIndexHeaderIPLDs(t, db) }) t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) { @@ -94,7 +77,7 @@ func TestCSVFileIndexer(t *testing.T) { dumpCSVFileData(t) defer tearDownCSV(t) - testPublishAndIndexTransactionIPLDs(t) + test.TestPublishAndIndexTransactionIPLDs(t, db) }) t.Run("Publish and index log IPLDs for multiple receipt of a specific block", func(t *testing.T) { @@ -102,7 +85,7 @@ func TestCSVFileIndexer(t *testing.T) { dumpCSVFileData(t) defer tearDownCSV(t) - testPublishAndIndexLogIPLDs(t) + test.TestPublishAndIndexLogIPLDs(t, db) }) t.Run("Publish and index receipt IPLDs in a single tx", func(t *testing.T) { @@ -110,7 +93,7 @@ func TestCSVFileIndexer(t *testing.T) { dumpCSVFileData(t) defer tearDownCSV(t) - testPublishAndIndexReceiptIPLDs(t) + test.TestPublishAndIndexReceiptIPLDs(t, db) }) t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) { @@ -118,7 +101,7 @@ func TestCSVFileIndexer(t *testing.T) { dumpCSVFileData(t) defer tearDownCSV(t) - testPublishAndIndexStateIPLDs(t) + test.TestPublishAndIndexStateIPLDs(t, db) }) 
t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) { @@ -126,7 +109,57 @@ func TestCSVFileIndexer(t *testing.T) { dumpCSVFileData(t) defer tearDownCSV(t) - testPublishAndIndexStorageIPLDs(t) + test.TestPublishAndIndexStorageIPLDs(t, db) + }) +} + +func TestCSVFileIndexerNonCanonical(t *testing.T) { + t.Run("Publish and index header", func(t *testing.T) { + setupCSVNonCanonical(t) + dumpCSVFileData(t) + defer tearDownCSV(t) + + test.TestPublishAndIndexHeaderNonCanonical(t, db) + }) + + t.Run("Publish and index transactions", func(t *testing.T) { + setupCSVNonCanonical(t) + dumpCSVFileData(t) + defer tearDownCSV(t) + + test.TestPublishAndIndexTransactionsNonCanonical(t, db) + }) + + t.Run("Publish and index receipts", func(t *testing.T) { + setupCSVNonCanonical(t) + dumpCSVFileData(t) + defer tearDownCSV(t) + + test.TestPublishAndIndexReceiptsNonCanonical(t, db) + }) + + t.Run("Publish and index logs", func(t *testing.T) { + setupCSVNonCanonical(t) + dumpCSVFileData(t) + defer tearDownCSV(t) + + test.TestPublishAndIndexLogsNonCanonical(t, db) + }) + + t.Run("Publish and index state nodes", func(t *testing.T) { + setupCSVNonCanonical(t) + dumpCSVFileData(t) + defer tearDownCSV(t) + + test.TestPublishAndIndexStateNonCanonical(t, db) + }) + + t.Run("Publish and index storage nodes", func(t *testing.T) { + setupCSVNonCanonical(t) + dumpCSVFileData(t) + defer tearDownCSV(t) + + test.TestPublishAndIndexStorageNonCanonical(t, db) }) } @@ -135,66 +168,88 @@ func TestCSVFileWatchAddressMethods(t *testing.T) { defer tearDownCSV(t) t.Run("Load watched addresses (empty table)", func(t *testing.T) { - testLoadEmptyWatchedAddresses(t) + test.TestLoadEmptyWatchedAddresses(t, ind) }) t.Run("Insert watched addresses", func(t *testing.T) { - testInsertWatchedAddresses(t, func(t *testing.T) { - file.TearDownDB(t, sqlxdb) - dumpWatchedAddressesCSVFileData(t) - }) + args := mocks.GetInsertWatchedAddressesArgs() + err = ind.InsertWatchedAddresses(args, 
big.NewInt(int64(mocks.WatchedAt1))) + require.NoError(t, err) + + resetAndDumpWatchedAddressesCSVFileData(t) + + test.TestInsertWatchedAddresses(t, db) }) t.Run("Insert watched addresses (some already watched)", func(t *testing.T) { - testInsertAlreadyWatchedAddresses(t, func(t *testing.T) { - file.TearDownDB(t, sqlxdb) - dumpWatchedAddressesCSVFileData(t) - }) + args := mocks.GetInsertAlreadyWatchedAddressesArgs() + err = ind.InsertWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt2))) + require.NoError(t, err) + + resetAndDumpWatchedAddressesCSVFileData(t) + + test.TestInsertAlreadyWatchedAddresses(t, db) }) t.Run("Remove watched addresses", func(t *testing.T) { - testRemoveWatchedAddresses(t, func(t *testing.T) { - file.TearDownDB(t, sqlxdb) - dumpWatchedAddressesCSVFileData(t) - }) + args := mocks.GetRemoveWatchedAddressesArgs() + err = ind.RemoveWatchedAddresses(args) + require.NoError(t, err) + + resetAndDumpWatchedAddressesCSVFileData(t) + + test.TestRemoveWatchedAddresses(t, db) }) t.Run("Remove watched addresses (some non-watched)", func(t *testing.T) { - testRemoveNonWatchedAddresses(t, func(t *testing.T) { - file.TearDownDB(t, sqlxdb) - dumpWatchedAddressesCSVFileData(t) - }) + args := mocks.GetRemoveNonWatchedAddressesArgs() + err = ind.RemoveWatchedAddresses(args) + require.NoError(t, err) + + resetAndDumpWatchedAddressesCSVFileData(t) + + test.TestRemoveNonWatchedAddresses(t, db) }) t.Run("Set watched addresses", func(t *testing.T) { - testSetWatchedAddresses(t, func(t *testing.T) { - file.TearDownDB(t, sqlxdb) - dumpWatchedAddressesCSVFileData(t) - }) + args := mocks.GetSetWatchedAddressesArgs() + err = ind.SetWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt2))) + require.NoError(t, err) + + resetAndDumpWatchedAddressesCSVFileData(t) + + test.TestSetWatchedAddresses(t, db) }) t.Run("Set watched addresses (some already watched)", func(t *testing.T) { - testSetAlreadyWatchedAddresses(t, func(t *testing.T) { - file.TearDownDB(t, sqlxdb) - 
dumpWatchedAddressesCSVFileData(t) - }) + args := mocks.GetSetAlreadyWatchedAddressesArgs() + err = ind.SetWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt3))) + require.NoError(t, err) + + resetAndDumpWatchedAddressesCSVFileData(t) + + test.TestSetAlreadyWatchedAddresses(t, db) }) t.Run("Load watched addresses", func(t *testing.T) { - testLoadWatchedAddresses(t) + test.TestLoadWatchedAddresses(t, ind) }) t.Run("Clear watched addresses", func(t *testing.T) { - testClearWatchedAddresses(t, func(t *testing.T) { - file.TearDownDB(t, sqlxdb) - dumpWatchedAddressesCSVFileData(t) - }) + err = ind.ClearWatchedAddresses() + require.NoError(t, err) + + resetAndDumpWatchedAddressesCSVFileData(t) + + test.TestClearWatchedAddresses(t, db) }) t.Run("Clear watched addresses (empty table)", func(t *testing.T) { - testClearEmptyWatchedAddresses(t, func(t *testing.T) { - file.TearDownDB(t, sqlxdb) - dumpWatchedAddressesCSVFileData(t) - }) + err = ind.ClearWatchedAddresses() + require.NoError(t, err) + + resetAndDumpWatchedAddressesCSVFileData(t) + + test.TestClearEmptyWatchedAddresses(t, db) }) } diff --git a/statediff/indexer/database/file/indexer_shared_test.go b/statediff/indexer/database/file/indexer_shared_test.go deleted file mode 100644 index 16e82a6cc..000000000 --- a/statediff/indexer/database/file/indexer_shared_test.go +++ /dev/null @@ -1,1067 +0,0 @@ -// VulcanizeDB -// Copyright © 2022 Vulcanize - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. 
- -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package file_test - -import ( - "bytes" - "fmt" - "math/big" - "os" - "testing" - - "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - dshelp "github.com/ipfs/go-ipfs-ds-help" - "github.com/jmoiron/sqlx" - "github.com/multiformats/go-multihash" - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/statediff/indexer/database/file" - "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" - "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" - "github.com/ethereum/go-ethereum/statediff/indexer/ipld" - "github.com/ethereum/go-ethereum/statediff/indexer/mocks" - "github.com/ethereum/go-ethereum/statediff/indexer/models" - "github.com/ethereum/go-ethereum/statediff/indexer/shared" - "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" - sdtypes "github.com/ethereum/go-ethereum/statediff/types" -) - -var ( - legacyData = mocks.NewLegacyData() - mockLegacyBlock *types.Block - legacyHeaderCID cid.Cid - - sqlxdb *sqlx.DB - err error - ind interfaces.StateDiffIndexer - - ipfsPgGet = `SELECT data FROM public.blocks - WHERE key = $1 AND block_number = $2` - watchedAddressesPgGet = `SELECT * FROM eth_meta.watched_addresses` - - tx1, tx2, tx3, tx4, tx5, rct1, rct2, rct3, rct4, rct5 []byte - mockBlock *types.Block - headerCID, trx1CID, trx2CID, trx3CID, trx4CID, trx5CID cid.Cid - rct1CID, rct2CID, rct3CID, rct4CID, rct5CID cid.Cid - rctLeaf1, rctLeaf2, rctLeaf3, rctLeaf4, rctLeaf5 []byte - state1CID, state2CID, storageCID cid.Cid - contract1Address, contract2Address, contract3Address, contract4Address string - contract1CreatedAt, contract2CreatedAt, contract3CreatedAt, contract4CreatedAt uint64 - lastFilledAt, watchedAt1, watchedAt2, 
watchedAt3 uint64 -) - -func init() { - if os.Getenv("MODE") != "statediff" { - fmt.Println("Skipping statediff test") - os.Exit(0) - } - - mockBlock = mocks.MockBlock - txs, rcts := mocks.MockBlock.Transactions(), mocks.MockReceipts - - buf := new(bytes.Buffer) - txs.EncodeIndex(0, buf) - tx1 = make([]byte, buf.Len()) - copy(tx1, buf.Bytes()) - buf.Reset() - - txs.EncodeIndex(1, buf) - tx2 = make([]byte, buf.Len()) - copy(tx2, buf.Bytes()) - buf.Reset() - - txs.EncodeIndex(2, buf) - tx3 = make([]byte, buf.Len()) - copy(tx3, buf.Bytes()) - buf.Reset() - - txs.EncodeIndex(3, buf) - tx4 = make([]byte, buf.Len()) - copy(tx4, buf.Bytes()) - buf.Reset() - - txs.EncodeIndex(4, buf) - tx5 = make([]byte, buf.Len()) - copy(tx5, buf.Bytes()) - buf.Reset() - - rcts.EncodeIndex(0, buf) - rct1 = make([]byte, buf.Len()) - copy(rct1, buf.Bytes()) - buf.Reset() - - rcts.EncodeIndex(1, buf) - rct2 = make([]byte, buf.Len()) - copy(rct2, buf.Bytes()) - buf.Reset() - - rcts.EncodeIndex(2, buf) - rct3 = make([]byte, buf.Len()) - copy(rct3, buf.Bytes()) - buf.Reset() - - rcts.EncodeIndex(3, buf) - rct4 = make([]byte, buf.Len()) - copy(rct4, buf.Bytes()) - buf.Reset() - - rcts.EncodeIndex(4, buf) - rct5 = make([]byte, buf.Len()) - copy(rct5, buf.Bytes()) - buf.Reset() - - headerCID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockHeaderRlp, multihash.KECCAK_256) - trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx1, multihash.KECCAK_256) - trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx2, multihash.KECCAK_256) - trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx3, multihash.KECCAK_256) - trx4CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx4, multihash.KECCAK_256) - trx5CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx5, multihash.KECCAK_256) - state1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.ContractLeafNode, multihash.KECCAK_256) - state2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.AccountLeafNode, multihash.KECCAK_256) - storageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, 
mocks.StorageLeafNode, multihash.KECCAK_256) - - receiptTrie := ipld.NewRctTrie() - - receiptTrie.Add(0, rct1) - receiptTrie.Add(1, rct2) - receiptTrie.Add(2, rct3) - receiptTrie.Add(3, rct4) - receiptTrie.Add(4, rct5) - - rctLeafNodes, keys, _ := receiptTrie.GetLeafNodes() - - rctleafNodeCids := make([]cid.Cid, len(rctLeafNodes)) - orderedRctLeafNodes := make([][]byte, len(rctLeafNodes)) - for i, rln := range rctLeafNodes { - var idx uint - - r := bytes.NewReader(keys[i].TrieKey) - rlp.Decode(r, &idx) - rctleafNodeCids[idx] = rln.Cid() - orderedRctLeafNodes[idx] = rln.RawData() - } - - rct1CID = rctleafNodeCids[0] - rct2CID = rctleafNodeCids[1] - rct3CID = rctleafNodeCids[2] - rct4CID = rctleafNodeCids[3] - rct5CID = rctleafNodeCids[4] - - rctLeaf1 = orderedRctLeafNodes[0] - rctLeaf2 = orderedRctLeafNodes[1] - rctLeaf3 = orderedRctLeafNodes[2] - rctLeaf4 = orderedRctLeafNodes[3] - rctLeaf5 = orderedRctLeafNodes[4] - - contract1Address = "0x5d663F5269090bD2A7DC2390c911dF6083D7b28F" - contract2Address = "0x6Eb7e5C66DB8af2E96159AC440cbc8CDB7fbD26B" - contract3Address = "0xcfeB164C328CA13EFd3C77E1980d94975aDfedfc" - contract4Address = "0x0Edf0c4f393a628DE4828B228C48175b3EA297fc" - contract1CreatedAt = uint64(1) - contract2CreatedAt = uint64(2) - contract3CreatedAt = uint64(3) - contract4CreatedAt = uint64(4) - - lastFilledAt = uint64(0) - watchedAt1 = uint64(10) - watchedAt2 = uint64(15) - watchedAt3 = uint64(20) -} - -func expectTrue(t *testing.T, value bool) { - if !value { - t.Fatalf("Assertion failed") - } -} - -func resetDB(t *testing.T) { - file.TearDownDB(t, sqlxdb) - - connStr := postgres.DefaultConfig.DbConnectionString() - sqlxdb, err = sqlx.Connect("postgres", connStr) - if err != nil { - t.Fatalf("failed to connect to db with connection string: %s err: %v", connStr, err) - } -} - -func testLegacyPublishAndIndexHeaderIPLDs(t *testing.T) { - pgStr := `SELECT cid, td, reward, block_hash, coinbase - FROM eth.header_cids - WHERE block_number = $1` - // check 
header was properly indexed - type res struct { - CID string - TD string - Reward string - BlockHash string `db:"block_hash"` - Coinbase string `db:"coinbase"` - } - header := new(res) - err = sqlxdb.QueryRowx(pgStr, legacyData.BlockNumber.Uint64()).StructScan(header) - require.NoError(t, err) - - require.Equal(t, legacyHeaderCID.String(), header.CID) - require.Equal(t, legacyData.MockBlock.Difficulty().String(), header.TD) - require.Equal(t, "5000000000000011250", header.Reward) - require.Equal(t, legacyData.MockBlock.Coinbase().String(), header.Coinbase) - require.Nil(t, legacyData.MockHeader.BaseFee) -} - -func testPublishAndIndexHeaderIPLDs(t *testing.T) { - pgStr := `SELECT cid, td, reward, block_hash, coinbase - FROM eth.header_cids - WHERE block_number = $1` - // check header was properly indexed - type res struct { - CID string - TD string - Reward string - BlockHash string `db:"block_hash"` - Coinbase string `db:"coinbase"` - } - header := new(res) - err = sqlxdb.QueryRowx(pgStr, mocks.BlockNumber.Uint64()).StructScan(header) - if err != nil { - t.Fatal(err) - } - - require.Equal(t, headerCID.String(), header.CID) - require.Equal(t, mocks.MockBlock.Difficulty().String(), header.TD) - require.Equal(t, "2000000000000021250", header.Reward) - require.Equal(t, mocks.MockHeader.Coinbase.String(), header.Coinbase) - dc, err := cid.Decode(header.CID) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - var data []byte - err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.MockHeaderRlp, data) -} - -func testPublishAndIndexTransactionIPLDs(t *testing.T) { - // check that txs were properly indexed and published - trxs := make([]string, 0) - pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash) 
- WHERE header_cids.block_number = $1` - err = sqlxdb.Select(&trxs, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 5, len(trxs)) - expectTrue(t, test_helpers.ListContainsString(trxs, trx1CID.String())) - expectTrue(t, test_helpers.ListContainsString(trxs, trx2CID.String())) - expectTrue(t, test_helpers.ListContainsString(trxs, trx3CID.String())) - expectTrue(t, test_helpers.ListContainsString(trxs, trx4CID.String())) - expectTrue(t, test_helpers.ListContainsString(trxs, trx5CID.String())) - - transactions := mocks.MockBlock.Transactions() - type txResult struct { - TxType uint8 `db:"tx_type"` - Value string - } - for _, c := range trxs { - dc, err := cid.Decode(c) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - var data []byte - err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - txTypeAndValueStr := `SELECT tx_type, value FROM eth.transaction_cids WHERE cid = $1` - switch c { - case trx1CID.String(): - require.Equal(t, tx1, data) - txRes := new(txResult) - err = sqlxdb.QueryRowx(txTypeAndValueStr, c).StructScan(txRes) - if err != nil { - t.Fatal(err) - } - if txRes.TxType != 0 { - t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType) - } - if txRes.Value != transactions[0].Value().String() { - t.Fatalf("expected tx value %s got %s", transactions[0].Value().String(), txRes.Value) - } - case trx2CID.String(): - require.Equal(t, tx2, data) - txRes := new(txResult) - err = sqlxdb.QueryRowx(txTypeAndValueStr, c).StructScan(txRes) - if err != nil { - t.Fatal(err) - } - if txRes.TxType != 0 { - t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType) - } - if txRes.Value != transactions[1].Value().String() { - t.Fatalf("expected tx value %s got %s", transactions[1].Value().String(), txRes.Value) - } - case trx3CID.String(): - require.Equal(t, tx3, data) - 
txRes := new(txResult) - err = sqlxdb.QueryRowx(txTypeAndValueStr, c).StructScan(txRes) - if err != nil { - t.Fatal(err) - } - if txRes.TxType != 0 { - t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType) - } - if txRes.Value != transactions[2].Value().String() { - t.Fatalf("expected tx value %s got %s", transactions[2].Value().String(), txRes.Value) - } - case trx4CID.String(): - require.Equal(t, tx4, data) - txRes := new(txResult) - err = sqlxdb.QueryRowx(txTypeAndValueStr, c).StructScan(txRes) - if err != nil { - t.Fatal(err) - } - if txRes.TxType != types.AccessListTxType { - t.Fatalf("expected AccessListTxType (1), got %d", txRes.TxType) - } - if txRes.Value != transactions[3].Value().String() { - t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value) - } - accessListElementModels := make([]models.AccessListElementModel, 0) - pgStr = "SELECT cast(access_list_elements.block_number AS TEXT), access_list_elements.index, access_list_elements.tx_id, " + - "access_list_elements.address, access_list_elements.storage_keys FROM eth.access_list_elements " + - "INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.tx_hash) WHERE cid = $1 ORDER BY access_list_elements.index ASC" - err = sqlxdb.Select(&accessListElementModels, pgStr, c) - if err != nil { - t.Fatal(err) - } - if len(accessListElementModels) != 2 { - t.Fatalf("expected two access list entries, got %d", len(accessListElementModels)) - } - model1 := models.AccessListElementModel{ - BlockNumber: mocks.BlockNumber.String(), - Index: accessListElementModels[0].Index, - Address: accessListElementModels[0].Address, - } - model2 := models.AccessListElementModel{ - BlockNumber: mocks.BlockNumber.String(), - Index: accessListElementModels[1].Index, - Address: accessListElementModels[1].Address, - StorageKeys: accessListElementModels[1].StorageKeys, - } - require.Equal(t, mocks.AccessListEntry1Model, model1) - require.Equal(t, mocks.AccessListEntry2Model, model2) - case 
trx5CID.String(): - require.Equal(t, tx5, data) - txRes := new(txResult) - err = sqlxdb.QueryRowx(txTypeAndValueStr, c).StructScan(txRes) - if err != nil { - t.Fatal(err) - } - if txRes.TxType != types.DynamicFeeTxType { - t.Fatalf("expected DynamicFeeTxType (2), got %d", txRes.TxType) - } - if txRes.Value != transactions[4].Value().String() { - t.Fatalf("expected tx value %s got %s", transactions[4].Value().String(), txRes.Value) - } - } - } -} - -func testPublishAndIndexLogIPLDs(t *testing.T) { - rcts := make([]string, 0) - pgStr := `SELECT receipt_cids.leaf_cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids - WHERE receipt_cids.tx_id = transaction_cids.tx_hash - AND transaction_cids.header_id = header_cids.block_hash - AND header_cids.block_number = $1 - ORDER BY transaction_cids.index` - err = sqlxdb.Select(&rcts, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - - type logIPLD struct { - Index int `db:"index"` - Address string `db:"address"` - Data []byte `db:"data"` - Topic0 string `db:"topic0"` - Topic1 string `db:"topic1"` - } - for i := range rcts { - results := make([]logIPLD, 0) - pgStr = `SELECT log_cids.index, log_cids.address, log_cids.topic0, log_cids.topic1, data FROM eth.log_cids - INNER JOIN eth.receipt_cids ON (log_cids.rct_id = receipt_cids.tx_id) - INNER JOIN public.blocks ON (log_cids.leaf_mh_key = blocks.key) - WHERE receipt_cids.leaf_cid = $1 ORDER BY eth.log_cids.index ASC` - err = sqlxdb.Select(&results, pgStr, rcts[i]) - require.NoError(t, err) - - // expecting MockLog1 and MockLog2 for mockReceipt4 - expectedLogs := mocks.MockReceipts[i].Logs - require.Equal(t, len(expectedLogs), len(results)) - - var nodeElements []interface{} - for idx, r := range results { - // Attempt to decode the log leaf node. 
- err = rlp.DecodeBytes(r.Data, &nodeElements) - require.NoError(t, err) - if len(nodeElements) == 2 { - logRaw, err := rlp.EncodeToBytes(expectedLogs[idx]) - require.NoError(t, err) - // 2nd element of the leaf node contains the encoded log data. - require.Equal(t, nodeElements[1].([]byte), logRaw) - } else { - logRaw, err := rlp.EncodeToBytes(expectedLogs[idx]) - require.NoError(t, err) - // raw log was IPLDized - require.Equal(t, r.Data, logRaw) - } - } - } -} - -func testPublishAndIndexReceiptIPLDs(t *testing.T) { - // check receipts were properly indexed and published - rcts := make([]string, 0) - pgStr := `SELECT receipt_cids.leaf_cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids - WHERE receipt_cids.tx_id = transaction_cids.tx_hash - AND transaction_cids.header_id = header_cids.block_hash - AND header_cids.block_number = $1 order by transaction_cids.index` - err = sqlxdb.Select(&rcts, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 5, len(rcts)) - expectTrue(t, test_helpers.ListContainsString(rcts, rct1CID.String())) - expectTrue(t, test_helpers.ListContainsString(rcts, rct2CID.String())) - expectTrue(t, test_helpers.ListContainsString(rcts, rct3CID.String())) - expectTrue(t, test_helpers.ListContainsString(rcts, rct4CID.String())) - expectTrue(t, test_helpers.ListContainsString(rcts, rct5CID.String())) - - for idx, c := range rcts { - result := make([]models.IPLDModel, 0) - pgStr = `SELECT data - FROM eth.receipt_cids - INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key) - WHERE receipt_cids.leaf_cid = $1` - err = sqlxdb.Select(&result, pgStr, c) - if err != nil { - t.Fatal(err) - } - - // Decode the log leaf node. 
- var nodeElements []interface{} - err = rlp.DecodeBytes(result[0].Data, &nodeElements) - require.NoError(t, err) - - expectedRct, err := mocks.MockReceipts[idx].MarshalBinary() - require.NoError(t, err) - - require.Equal(t, expectedRct, nodeElements[1].([]byte)) - - dc, err := cid.Decode(c) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - var data []byte - err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - postStatePgStr := `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1` - switch c { - case rct1CID.String(): - require.Equal(t, rctLeaf1, data) - var postStatus uint64 - pgStr = `SELECT post_status FROM eth.receipt_cids WHERE leaf_cid = $1` - err = sqlxdb.Get(&postStatus, pgStr, c) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.ExpectedPostStatus, postStatus) - case rct2CID.String(): - require.Equal(t, rctLeaf2, data) - var postState string - err = sqlxdb.Get(&postState, postStatePgStr, c) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.ExpectedPostState1, postState) - case rct3CID.String(): - require.Equal(t, rctLeaf3, data) - var postState string - err = sqlxdb.Get(&postState, postStatePgStr, c) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.ExpectedPostState2, postState) - case rct4CID.String(): - require.Equal(t, rctLeaf4, data) - var postState string - err = sqlxdb.Get(&postState, postStatePgStr, c) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.ExpectedPostState3, postState) - case rct5CID.String(): - require.Equal(t, rctLeaf5, data) - var postState string - err = sqlxdb.Get(&postState, postStatePgStr, c) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.ExpectedPostState3, postState) - } - } -} - -func testPublishAndIndexStateIPLDs(t *testing.T) { - // check that state nodes were properly indexed and published - 
stateNodes := make([]models.StateNodeModel, 0) - pgStr := `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id - FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) - WHERE header_cids.block_number = $1 AND node_type != 3` - err = sqlxdb.Select(&stateNodes, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 2, len(stateNodes)) - for _, stateNode := range stateNodes { - var data []byte - dc, err := cid.Decode(stateNode.CID) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - pgStr = `SELECT cast(block_number AS TEXT), header_id, state_path, cast(balance AS TEXT), nonce, code_hash, storage_root from eth.state_accounts WHERE header_id = $1 AND state_path = $2` - var account models.StateAccountModel - err = sqlxdb.Get(&account, pgStr, stateNode.HeaderID, stateNode.Path) - if err != nil { - t.Fatal(err) - } - if stateNode.CID == state1CID.String() { - require.Equal(t, 2, stateNode.NodeType) - require.Equal(t, common.BytesToHash(mocks.ContractLeafKey).Hex(), stateNode.StateKey) - require.Equal(t, []byte{'\x06'}, stateNode.Path) - require.Equal(t, mocks.ContractLeafNode, data) - require.Equal(t, models.StateAccountModel{ - BlockNumber: mocks.BlockNumber.String(), - HeaderID: account.HeaderID, - StatePath: stateNode.Path, - Balance: "0", - CodeHash: mocks.ContractCodeHash.Bytes(), - StorageRoot: mocks.ContractRoot, - Nonce: 1, - }, account) - } - if stateNode.CID == state2CID.String() { - require.Equal(t, 2, stateNode.NodeType) - require.Equal(t, common.BytesToHash(mocks.AccountLeafKey).Hex(), stateNode.StateKey) - require.Equal(t, []byte{'\x0c'}, stateNode.Path) - require.Equal(t, mocks.AccountLeafNode, data) - 
require.Equal(t, models.StateAccountModel{ - BlockNumber: mocks.BlockNumber.String(), - HeaderID: account.HeaderID, - StatePath: stateNode.Path, - Balance: "1000", - CodeHash: mocks.AccountCodeHash.Bytes(), - StorageRoot: mocks.AccountRoot, - Nonce: 0, - }, account) - } - } - - // check that Removed state nodes were properly indexed and published - stateNodes = make([]models.StateNodeModel, 0) - pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id - FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) - WHERE header_cids.block_number = $1 AND node_type = 3` - err = sqlxdb.Select(&stateNodes, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 2, len(stateNodes)) - for idx, stateNode := range stateNodes { - var data []byte - dc, err := cid.Decode(stateNode.CID) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - require.Equal(t, shared.RemovedNodeMhKey, prefixedKey) - err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - - if idx == 0 { - require.Equal(t, shared.RemovedNodeStateCID, stateNode.CID) - require.Equal(t, common.BytesToHash(mocks.RemovedLeafKey).Hex(), stateNode.StateKey) - require.Equal(t, []byte{'\x02'}, stateNode.Path) - require.Equal(t, []byte{}, data) - } - if idx == 1 { - require.Equal(t, shared.RemovedNodeStateCID, stateNode.CID) - require.Equal(t, common.BytesToHash(mocks.Contract2LeafKey).Hex(), stateNode.StateKey) - require.Equal(t, []byte{'\x07'}, stateNode.Path) - require.Equal(t, []byte{}, data) - } - } -} - -func testPublishAndIndexStorageIPLDs(t *testing.T) { - // check that storage nodes were properly indexed - storageNodes := make([]models.StorageNodeWithStateKeyModel, 0) - pgStr := `SELECT cast(storage_cids.block_number AS 
TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path - FROM eth.storage_cids, eth.state_cids, eth.header_cids - WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id) - AND state_cids.header_id = header_cids.block_hash - AND header_cids.block_number = $1 - AND storage_cids.node_type != 3` - err = sqlxdb.Select(&storageNodes, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 1, len(storageNodes)) - require.Equal(t, models.StorageNodeWithStateKeyModel{ - BlockNumber: mocks.BlockNumber.String(), - CID: storageCID.String(), - NodeType: 2, - StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(), - StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(), - Path: []byte{}, - }, storageNodes[0]) - var data []byte - dc, err := cid.Decode(storageNodes[0].CID) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.StorageLeafNode, data) - - // check that Removed storage nodes were properly indexed - storageNodes = make([]models.StorageNodeWithStateKeyModel, 0) - pgStr = `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path - FROM eth.storage_cids, eth.state_cids, eth.header_cids - WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id) - AND state_cids.header_id = header_cids.block_hash - AND header_cids.block_number = $1 - AND storage_cids.node_type = 3 - ORDER BY storage_path` - err = sqlxdb.Select(&storageNodes, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 3, 
len(storageNodes)) - gotStorageNodes := make(map[string]models.StorageNodeWithStateKeyModel, 3) - for _, model := range storageNodes { - gotStorageNodes[model.StorageKey] = model - } - expectedStorageNodes := map[string]models.StorageNodeWithStateKeyModel{ - common.BytesToHash(mocks.RemovedLeafKey).Hex(): { - BlockNumber: mocks.BlockNumber.String(), - CID: shared.RemovedNodeStorageCID, - NodeType: 3, - StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(), - StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(), - Path: []byte{'\x03'}, - }, - common.BytesToHash(mocks.Storage2LeafKey).Hex(): { - BlockNumber: mocks.BlockNumber.String(), - CID: shared.RemovedNodeStorageCID, - NodeType: 3, - StorageKey: common.BytesToHash(mocks.Storage2LeafKey).Hex(), - StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(), - Path: []byte{'\x0e'}, - }, - common.BytesToHash(mocks.Storage3LeafKey).Hex(): { - BlockNumber: mocks.BlockNumber.String(), - CID: shared.RemovedNodeStorageCID, - NodeType: 3, - StorageKey: common.BytesToHash(mocks.Storage3LeafKey).Hex(), - StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(), - Path: []byte{'\x0f'}, - }, - } - for storageKey, storageNode := range gotStorageNodes { - require.Equal(t, expectedStorageNodes[storageKey], storageNode) - dc, err = cid.Decode(storageNode.CID) - if err != nil { - t.Fatal(err) - } - mhKey = dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey = blockstore.BlockPrefix.String() + mhKey.String() - require.Equal(t, shared.RemovedNodeMhKey, prefixedKey) - err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, []byte{}, data) - } -} - -func testLoadEmptyWatchedAddresses(t *testing.T) { - expectedData := []common.Address{} - - rows, err := ind.LoadWatchedAddresses() - require.NoError(t, err) - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } -} - -type res struct 
{ - Address string `db:"address"` - CreatedAt uint64 `db:"created_at"` - WatchedAt uint64 `db:"watched_at"` - LastFilledAt uint64 `db:"last_filled_at"` -} - -func testInsertWatchedAddresses(t *testing.T, resetAndDumpData func(*testing.T)) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - } - expectedData := []res{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - WatchedAt: watchedAt1, - LastFilledAt: lastFilledAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - WatchedAt: watchedAt1, - LastFilledAt: lastFilledAt, - }, - } - - err = ind.InsertWatchedAddresses(args, big.NewInt(int64(watchedAt1))) - require.NoError(t, err) - resetAndDumpData(t) - - rows := []res{} - err = sqlxdb.Select(&rows, watchedAddressesPgGet) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } -} - -func testInsertAlreadyWatchedAddresses(t *testing.T, resetAndDumpData func(*testing.T)) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - } - expectedData := []res{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - WatchedAt: watchedAt1, - LastFilledAt: lastFilledAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - WatchedAt: watchedAt1, - LastFilledAt: lastFilledAt, - }, - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - WatchedAt: watchedAt2, - LastFilledAt: lastFilledAt, - }, - } - - err = ind.InsertWatchedAddresses(args, big.NewInt(int64(watchedAt2))) - require.NoError(t, err) - resetAndDumpData(t) - - rows := []res{} - err = sqlxdb.Select(&rows, watchedAddressesPgGet) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, 
len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } -} - -func testRemoveWatchedAddresses(t *testing.T, resetAndDumpData func(*testing.T)) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - } - expectedData := []res{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - WatchedAt: watchedAt1, - LastFilledAt: lastFilledAt, - }, - } - - err = ind.RemoveWatchedAddresses(args) - require.NoError(t, err) - resetAndDumpData(t) - - rows := []res{} - err = sqlxdb.Select(&rows, watchedAddressesPgGet) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } -} - -func testRemoveNonWatchedAddresses(t *testing.T, resetAndDumpData func(*testing.T)) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - } - expectedData := []res{} - - err = ind.RemoveWatchedAddresses(args) - require.NoError(t, err) - resetAndDumpData(t) - - rows := []res{} - err = sqlxdb.Select(&rows, watchedAddressesPgGet) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } -} - -func testSetWatchedAddresses(t *testing.T, resetAndDumpData func(*testing.T)) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - }, - } - expectedData := []res{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - WatchedAt: watchedAt2, - LastFilledAt: lastFilledAt, - }, - { - Address: 
contract2Address, - CreatedAt: contract2CreatedAt, - WatchedAt: watchedAt2, - LastFilledAt: lastFilledAt, - }, - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - WatchedAt: watchedAt2, - LastFilledAt: lastFilledAt, - }, - } - - err = ind.SetWatchedAddresses(args, big.NewInt(int64(watchedAt2))) - require.NoError(t, err) - resetAndDumpData(t) - - rows := []res{} - err = sqlxdb.Select(&rows, watchedAddressesPgGet) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } -} - -func testSetAlreadyWatchedAddresses(t *testing.T, resetAndDumpData func(*testing.T)) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract4Address, - CreatedAt: contract4CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - }, - } - expectedData := []res{ - { - Address: contract4Address, - CreatedAt: contract4CreatedAt, - WatchedAt: watchedAt3, - LastFilledAt: lastFilledAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - WatchedAt: watchedAt3, - LastFilledAt: lastFilledAt, - }, - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - WatchedAt: watchedAt3, - LastFilledAt: lastFilledAt, - }, - } - - err = ind.SetWatchedAddresses(args, big.NewInt(int64(watchedAt3))) - require.NoError(t, err) - resetAndDumpData(t) - - rows := []res{} - err = sqlxdb.Select(&rows, watchedAddressesPgGet) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } -} - -func testLoadWatchedAddresses(t *testing.T) { - expectedData := []common.Address{ - common.HexToAddress(contract4Address), - common.HexToAddress(contract2Address), - common.HexToAddress(contract3Address), - } - - rows, err := ind.LoadWatchedAddresses() - require.NoError(t, err) - - 
expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } -} - -func testClearWatchedAddresses(t *testing.T, resetAndDumpData func(*testing.T)) { - expectedData := []res{} - - err = ind.ClearWatchedAddresses() - require.NoError(t, err) - resetAndDumpData(t) - - rows := []res{} - err = sqlxdb.Select(&rows, watchedAddressesPgGet) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } -} - -func testClearEmptyWatchedAddresses(t *testing.T, resetAndDumpData func(*testing.T)) { - expectedData := []res{} - - err = ind.ClearWatchedAddresses() - require.NoError(t, err) - resetAndDumpData(t) - - rows := []res{} - err = sqlxdb.Select(&rows, watchedAddressesPgGet) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } -} diff --git a/statediff/indexer/database/file/mainnet_tests/indexer_test.go b/statediff/indexer/database/file/mainnet_tests/indexer_test.go index fc1da7285..392fb2ee3 100644 --- a/statediff/indexer/database/file/mainnet_tests/indexer_test.go +++ b/statediff/indexer/database/file/mainnet_tests/indexer_test.go @@ -24,20 +24,22 @@ import ( "os" "testing" - "github.com/jmoiron/sqlx" "github.com/stretchr/testify/require" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/statediff/indexer/database/file" + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" - "github.com/ethereum/go-ethereum/statediff/indexer/mocks" + "github.com/ethereum/go-ethereum/statediff/indexer/test" "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" ) var ( - sqlxdb *sqlx.DB + err 
error + db sql.Database + ind interfaces.StateDiffIndexer chainConf = params.MainnetChainConfig ) @@ -53,67 +55,44 @@ func init() { } func TestPushBlockAndState(t *testing.T) { - conf := test_helpers.DefaultTestConfig - rawURL := os.Getenv(test_helpers.TEST_RAW_URL) - if rawURL == "" { - fmt.Printf("Warning: no raw url configured for statediffing mainnet tests, will look for local file and"+ - "then try default endpoint (%s)\r\n", test_helpers.DefaultTestConfig.RawURL) - } else { - conf.RawURL = rawURL - } + conf := test_helpers.GetTestConfig() + for _, blockNumber := range test_helpers.ProblemBlocks { conf.BlockNumber = big.NewInt(blockNumber) tb, trs, err := test_helpers.TestBlockAndReceipts(conf) require.NoError(t, err) + testPushBlockAndState(t, tb, trs) } + testBlock, testReceipts, err := test_helpers.TestBlockAndReceiptsFromEnv(conf) require.NoError(t, err) + testPushBlockAndState(t, testBlock, testReceipts) } func testPushBlockAndState(t *testing.T, block *types.Block, receipts types.Receipts) { t.Run("Test PushBlock and PushStateNode", func(t *testing.T) { - setup(t, block, receipts) - dumpData(t) - tearDown(t) + setupMainnetIndexer(t) + defer dumpData(t) + defer tearDown(t) + + test.TestBlock(t, ind, block, receipts) }) } -func setup(t *testing.T, testBlock *types.Block, testReceipts types.Receipts) { +func setupMainnetIndexer(t *testing.T) { if _, err := os.Stat(file.CSVTestConfig.FilePath); !errors.Is(err, os.ErrNotExist) { err := os.Remove(file.CSVTestConfig.FilePath) require.NoError(t, err) } - ind, err := file.NewStateDiffIndexer(context.Background(), chainConf, file.CSVTestConfig) - require.NoError(t, err) - var tx interfaces.Batch - tx, err = ind.PushBlock( - testBlock, - testReceipts, - testBlock.Difficulty()) + + ind, err = file.NewStateDiffIndexer(context.Background(), chainConf, file.CSVTestConfig) require.NoError(t, err) - defer func() { - if err := tx.Submit(err); err != nil { - t.Fatal(err) - } - if err := ind.Close(); err != nil { - 
t.Fatal(err) - } - }() - for _, node := range mocks.StateDiffs { - err = ind.PushStateNode(tx, node, testBlock.Hash().String()) - require.NoError(t, err) - } - - require.Equal(t, testBlock.Number().String(), tx.(*file.BatchTx).BlockNumber) - - connStr := postgres.DefaultConfig.DbConnectionString() - - sqlxdb, err = sqlx.Connect("postgres", connStr) + db, err = postgres.SetupSQLXDB() if err != nil { - t.Fatalf("failed to connect to db with connection string: %s err: %v", connStr, err) + t.Fatal(err) } } @@ -121,14 +100,13 @@ func dumpData(t *testing.T) { sqlFileBytes, err := os.ReadFile(file.CSVTestConfig.FilePath) require.NoError(t, err) - _, err = sqlxdb.Exec(string(sqlFileBytes)) + _, err = db.Exec(context.Background(), string(sqlFileBytes)) require.NoError(t, err) } func tearDown(t *testing.T) { - file.TearDownDB(t, sqlxdb) - err := os.Remove(file.CSVTestConfig.FilePath) - require.NoError(t, err) - err = sqlxdb.Close() - require.NoError(t, err) + test_helpers.TearDownDB(t, db) + require.NoError(t, db.Close()) + + require.NoError(t, os.Remove(file.CSVTestConfig.FilePath)) } diff --git a/statediff/indexer/database/file/sql_indexer_legacy_test.go b/statediff/indexer/database/file/sql_indexer_legacy_test.go index 683fd814b..02ced177e 100644 --- a/statediff/indexer/database/file/sql_indexer_legacy_test.go +++ b/statediff/indexer/database/file/sql_indexer_legacy_test.go @@ -22,93 +22,80 @@ import ( "os" "testing" - "github.com/jmoiron/sqlx" - "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" "github.com/ethereum/go-ethereum/statediff/indexer/database/file" + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" - "github.com/ethereum/go-ethereum/statediff/indexer/ipld" + "github.com/ethereum/go-ethereum/statediff/indexer/test" + "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" ) 
-func setupLegacy(t *testing.T) { - mockLegacyBlock = legacyData.MockBlock - legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256) +var ( + db sql.Database + err error + ind interfaces.StateDiffIndexer +) +func setupLegacySQLIndexer(t *testing.T) { if _, err := os.Stat(file.SQLTestConfig.FilePath); !errors.Is(err, os.ErrNotExist) { err := os.Remove(file.SQLTestConfig.FilePath) require.NoError(t, err) } - ind, err := file.NewStateDiffIndexer(context.Background(), legacyData.Config, file.SQLTestConfig) - require.NoError(t, err) - var tx interfaces.Batch - tx, err = ind.PushBlock( - mockLegacyBlock, - legacyData.MockReceipts, - legacyData.MockBlock.Difficulty()) + + ind, err = file.NewStateDiffIndexer(context.Background(), test.LegacyConfig, file.SQLTestConfig) require.NoError(t, err) - defer func() { - if err := tx.Submit(err); err != nil { - t.Fatal(err) - } - if err := ind.Close(); err != nil { - t.Fatal(err) - } - }() - - for _, node := range legacyData.StateDiffs { - err = ind.PushStateNode(tx, node, legacyData.MockBlock.Hash().String()) - require.NoError(t, err) - } - - require.Equal(t, legacyData.BlockNumber.String(), tx.(*file.BatchTx).BlockNumber) - - connStr := postgres.DefaultConfig.DbConnectionString() - sqlxdb, err = sqlx.Connect("postgres", connStr) + db, err = postgres.SetupSQLXDB() if err != nil { - t.Fatalf("failed to connect to db with connection string: %s err: %v", connStr, err) + t.Fatal(err) } } +func setupLegacySQL(t *testing.T) { + setupLegacySQLIndexer(t) + test.SetupLegacyTestData(t, ind) +} + func dumpFileData(t *testing.T) { + err := test_helpers.DedupFile(file.SQLTestConfig.FilePath) + require.NoError(t, err) + sqlFileBytes, err := os.ReadFile(file.SQLTestConfig.FilePath) require.NoError(t, err) - _, err = sqlxdb.Exec(string(sqlFileBytes)) + _, err = db.Exec(context.Background(), string(sqlFileBytes)) require.NoError(t, err) } func resetAndDumpWatchedAddressesFileData(t *testing.T) { - 
resetDB(t) + test_helpers.TearDownDB(t, db) sqlFileBytes, err := os.ReadFile(file.SQLTestConfig.WatchedAddressesFilePath) require.NoError(t, err) - _, err = sqlxdb.Exec(string(sqlFileBytes)) + _, err = db.Exec(context.Background(), string(sqlFileBytes)) require.NoError(t, err) } func tearDown(t *testing.T) { - file.TearDownDB(t, sqlxdb) + test_helpers.TearDownDB(t, db) + require.NoError(t, db.Close()) - err := os.Remove(file.SQLTestConfig.FilePath) - require.NoError(t, err) + require.NoError(t, os.Remove(file.SQLTestConfig.FilePath)) if err := os.Remove(file.SQLTestConfig.WatchedAddressesFilePath); !errors.Is(err, os.ErrNotExist) { require.NoError(t, err) } - - err = sqlxdb.Close() - require.NoError(t, err) } -func TestSQLFileIndexerLegacy(t *testing.T) { +func TestLegacySQLFileIndexer(t *testing.T) { t.Run("Publish and index header IPLDs", func(t *testing.T) { - setupLegacy(t) + setupLegacySQL(t) dumpFileData(t) defer tearDown(t) - testLegacyPublishAndIndexHeaderIPLDs(t) + + test.TestLegacyIndexer(t, db) }) } diff --git a/statediff/indexer/database/file/sql_indexer_test.go b/statediff/indexer/database/file/sql_indexer_test.go index 8a0bb5115..0a73a8c47 100644 --- a/statediff/indexer/database/file/sql_indexer_test.go +++ b/statediff/indexer/database/file/sql_indexer_test.go @@ -19,16 +19,16 @@ package file_test import ( "context" "errors" + "math/big" "os" "testing" - "github.com/jmoiron/sqlx" "github.com/stretchr/testify/require" "github.com/ethereum/go-ethereum/statediff/indexer/database/file" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" - "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" "github.com/ethereum/go-ethereum/statediff/indexer/mocks" + "github.com/ethereum/go-ethereum/statediff/indexer/test" ) func setupIndexer(t *testing.T) { @@ -45,37 +45,20 @@ func setupIndexer(t *testing.T) { ind, err = file.NewStateDiffIndexer(context.Background(), mocks.TestConfig, file.SQLTestConfig) require.NoError(t, err) - connStr := 
postgres.DefaultConfig.DbConnectionString() - sqlxdb, err = sqlx.Connect("postgres", connStr) + db, err = postgres.SetupSQLXDB() if err != nil { - t.Fatalf("failed to connect to db with connection string: %s err: %v", connStr, err) + t.Fatal(err) } } func setup(t *testing.T) { setupIndexer(t) - var tx interfaces.Batch - tx, err = ind.PushBlock( - mockBlock, - mocks.MockReceipts, - mocks.MockBlock.Difficulty()) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := tx.Submit(err); err != nil { - t.Fatal(err) - } - if err := ind.Close(); err != nil { - t.Fatal(err) - } - }() - for _, node := range mocks.StateDiffs { - err = ind.PushStateNode(tx, node, mockBlock.Hash().String()) - require.NoError(t, err) - } + test.SetupTestData(t, ind) +} - require.Equal(t, mocks.BlockNumber.String(), tx.(*file.BatchTx).BlockNumber) +func setupSQLNonCanonical(t *testing.T) { + setupIndexer(t) + test.SetupTestDataNonCanonical(t, ind) } func TestSQLFileIndexer(t *testing.T) { @@ -84,7 +67,7 @@ func TestSQLFileIndexer(t *testing.T) { dumpFileData(t) defer tearDown(t) - testPublishAndIndexHeaderIPLDs(t) + test.TestPublishAndIndexHeaderIPLDs(t, db) }) t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) { @@ -92,7 +75,7 @@ func TestSQLFileIndexer(t *testing.T) { dumpFileData(t) defer tearDown(t) - testPublishAndIndexTransactionIPLDs(t) + test.TestPublishAndIndexTransactionIPLDs(t, db) }) t.Run("Publish and index log IPLDs for multiple receipt of a specific block", func(t *testing.T) { @@ -100,7 +83,7 @@ func TestSQLFileIndexer(t *testing.T) { dumpFileData(t) defer tearDown(t) - testPublishAndIndexLogIPLDs(t) + test.TestPublishAndIndexLogIPLDs(t, db) }) t.Run("Publish and index receipt IPLDs in a single tx", func(t *testing.T) { @@ -108,7 +91,7 @@ func TestSQLFileIndexer(t *testing.T) { dumpFileData(t) defer tearDown(t) - testPublishAndIndexReceiptIPLDs(t) + test.TestPublishAndIndexReceiptIPLDs(t, db) }) t.Run("Publish and index state IPLDs in a single 
tx", func(t *testing.T) { @@ -116,7 +99,7 @@ func TestSQLFileIndexer(t *testing.T) { dumpFileData(t) defer tearDown(t) - testPublishAndIndexStateIPLDs(t) + test.TestPublishAndIndexStateIPLDs(t, db) }) t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) { @@ -124,7 +107,57 @@ func TestSQLFileIndexer(t *testing.T) { dumpFileData(t) defer tearDown(t) - testPublishAndIndexStorageIPLDs(t) + test.TestPublishAndIndexStorageIPLDs(t, db) + }) +} + +func TestSQLFileIndexerNonCanonical(t *testing.T) { + t.Run("Publish and index header", func(t *testing.T) { + setupSQLNonCanonical(t) + dumpFileData(t) + defer tearDown(t) + + test.TestPublishAndIndexHeaderNonCanonical(t, db) + }) + + t.Run("Publish and index transactions", func(t *testing.T) { + setupSQLNonCanonical(t) + dumpFileData(t) + defer tearDown(t) + + test.TestPublishAndIndexTransactionsNonCanonical(t, db) + }) + + t.Run("Publish and index receipts", func(t *testing.T) { + setupSQLNonCanonical(t) + dumpFileData(t) + defer tearDown(t) + + test.TestPublishAndIndexReceiptsNonCanonical(t, db) + }) + + t.Run("Publish and index logs", func(t *testing.T) { + setupSQLNonCanonical(t) + dumpFileData(t) + defer tearDown(t) + + test.TestPublishAndIndexLogsNonCanonical(t, db) + }) + + t.Run("Publish and index state nodes", func(t *testing.T) { + setupSQLNonCanonical(t) + dumpFileData(t) + defer tearDown(t) + + test.TestPublishAndIndexStateNonCanonical(t, db) + }) + + t.Run("Publish and index storage nodes", func(t *testing.T) { + setupSQLNonCanonical(t) + dumpFileData(t) + defer tearDown(t) + + test.TestPublishAndIndexStorageNonCanonical(t, db) }) } @@ -133,42 +166,88 @@ func TestSQLFileWatchAddressMethods(t *testing.T) { defer tearDown(t) t.Run("Load watched addresses (empty table)", func(t *testing.T) { - testLoadEmptyWatchedAddresses(t) + test.TestLoadEmptyWatchedAddresses(t, ind) }) t.Run("Insert watched addresses", func(t *testing.T) { - testInsertWatchedAddresses(t, resetAndDumpWatchedAddressesFileData) 
+ args := mocks.GetInsertWatchedAddressesArgs() + err = ind.InsertWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt1))) + require.NoError(t, err) + + resetAndDumpWatchedAddressesFileData(t) + + test.TestInsertWatchedAddresses(t, db) }) t.Run("Insert watched addresses (some already watched)", func(t *testing.T) { - testInsertAlreadyWatchedAddresses(t, resetAndDumpWatchedAddressesFileData) + args := mocks.GetInsertAlreadyWatchedAddressesArgs() + err = ind.InsertWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt2))) + require.NoError(t, err) + + resetAndDumpWatchedAddressesFileData(t) + + test.TestInsertAlreadyWatchedAddresses(t, db) }) t.Run("Remove watched addresses", func(t *testing.T) { - testRemoveWatchedAddresses(t, resetAndDumpWatchedAddressesFileData) + args := mocks.GetRemoveWatchedAddressesArgs() + err = ind.RemoveWatchedAddresses(args) + require.NoError(t, err) + + resetAndDumpWatchedAddressesFileData(t) + + test.TestRemoveWatchedAddresses(t, db) }) t.Run("Remove watched addresses (some non-watched)", func(t *testing.T) { - testRemoveNonWatchedAddresses(t, resetAndDumpWatchedAddressesFileData) + args := mocks.GetRemoveNonWatchedAddressesArgs() + err = ind.RemoveWatchedAddresses(args) + require.NoError(t, err) + + resetAndDumpWatchedAddressesFileData(t) + + test.TestRemoveNonWatchedAddresses(t, db) }) t.Run("Set watched addresses", func(t *testing.T) { - testSetWatchedAddresses(t, resetAndDumpWatchedAddressesFileData) + args := mocks.GetSetWatchedAddressesArgs() + err = ind.SetWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt2))) + require.NoError(t, err) + + resetAndDumpWatchedAddressesFileData(t) + + test.TestSetWatchedAddresses(t, db) }) t.Run("Set watched addresses (some already watched)", func(t *testing.T) { - testSetAlreadyWatchedAddresses(t, resetAndDumpWatchedAddressesFileData) + args := mocks.GetSetAlreadyWatchedAddressesArgs() + err = ind.SetWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt3))) + require.NoError(t, err) + + 
resetAndDumpWatchedAddressesFileData(t) + + test.TestSetAlreadyWatchedAddresses(t, db) }) t.Run("Load watched addresses", func(t *testing.T) { - testLoadWatchedAddresses(t) + test.TestLoadWatchedAddresses(t, ind) }) t.Run("Clear watched addresses", func(t *testing.T) { - testClearWatchedAddresses(t, resetAndDumpWatchedAddressesFileData) + err = ind.ClearWatchedAddresses() + require.NoError(t, err) + + resetAndDumpWatchedAddressesFileData(t) + + test.TestClearWatchedAddresses(t, db) }) t.Run("Clear watched addresses (empty table)", func(t *testing.T) { - testClearEmptyWatchedAddresses(t, resetAndDumpWatchedAddressesFileData) + err = ind.ClearWatchedAddresses() + require.NoError(t, err) + + resetAndDumpWatchedAddressesFileData(t) + + test.TestClearEmptyWatchedAddresses(t, db) }) } diff --git a/statediff/indexer/database/file/test_helpers.go b/statediff/indexer/database/file/test_helpers.go deleted file mode 100644 index 27a1581a4..000000000 --- a/statediff/indexer/database/file/test_helpers.go +++ /dev/null @@ -1,68 +0,0 @@ -package file - -import ( - "testing" - - "github.com/jmoiron/sqlx" -) - -// TearDownDB is used to tear down the watcher dbs after tests -func TearDownDB(t *testing.T, db *sqlx.DB) { - tx, err := db.Begin() - if err != nil { - t.Fatal(err) - } - - _, err = tx.Exec(`DELETE FROM eth.header_cids`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(`DELETE FROM eth.uncle_cids`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(`DELETE FROM eth.transaction_cids`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(`DELETE FROM eth.receipt_cids`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(`DELETE FROM eth.state_cids`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(`DELETE FROM eth.storage_cids`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(`DELETE FROM eth.state_accounts`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(`DELETE FROM eth.access_list_elements`) - if err != nil { - t.Fatal(err) - } - 
_, err = tx.Exec(`DELETE FROM eth.log_cids`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(`DELETE FROM blocks`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(`DELETE FROM nodes`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(`DELETE FROM eth_meta.watched_addresses`) - if err != nil { - t.Fatal(err) - } - err = tx.Commit() - if err != nil { - t.Fatal(err) - } -} diff --git a/statediff/indexer/database/sql/indexer_shared_test.go b/statediff/indexer/database/sql/indexer_shared_test.go index db301f160..13fd0c026 100644 --- a/statediff/indexer/database/sql/indexer_shared_test.go +++ b/statediff/indexer/database/sql/indexer_shared_test.go @@ -1,259 +1,21 @@ package sql_test import ( - "bytes" - "context" - "fmt" - "os" - "sort" "testing" - "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" - "github.com/ethereum/go-ethereum/statediff/indexer/ipld" - "github.com/ethereum/go-ethereum/statediff/indexer/mocks" - "github.com/ethereum/go-ethereum/statediff/indexer/models" - "github.com/ethereum/go-ethereum/statediff/indexer/shared" + "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" ) var ( - db sql.Database - err error - ind interfaces.StateDiffIndexer - ipfsPgGet = `SELECT data FROM public.blocks - WHERE key = $1 AND block_number = $2` - tx1, tx2, tx3, tx4, tx5, rct1, rct2, rct3, rct4, rct5 []byte - nonCanonicalBlockRct1, nonCanonicalBlockRct2 []byte - nonCanonicalBlock2Rct1, nonCanonicalBlock2Rct2 []byte - mockBlock, mockNonCanonicalBlock, mockNonCanonicalBlock2 *types.Block - headerCID, mockNonCanonicalHeaderCID, mockNonCanonicalHeader2CID cid.Cid - trx1CID, trx2CID, trx3CID, 
trx4CID, trx5CID cid.Cid - rct1CID, rct2CID, rct3CID, rct4CID, rct5CID cid.Cid - nonCanonicalBlockRct1CID, nonCanonicalBlockRct2CID cid.Cid - nonCanonicalBlock2Rct1CID, nonCanonicalBlock2Rct2CID cid.Cid - rctLeaf1, rctLeaf2, rctLeaf3, rctLeaf4, rctLeaf5 []byte - nonCanonicalBlockRctLeaf1, nonCanonicalBlockRctLeaf2 []byte - nonCanonicalBlock2RctLeaf1, nonCanonicalBlock2RctLeaf2 []byte - state1CID, state2CID, storageCID cid.Cid - contract1Address, contract2Address, contract3Address, contract4Address string - contract1CreatedAt, contract2CreatedAt, contract3CreatedAt, contract4CreatedAt uint64 - lastFilledAt, watchedAt1, watchedAt2, watchedAt3 uint64 + db sql.Database + err error + ind interfaces.StateDiffIndexer ) -func init() { - if os.Getenv("MODE") != "statediff" { - fmt.Println("Skipping statediff test") - os.Exit(0) - } - - // canonical block at LondonBlock height - mockBlock = mocks.MockBlock - txs, rcts := mocks.MockBlock.Transactions(), mocks.MockReceipts - - // non-canonical block at LondonBlock height - mockNonCanonicalBlock = mocks.MockNonCanonicalBlock - nonCanonicalBlockRcts := mocks.MockNonCanonicalBlockReceipts - - // non-canonical block at LondonBlock height + 1 - mockNonCanonicalBlock2 = mocks.MockNonCanonicalBlock2 - nonCanonicalBlock2Rcts := mocks.MockNonCanonicalBlock2Receipts - - // encode mock receipts - buf := new(bytes.Buffer) - txs.EncodeIndex(0, buf) - tx1 = make([]byte, buf.Len()) - copy(tx1, buf.Bytes()) - buf.Reset() - - txs.EncodeIndex(1, buf) - tx2 = make([]byte, buf.Len()) - copy(tx2, buf.Bytes()) - buf.Reset() - - txs.EncodeIndex(2, buf) - tx3 = make([]byte, buf.Len()) - copy(tx3, buf.Bytes()) - buf.Reset() - - txs.EncodeIndex(3, buf) - tx4 = make([]byte, buf.Len()) - copy(tx4, buf.Bytes()) - buf.Reset() - - txs.EncodeIndex(4, buf) - tx5 = make([]byte, buf.Len()) - copy(tx5, buf.Bytes()) - buf.Reset() - - rcts.EncodeIndex(0, buf) - rct1 = make([]byte, buf.Len()) - copy(rct1, buf.Bytes()) - buf.Reset() - - rcts.EncodeIndex(1, buf) - 
rct2 = make([]byte, buf.Len()) - copy(rct2, buf.Bytes()) - buf.Reset() - - rcts.EncodeIndex(2, buf) - rct3 = make([]byte, buf.Len()) - copy(rct3, buf.Bytes()) - buf.Reset() - - rcts.EncodeIndex(3, buf) - rct4 = make([]byte, buf.Len()) - copy(rct4, buf.Bytes()) - buf.Reset() - - rcts.EncodeIndex(4, buf) - rct5 = make([]byte, buf.Len()) - copy(rct5, buf.Bytes()) - buf.Reset() - - // encode mock receipts for non-canonical blocks - nonCanonicalBlockRcts.EncodeIndex(0, buf) - nonCanonicalBlockRct1 = make([]byte, buf.Len()) - copy(nonCanonicalBlockRct1, buf.Bytes()) - buf.Reset() - - nonCanonicalBlockRcts.EncodeIndex(1, buf) - nonCanonicalBlockRct2 = make([]byte, buf.Len()) - copy(nonCanonicalBlockRct2, buf.Bytes()) - buf.Reset() - - nonCanonicalBlock2Rcts.EncodeIndex(0, buf) - nonCanonicalBlock2Rct1 = make([]byte, buf.Len()) - copy(nonCanonicalBlock2Rct1, buf.Bytes()) - buf.Reset() - - nonCanonicalBlock2Rcts.EncodeIndex(1, buf) - nonCanonicalBlock2Rct2 = make([]byte, buf.Len()) - copy(nonCanonicalBlock2Rct2, buf.Bytes()) - buf.Reset() - - headerCID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockHeaderRlp, multihash.KECCAK_256) - mockNonCanonicalHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockNonCanonicalHeaderRlp, multihash.KECCAK_256) - mockNonCanonicalHeader2CID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockNonCanonicalHeader2Rlp, multihash.KECCAK_256) - trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx1, multihash.KECCAK_256) - trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx2, multihash.KECCAK_256) - trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx3, multihash.KECCAK_256) - trx4CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx4, multihash.KECCAK_256) - trx5CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx5, multihash.KECCAK_256) - state1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.ContractLeafNode, multihash.KECCAK_256) - state2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.AccountLeafNode, multihash.KECCAK_256) - storageCID, _ = 
ipld.RawdataToCid(ipld.MEthStorageTrie, mocks.StorageLeafNode, multihash.KECCAK_256) - - // create raw receipts - rawRctLeafNodes, rctleafNodeCids := createRctTrie([][]byte{rct1, rct2, rct3, rct4, rct5}) - - rct1CID = rctleafNodeCids[0] - rct2CID = rctleafNodeCids[1] - rct3CID = rctleafNodeCids[2] - rct4CID = rctleafNodeCids[3] - rct5CID = rctleafNodeCids[4] - - rctLeaf1 = rawRctLeafNodes[0] - rctLeaf2 = rawRctLeafNodes[1] - rctLeaf3 = rawRctLeafNodes[2] - rctLeaf4 = rawRctLeafNodes[3] - rctLeaf5 = rawRctLeafNodes[4] - - // create raw receipts for non-canonical blocks - nonCanonicalBlockRawRctLeafNodes, nonCanonicalBlockRctLeafNodeCids := createRctTrie([][]byte{nonCanonicalBlockRct1, nonCanonicalBlockRct2}) - - nonCanonicalBlockRct1CID = nonCanonicalBlockRctLeafNodeCids[0] - nonCanonicalBlockRct2CID = nonCanonicalBlockRctLeafNodeCids[1] - - nonCanonicalBlockRctLeaf1 = nonCanonicalBlockRawRctLeafNodes[0] - nonCanonicalBlockRctLeaf2 = nonCanonicalBlockRawRctLeafNodes[1] - - nonCanonicalBlock2RawRctLeafNodes, nonCanonicalBlock2RctLeafNodeCids := createRctTrie([][]byte{nonCanonicalBlockRct1, nonCanonicalBlockRct2}) - - nonCanonicalBlock2Rct1CID = nonCanonicalBlock2RctLeafNodeCids[0] - nonCanonicalBlock2Rct2CID = nonCanonicalBlock2RctLeafNodeCids[1] - - nonCanonicalBlock2RctLeaf1 = nonCanonicalBlock2RawRctLeafNodes[0] - nonCanonicalBlock2RctLeaf2 = nonCanonicalBlock2RawRctLeafNodes[1] - - contract1Address = "0x5d663F5269090bD2A7DC2390c911dF6083D7b28F" - contract2Address = "0x6Eb7e5C66DB8af2E96159AC440cbc8CDB7fbD26B" - contract3Address = "0xcfeB164C328CA13EFd3C77E1980d94975aDfedfc" - contract4Address = "0x0Edf0c4f393a628DE4828B228C48175b3EA297fc" - contract1CreatedAt = uint64(1) - contract2CreatedAt = uint64(2) - contract3CreatedAt = uint64(3) - contract4CreatedAt = uint64(4) - - lastFilledAt = uint64(0) - watchedAt1 = uint64(10) - watchedAt2 = uint64(15) - watchedAt3 = uint64(20) -} - -// createRctTrie creates a receipt trie from the given raw receipts -// returns 
receipt leaf nodes and their CIDs -func createRctTrie(rcts [][]byte) ([][]byte, []cid.Cid) { - receiptTrie := ipld.NewRctTrie() - - for i, rct := range rcts { - receiptTrie.Add(i, rct) - } - rctLeafNodes, keys, _ := receiptTrie.GetLeafNodes() - - rctleafNodeCids := make([]cid.Cid, len(rctLeafNodes)) - orderedRctLeafNodes := make([][]byte, len(rctLeafNodes)) - for i, rln := range rctLeafNodes { - var idx uint - - r := bytes.NewReader(keys[i].TrieKey) - rlp.Decode(r, &idx) - rctleafNodeCids[idx] = rln.Cid() - orderedRctLeafNodes[idx] = rln.RawData() - } - - return orderedRctLeafNodes, rctleafNodeCids -} - -// createRctModel creates a models.ReceiptModel object from a given ethereum receipt -func createRctModel(rct *types.Receipt, cid cid.Cid, blockNumber string) models.ReceiptModel { - rctModel := models.ReceiptModel{ - BlockNumber: blockNumber, - HeaderID: rct.BlockHash.String(), - TxID: rct.TxHash.String(), - LeafCID: cid.String(), - LeafMhKey: shared.MultihashKeyFromCID(cid), - LogRoot: rct.LogRoot.String(), - } - - contract := shared.HandleZeroAddr(rct.ContractAddress) - rctModel.Contract = contract - if contract != "" { - rctModel.ContractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String() - } - - if len(rct.PostState) == 0 { - rctModel.PostStatus = rct.Status - } else { - rctModel.PostState = common.Bytes2Hex(rct.PostState) - } - - return rctModel -} - -func expectTrue(t *testing.T, value bool) { - if !value { - t.Fatalf("Assertion failed") - } -} - func checkTxClosure(t *testing.T, idle, inUse, open int64) { require.Equal(t, idle, db.Stats().Idle()) require.Equal(t, inUse, db.Stats().InUse()) @@ -261,708 +23,6 @@ func checkTxClosure(t *testing.T, idle, inUse, open int64) { } func tearDown(t *testing.T) { - sql.TearDownDB(t, db) - err := ind.Close() - require.NoError(t, err) -} - -// setupTestData indexes a single mock block along with it's state nodes -func setupTestData(t *testing.T) { - var tx interfaces.Batch - tx, err = 
ind.PushBlock( - mockBlock, - mocks.MockReceipts, - mocks.MockBlock.Difficulty()) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := tx.Submit(err); err != nil { - t.Fatal(err) - } - }() - for _, node := range mocks.StateDiffs { - err = ind.PushStateNode(tx, node, mockBlock.Hash().String()) - require.NoError(t, err) - } - - require.Equal(t, mocks.BlockNumber.String(), tx.(*sql.BatchTx).BlockNumber) -} - -// setupTestData indexes a mock block and a non-canonical mock block at London height -// and a non-canonical block at London height + 1 -// along with their state nodes -func setupTestDataNonCanonical(t *testing.T) { - // index a canonical block at London height - var tx1 interfaces.Batch - tx1, err = ind.PushBlock( - mockBlock, - mocks.MockReceipts, - mocks.MockBlock.Difficulty()) - if err != nil { - t.Fatal(err) - } - for _, node := range mocks.StateDiffs { - err = ind.PushStateNode(tx1, node, mockBlock.Hash().String()) - require.NoError(t, err) - } - - require.Equal(t, mocks.BlockNumber.String(), tx1.(*sql.BatchTx).BlockNumber) - if err := tx1.Submit(err); err != nil { - t.Fatal(err) - } - - // index a non-canonical block at London height - // has transactions overlapping with that of the canonical block - var tx2 interfaces.Batch - tx2, err = ind.PushBlock( - mockNonCanonicalBlock, - mocks.MockNonCanonicalBlockReceipts, - mockNonCanonicalBlock.Difficulty()) - if err != nil { - t.Fatal(err) - } - for _, node := range mocks.StateDiffs { - err = ind.PushStateNode(tx2, node, mockNonCanonicalBlock.Hash().String()) - require.NoError(t, err) - } - - require.Equal(t, mocks.BlockNumber.String(), tx2.(*sql.BatchTx).BlockNumber) - if err := tx2.Submit(err); err != nil { - t.Fatal(err) - } - - // index a non-canonical block at London height + 1 - // has transactions overlapping with that of the canonical block - var tx3 interfaces.Batch - tx3, err = ind.PushBlock( - mockNonCanonicalBlock2, - mocks.MockNonCanonicalBlock2Receipts, - 
mockNonCanonicalBlock2.Difficulty()) - if err != nil { - t.Fatal(err) - } - for _, node := range mocks.StateDiffs[:2] { - err = ind.PushStateNode(tx3, node, mockNonCanonicalBlock2.Hash().String()) - require.NoError(t, err) - } - - require.Equal(t, mocks.Block2Number.String(), tx3.(*sql.BatchTx).BlockNumber) - if err := tx3.Submit(err); err != nil { - t.Fatal(err) - } -} - -func testPublishAndIndexHeaderNonCanonical(t *testing.T) { - // check indexed headers - pgStr := `SELECT CAST(block_number as TEXT), block_hash, cid, cast(td AS TEXT), cast(reward AS TEXT), - tx_root, receipt_root, uncle_root, coinbase - FROM eth.header_cids - ORDER BY block_number` - headerRes := make([]models.HeaderModel, 0) - err = db.Select(context.Background(), &headerRes, pgStr) - if err != nil { - t.Fatal(err) - } - - // expect three blocks to be indexed - // a canonical and a non-canonical block at London height, - // non-canonical block at London height + 1 - expectedRes := []models.HeaderModel{ - { - BlockNumber: mockBlock.Number().String(), - BlockHash: mockBlock.Hash().String(), - CID: headerCID.String(), - TotalDifficulty: mockBlock.Difficulty().String(), - TxRoot: mockBlock.TxHash().String(), - RctRoot: mockBlock.ReceiptHash().String(), - UncleRoot: mockBlock.UncleHash().String(), - Coinbase: mocks.MockHeader.Coinbase.String(), - }, - { - BlockNumber: mockNonCanonicalBlock.Number().String(), - BlockHash: mockNonCanonicalBlock.Hash().String(), - CID: mockNonCanonicalHeaderCID.String(), - TotalDifficulty: mockNonCanonicalBlock.Difficulty().String(), - TxRoot: mockNonCanonicalBlock.TxHash().String(), - RctRoot: mockNonCanonicalBlock.ReceiptHash().String(), - UncleRoot: mockNonCanonicalBlock.UncleHash().String(), - Coinbase: mocks.MockNonCanonicalHeader.Coinbase.String(), - }, - { - BlockNumber: mockNonCanonicalBlock2.Number().String(), - BlockHash: mockNonCanonicalBlock2.Hash().String(), - CID: mockNonCanonicalHeader2CID.String(), - TotalDifficulty: 
mockNonCanonicalBlock2.Difficulty().String(), - TxRoot: mockNonCanonicalBlock2.TxHash().String(), - RctRoot: mockNonCanonicalBlock2.ReceiptHash().String(), - UncleRoot: mockNonCanonicalBlock2.UncleHash().String(), - Coinbase: mocks.MockNonCanonicalHeader2.Coinbase.String(), - }, - } - expectedRes[0].Reward = shared.CalcEthBlockReward(mockBlock.Header(), mockBlock.Uncles(), mockBlock.Transactions(), mocks.MockReceipts).String() - expectedRes[1].Reward = shared.CalcEthBlockReward(mockNonCanonicalBlock.Header(), mockNonCanonicalBlock.Uncles(), mockNonCanonicalBlock.Transactions(), mocks.MockNonCanonicalBlockReceipts).String() - expectedRes[2].Reward = shared.CalcEthBlockReward(mockNonCanonicalBlock2.Header(), mockNonCanonicalBlock2.Uncles(), mockNonCanonicalBlock2.Transactions(), mocks.MockNonCanonicalBlock2Receipts).String() - - require.Equal(t, len(expectedRes), len(headerRes)) - require.ElementsMatch(t, - []string{mockBlock.Hash().String(), mockNonCanonicalBlock.Hash().String(), mockNonCanonicalBlock2.Hash().String()}, - []string{headerRes[0].BlockHash, headerRes[1].BlockHash, headerRes[2].BlockHash}, - ) - - if headerRes[0].BlockHash == mockBlock.Hash().String() { - require.Equal(t, expectedRes[0], headerRes[0]) - require.Equal(t, expectedRes[1], headerRes[1]) - require.Equal(t, expectedRes[2], headerRes[2]) - } else { - require.Equal(t, expectedRes[1], headerRes[0]) - require.Equal(t, expectedRes[0], headerRes[1]) - require.Equal(t, expectedRes[2], headerRes[2]) - } - - // check indexed IPLD blocks - headerCIDs := []cid.Cid{headerCID, mockNonCanonicalHeaderCID, mockNonCanonicalHeader2CID} - blockNumbers := []uint64{mocks.BlockNumber.Uint64(), mocks.BlockNumber.Uint64(), mocks.Block2Number.Uint64()} - headerRLPs := [][]byte{mocks.MockHeaderRlp, mocks.MockNonCanonicalHeaderRlp, mocks.MockNonCanonicalHeader2Rlp} - for i := range expectedRes { - var data []byte - prefixedKey := shared.MultihashKeyFromCID(headerCIDs[i]) - err = db.Get(context.Background(), &data, 
ipfsPgGet, prefixedKey, blockNumbers[i]) - if err != nil { - t.Fatal(err) - } - require.Equal(t, headerRLPs[i], data) - } -} - -func testPublishAndIndexTransactionsNonCanonical(t *testing.T) { - // check indexed transactions - pgStr := `SELECT CAST(block_number as TEXT), header_id, tx_hash, cid, dst, src, index, - tx_data, tx_type, CAST(value as TEXT) - FROM eth.transaction_cids - ORDER BY block_number, index` - txRes := make([]models.TxModel, 0) - err = db.Select(context.Background(), &txRes, pgStr) - if err != nil { - t.Fatal(err) - } - - // expected transactions in the canonical block - mockBlockTxs := mocks.MockBlock.Transactions() - expectedBlockTxs := []models.TxModel{ - { - BlockNumber: mockBlock.Number().String(), - HeaderID: mockBlock.Hash().String(), - TxHash: mockBlockTxs[0].Hash().String(), - CID: trx1CID.String(), - Dst: shared.HandleZeroAddrPointer(mockBlockTxs[0].To()), - Src: mocks.SenderAddr.String(), - Index: 0, - Data: mockBlockTxs[0].Data(), - Type: mockBlockTxs[0].Type(), - Value: mockBlockTxs[0].Value().String(), - }, - { - BlockNumber: mockBlock.Number().String(), - HeaderID: mockBlock.Hash().String(), - TxHash: mockBlockTxs[1].Hash().String(), - CID: trx2CID.String(), - Dst: shared.HandleZeroAddrPointer(mockBlockTxs[1].To()), - Src: mocks.SenderAddr.String(), - Index: 1, - Data: mockBlockTxs[1].Data(), - Type: mockBlockTxs[1].Type(), - Value: mockBlockTxs[1].Value().String(), - }, - { - BlockNumber: mockBlock.Number().String(), - HeaderID: mockBlock.Hash().String(), - TxHash: mockBlockTxs[2].Hash().String(), - CID: trx3CID.String(), - Dst: shared.HandleZeroAddrPointer(mockBlockTxs[2].To()), - Src: mocks.SenderAddr.String(), - Index: 2, - Data: mockBlockTxs[2].Data(), - Type: mockBlockTxs[2].Type(), - Value: mockBlockTxs[2].Value().String(), - }, - { - BlockNumber: mockBlock.Number().String(), - HeaderID: mockBlock.Hash().String(), - TxHash: mockBlockTxs[3].Hash().String(), - CID: trx4CID.String(), - Dst: 
shared.HandleZeroAddrPointer(mockBlockTxs[3].To()), - Src: mocks.SenderAddr.String(), - Index: 3, - Data: mockBlockTxs[3].Data(), - Type: mockBlockTxs[3].Type(), - Value: mockBlockTxs[3].Value().String(), - }, - { - BlockNumber: mockBlock.Number().String(), - HeaderID: mockBlock.Hash().String(), - TxHash: mockBlockTxs[4].Hash().String(), - CID: trx5CID.String(), - Dst: shared.HandleZeroAddrPointer(mockBlockTxs[4].To()), - Src: mocks.SenderAddr.String(), - Index: 4, - Data: mockBlockTxs[4].Data(), - Type: mockBlockTxs[4].Type(), - Value: mockBlockTxs[4].Value().String(), - }, - } - - // expected transactions in the canonical block at London height - mockNonCanonicalBlockTxs := mockNonCanonicalBlock.Transactions() - expectedNonCanonicalBlockTxs := []models.TxModel{ - { - BlockNumber: mockNonCanonicalBlock.Number().String(), - HeaderID: mockNonCanonicalBlock.Hash().String(), - TxHash: mockNonCanonicalBlockTxs[0].Hash().String(), - CID: trx2CID.String(), - Dst: mockNonCanonicalBlockTxs[0].To().String(), - Src: mocks.SenderAddr.String(), - Index: 0, - Data: mockNonCanonicalBlockTxs[0].Data(), - Type: mockNonCanonicalBlockTxs[0].Type(), - Value: mockNonCanonicalBlockTxs[0].Value().String(), - }, - { - BlockNumber: mockNonCanonicalBlock.Number().String(), - HeaderID: mockNonCanonicalBlock.Hash().String(), - TxHash: mockNonCanonicalBlockTxs[1].Hash().String(), - CID: trx5CID.String(), - Dst: mockNonCanonicalBlockTxs[1].To().String(), - Src: mocks.SenderAddr.String(), - Index: 1, - Data: mockNonCanonicalBlockTxs[1].Data(), - Type: mockNonCanonicalBlockTxs[1].Type(), - Value: mockNonCanonicalBlockTxs[1].Value().String(), - }, - } - - // expected transactions in the canonical block at London height + 1 - mockNonCanonicalBlock2Txs := mockNonCanonicalBlock2.Transactions() - expectedNonCanonicalBlock2Txs := []models.TxModel{ - { - BlockNumber: mockNonCanonicalBlock2.Number().String(), - HeaderID: mockNonCanonicalBlock2.Hash().String(), - TxHash: 
mockNonCanonicalBlock2Txs[0].Hash().String(), - CID: trx3CID.String(), - Dst: "", - Src: mocks.SenderAddr.String(), - Index: 0, - Data: mockNonCanonicalBlock2Txs[0].Data(), - Type: mockNonCanonicalBlock2Txs[0].Type(), - Value: mockNonCanonicalBlock2Txs[0].Value().String(), - }, - { - BlockNumber: mockNonCanonicalBlock2.Number().String(), - HeaderID: mockNonCanonicalBlock2.Hash().String(), - TxHash: mockNonCanonicalBlock2Txs[1].Hash().String(), - CID: trx5CID.String(), - Dst: mockNonCanonicalBlock2Txs[1].To().String(), - Src: mocks.SenderAddr.String(), - Index: 1, - Data: mockNonCanonicalBlock2Txs[1].Data(), - Type: mockNonCanonicalBlock2Txs[1].Type(), - Value: mockNonCanonicalBlock2Txs[1].Value().String(), - }, - } - - require.Equal(t, len(expectedBlockTxs)+len(expectedNonCanonicalBlockTxs)+len(expectedNonCanonicalBlock2Txs), len(txRes)) - - // sort results such that non-canonical block transactions come after canonical block ones - sort.SliceStable(txRes, func(i, j int) bool { - if txRes[i].BlockNumber < txRes[j].BlockNumber { - return true - } else if txRes[i].HeaderID == txRes[j].HeaderID { - return txRes[i].Index < txRes[j].Index - } else if txRes[i].HeaderID == mockBlock.Hash().String() { - return true - } else { - return false - } - }) - - for i, expectedTx := range expectedBlockTxs { - require.Equal(t, expectedTx, txRes[i]) - } - for i, expectedTx := range expectedNonCanonicalBlockTxs { - require.Equal(t, expectedTx, txRes[len(expectedBlockTxs)+i]) - } - for i, expectedTx := range expectedNonCanonicalBlock2Txs { - require.Equal(t, expectedTx, txRes[len(expectedBlockTxs)+len(expectedNonCanonicalBlockTxs)+i]) - } - - // check indexed IPLD blocks - var data []byte - var prefixedKey string - - txCIDs := []cid.Cid{trx1CID, trx2CID, trx3CID, trx4CID, trx5CID} - txRLPs := [][]byte{tx1, tx2, tx3, tx4, tx5} - for i, txCID := range txCIDs { - prefixedKey = shared.MultihashKeyFromCID(txCID) - err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, 
mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, txRLPs[i], data) - } -} - -func testPublishAndIndexReceiptsNonCanonical(t *testing.T) { - // check indexed receipts - pgStr := `SELECT CAST(block_number as TEXT), header_id, tx_id, leaf_cid, leaf_mh_key, post_status, post_state, contract, contract_hash, log_root - FROM eth.receipt_cids - ORDER BY block_number` - rctRes := make([]models.ReceiptModel, 0) - err = db.Select(context.Background(), &rctRes, pgStr) - if err != nil { - t.Fatal(err) - } - - // expected receipts in the canonical block - rctCids := []cid.Cid{rct1CID, rct2CID, rct3CID, rct4CID, rct5CID} - expectedBlockRctsMap := make(map[string]models.ReceiptModel, len(mocks.MockReceipts)) - for i, mockBlockRct := range mocks.MockReceipts { - rctModel := createRctModel(mockBlockRct, rctCids[i], mockBlock.Number().String()) - expectedBlockRctsMap[rctCids[i].String()] = rctModel - } - - // expected receipts in the canonical block at London height + 1 - nonCanonicalBlockRctCids := []cid.Cid{nonCanonicalBlockRct1CID, nonCanonicalBlockRct2CID} - expectedNonCanonicalBlockRctsMap := make(map[string]models.ReceiptModel, len(mocks.MockNonCanonicalBlockReceipts)) - for i, mockNonCanonicalBlockRct := range mocks.MockNonCanonicalBlockReceipts { - rctModel := createRctModel(mockNonCanonicalBlockRct, nonCanonicalBlockRctCids[i], mockNonCanonicalBlock.Number().String()) - expectedNonCanonicalBlockRctsMap[nonCanonicalBlockRctCids[i].String()] = rctModel - } - - // expected receipts in the canonical block at London height + 1 - nonCanonicalBlock2RctCids := []cid.Cid{nonCanonicalBlock2Rct1CID, nonCanonicalBlock2Rct2CID} - expectedNonCanonicalBlock2RctsMap := make(map[string]models.ReceiptModel, len(mocks.MockNonCanonicalBlock2Receipts)) - for i, mockNonCanonicalBlock2Rct := range mocks.MockNonCanonicalBlock2Receipts { - rctModel := createRctModel(mockNonCanonicalBlock2Rct, nonCanonicalBlock2RctCids[i], mockNonCanonicalBlock2.Number().String()) - 
expectedNonCanonicalBlock2RctsMap[nonCanonicalBlock2RctCids[i].String()] = rctModel - } - - require.Equal(t, len(expectedBlockRctsMap)+len(expectedNonCanonicalBlockRctsMap)+len(expectedNonCanonicalBlock2RctsMap), len(rctRes)) - - // sort results such that non-canonical block reciepts come after canonical block ones - sort.SliceStable(rctRes, func(i, j int) bool { - if rctRes[i].BlockNumber < rctRes[j].BlockNumber { - return true - } else if rctRes[i].HeaderID == rctRes[j].HeaderID { - return false - } else if rctRes[i].HeaderID == mockBlock.Hash().String() { - return true - } else { - return false - } - }) - - for i := 0; i < len(expectedBlockRctsMap); i++ { - rct := rctRes[i] - require.Contains(t, expectedBlockRctsMap, rct.LeafCID) - require.Equal(t, expectedBlockRctsMap[rct.LeafCID], rct) - } - - for i := 0; i < len(expectedNonCanonicalBlockRctsMap); i++ { - rct := rctRes[len(expectedBlockRctsMap)+i] - require.Contains(t, expectedNonCanonicalBlockRctsMap, rct.LeafCID) - require.Equal(t, expectedNonCanonicalBlockRctsMap[rct.LeafCID], rct) - } - - for i := 0; i < len(expectedNonCanonicalBlock2RctsMap); i++ { - rct := rctRes[len(expectedBlockRctsMap)+len(expectedNonCanonicalBlockRctsMap)+i] - require.Contains(t, expectedNonCanonicalBlock2RctsMap, rct.LeafCID) - require.Equal(t, expectedNonCanonicalBlock2RctsMap[rct.LeafCID], rct) - } - - // check indexed rct IPLD blocks - var data []byte - var prefixedKey string - - rctRLPs := [][]byte{ - rctLeaf1, rctLeaf2, rctLeaf3, rctLeaf4, rctLeaf5, - nonCanonicalBlockRctLeaf1, nonCanonicalBlockRctLeaf2, - } - for i, rctCid := range append(rctCids, nonCanonicalBlockRctCids...) 
{ - prefixedKey = shared.MultihashKeyFromCID(rctCid) - err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, rctRLPs[i], data) - } - - nonCanonicalBlock2RctRLPs := [][]byte{nonCanonicalBlock2RctLeaf1, nonCanonicalBlock2RctLeaf2} - for i, rctCid := range nonCanonicalBlock2RctCids { - prefixedKey = shared.MultihashKeyFromCID(rctCid) - err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.Block2Number.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, nonCanonicalBlock2RctRLPs[i], data) - } -} - -func testPublishAndIndexLogsNonCanonical(t *testing.T) { - // check indexed logs - pgStr := `SELECT address, log_data, topic0, topic1, topic2, topic3, data - FROM eth.log_cids - INNER JOIN public.blocks ON (log_cids.block_number = blocks.block_number AND log_cids.leaf_mh_key = blocks.key) - WHERE log_cids.block_number = $1 AND header_id = $2 AND rct_id = $3 - ORDER BY log_cids.index ASC` - - type rctWithBlockHash struct { - rct *types.Receipt - blockHash string - blockNumber uint64 - } - mockRcts := make([]rctWithBlockHash, 0) - - // logs in the canonical block - for _, mockBlockRct := range mocks.MockReceipts { - mockRcts = append(mockRcts, rctWithBlockHash{ - mockBlockRct, - mockBlock.Hash().String(), - mockBlock.NumberU64(), - }) - } - - // logs in the canonical block at London height + 1 - for _, mockBlockRct := range mocks.MockNonCanonicalBlockReceipts { - mockRcts = append(mockRcts, rctWithBlockHash{ - mockBlockRct, - mockNonCanonicalBlock.Hash().String(), - mockNonCanonicalBlock.NumberU64(), - }) - } - - // logs in the canonical block at London height + 1 - for _, mockBlockRct := range mocks.MockNonCanonicalBlock2Receipts { - mockRcts = append(mockRcts, rctWithBlockHash{ - mockBlockRct, - mockNonCanonicalBlock2.Hash().String(), - mockNonCanonicalBlock2.NumberU64(), - }) - } - - for _, mockRct := range mockRcts { - type logWithIPLD struct { - 
models.LogsModel - IPLDData []byte `db:"data"` - } - logRes := make([]logWithIPLD, 0) - err = db.Select(context.Background(), &logRes, pgStr, mockRct.blockNumber, mockRct.blockHash, mockRct.rct.TxHash.String()) - require.NoError(t, err) - require.Equal(t, len(mockRct.rct.Logs), len(logRes)) - - for i, log := range mockRct.rct.Logs { - topicSet := make([]string, 4) - for ti, topic := range log.Topics { - topicSet[ti] = topic.Hex() - } - - expectedLog := models.LogsModel{ - Address: log.Address.String(), - Data: log.Data, - Topic0: topicSet[0], - Topic1: topicSet[1], - Topic2: topicSet[2], - Topic3: topicSet[3], - } - require.Equal(t, expectedLog, logRes[i].LogsModel) - - // check indexed log IPLD block - var nodeElements []interface{} - err = rlp.DecodeBytes(logRes[i].IPLDData, &nodeElements) - require.NoError(t, err) - - if len(nodeElements) == 2 { - logRaw, err := rlp.EncodeToBytes(log) - require.NoError(t, err) - // 2nd element of the leaf node contains the encoded log data. - require.Equal(t, nodeElements[1].([]byte), logRaw) - } else { - logRaw, err := rlp.EncodeToBytes(log) - require.NoError(t, err) - // raw log was IPLDized - require.Equal(t, logRes[i].IPLDData, logRaw) - } - } - } -} - -func testPublishAndIndexStateNonCanonical(t *testing.T) { - // check indexed state nodes - pgStr := `SELECT state_path, state_leaf_key, node_type, cid, mh_key, diff - FROM eth.state_cids - WHERE block_number = $1 - AND header_id = $2 - ORDER BY state_path` - - removedNodeCID, _ := cid.Decode(shared.RemovedNodeStateCID) - stateNodeCIDs := []cid.Cid{state1CID, state2CID, removedNodeCID, removedNodeCID} - - // expected state nodes in the canonical and the non-canonical block at London height - expectedStateNodes := make([]models.StateNodeModel, 0) - for i, stateDiff := range mocks.StateDiffs { - expectedStateNodes = append(expectedStateNodes, models.StateNodeModel{ - Path: stateDiff.Path, - StateKey: common.BytesToHash(stateDiff.LeafKey).Hex(), - NodeType: 
stateDiff.NodeType.Int(), - CID: stateNodeCIDs[i].String(), - MhKey: shared.MultihashKeyFromCID(stateNodeCIDs[i]), - Diff: true, - }) - } - sort.Slice(expectedStateNodes, func(i, j int) bool { - if bytes.Compare(expectedStateNodes[i].Path, expectedStateNodes[j].Path) < 0 { - return true - } else { - return false - } - }) - - // expected state nodes in the non-canonical block at London height + 1 - expectedNonCanonicalBlock2StateNodes := make([]models.StateNodeModel, 0) - for i, stateDiff := range mocks.StateDiffs[:2] { - expectedNonCanonicalBlock2StateNodes = append(expectedNonCanonicalBlock2StateNodes, models.StateNodeModel{ - Path: stateDiff.Path, - StateKey: common.BytesToHash(stateDiff.LeafKey).Hex(), - NodeType: stateDiff.NodeType.Int(), - CID: stateNodeCIDs[i].String(), - MhKey: shared.MultihashKeyFromCID(stateNodeCIDs[i]), - Diff: true, - }) - } - - // check state nodes for canonical block - stateNodes := make([]models.StateNodeModel, 0) - err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64(), mockBlock.Hash().String()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, len(expectedStateNodes), len(stateNodes)) - - for i, expectedStateNode := range expectedStateNodes { - require.Equal(t, expectedStateNode, stateNodes[i]) - } - - // check state nodes for non-canonical block at London height - stateNodes = make([]models.StateNodeModel, 0) - err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64(), mockNonCanonicalBlock.Hash().String()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, len(expectedStateNodes), len(stateNodes)) - - for i, expectedStateNode := range expectedStateNodes { - require.Equal(t, expectedStateNode, stateNodes[i]) - } - - // check state nodes for non-canonical block at London height + 1 - stateNodes = make([]models.StateNodeModel, 0) - err = db.Select(context.Background(), &stateNodes, pgStr, mocks.Block2Number.Uint64(), mockNonCanonicalBlock2.Hash().String()) - if err 
!= nil { - t.Fatal(err) - } - require.Equal(t, len(expectedNonCanonicalBlock2StateNodes), len(stateNodes)) - - for i, expectedStateNode := range expectedNonCanonicalBlock2StateNodes { - require.Equal(t, expectedStateNode, stateNodes[i]) - } -} - -func testPublishAndIndexStorageNonCanonical(t *testing.T) { - // check indexed storage nodes - pgStr := `SELECT state_path, storage_path, storage_leaf_key, node_type, cid, mh_key, diff - FROM eth.storage_cids - WHERE block_number = $1 - AND header_id = $2 - ORDER BY state_path, storage_path` - - removedNodeCID, _ := cid.Decode(shared.RemovedNodeStorageCID) - storageNodeCIDs := []cid.Cid{storageCID, removedNodeCID, removedNodeCID, removedNodeCID} - - // expected state nodes in the canonical and the non-canonical block at London height - expectedStorageNodes := make([]models.StorageNodeModel, 0) - storageNodeIndex := 0 - for _, stateDiff := range mocks.StateDiffs { - for _, storageNode := range stateDiff.StorageNodes { - expectedStorageNodes = append(expectedStorageNodes, models.StorageNodeModel{ - StatePath: stateDiff.Path, - Path: storageNode.Path, - StorageKey: common.BytesToHash(storageNode.LeafKey).Hex(), - NodeType: storageNode.NodeType.Int(), - CID: storageNodeCIDs[storageNodeIndex].String(), - MhKey: shared.MultihashKeyFromCID(storageNodeCIDs[storageNodeIndex]), - Diff: true, - }) - storageNodeIndex++ - } - } - sort.Slice(expectedStorageNodes, func(i, j int) bool { - if bytes.Compare(expectedStorageNodes[i].Path, expectedStorageNodes[j].Path) < 0 { - return true - } else { - return false - } - }) - - // expected state nodes in the non-canonical block at London height + 1 - expectedNonCanonicalBlock2StorageNodes := make([]models.StorageNodeModel, 0) - storageNodeIndex = 0 - for _, stateDiff := range mocks.StateDiffs[:2] { - for _, storageNode := range stateDiff.StorageNodes { - expectedNonCanonicalBlock2StorageNodes = append(expectedNonCanonicalBlock2StorageNodes, models.StorageNodeModel{ - StatePath: stateDiff.Path, 
- Path: storageNode.Path, - StorageKey: common.BytesToHash(storageNode.LeafKey).Hex(), - NodeType: storageNode.NodeType.Int(), - CID: storageNodeCIDs[storageNodeIndex].String(), - MhKey: shared.MultihashKeyFromCID(storageNodeCIDs[storageNodeIndex]), - Diff: true, - }) - storageNodeIndex++ - } - } - - // check storage nodes for canonical block - storageNodes := make([]models.StorageNodeModel, 0) - err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64(), mockBlock.Hash().String()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, len(expectedStorageNodes), len(storageNodes)) - - for i, expectedStorageNode := range expectedStorageNodes { - require.Equal(t, expectedStorageNode, storageNodes[i]) - } - - // check storage nodes for non-canonical block at London height - storageNodes = make([]models.StorageNodeModel, 0) - err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64(), mockNonCanonicalBlock.Hash().String()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, len(expectedStorageNodes), len(storageNodes)) - - for i, expectedStorageNode := range expectedStorageNodes { - require.Equal(t, expectedStorageNode, storageNodes[i]) - } - - // check storage nodes for non-canonical block at London height + 1 - storageNodes = make([]models.StorageNodeModel, 0) - err = db.Select(context.Background(), &storageNodes, pgStr, mockNonCanonicalBlock2.NumberU64(), mockNonCanonicalBlock2.Hash().String()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, len(expectedNonCanonicalBlock2StorageNodes), len(storageNodes)) - - for i, expectedStorageNode := range expectedNonCanonicalBlock2StorageNodes { - require.Equal(t, expectedStorageNode, storageNodes[i]) - } + test_helpers.TearDownDB(t, db) + require.NoError(t, ind.Close()) } diff --git a/statediff/indexer/database/sql/mainnet_tests/indexer_test.go b/statediff/indexer/database/sql/mainnet_tests/indexer_test.go index 55f535aa8..ce57a74ac 100644 --- 
a/statediff/indexer/database/sql/mainnet_tests/indexer_test.go +++ b/statediff/indexer/database/sql/mainnet_tests/indexer_test.go @@ -30,7 +30,7 @@ import ( "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" - "github.com/ethereum/go-ethereum/statediff/indexer/mocks" + "github.com/ethereum/go-ethereum/statediff/indexer/test" "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" ) @@ -48,65 +48,48 @@ func init() { } } -func TestPushBlockAndState(t *testing.T) { - conf := test_helpers.DefaultTestConfig - rawURL := os.Getenv(test_helpers.TEST_RAW_URL) - if rawURL == "" { - fmt.Printf("Warning: no raw url configured for statediffing mainnet tests, will look for local file and"+ - "then try default endpoint (%s)\r\n", test_helpers.DefaultTestConfig.RawURL) - } else { - conf.RawURL = rawURL - } +func TestMainnetIndexer(t *testing.T) { + conf := test_helpers.GetTestConfig() + for _, blockNumber := range test_helpers.ProblemBlocks { conf.BlockNumber = big.NewInt(blockNumber) tb, trs, err := test_helpers.TestBlockAndReceipts(conf) require.NoError(t, err) + testPushBlockAndState(t, tb, trs) } + testBlock, testReceipts, err := test_helpers.TestBlockAndReceiptsFromEnv(conf) require.NoError(t, err) + testPushBlockAndState(t, testBlock, testReceipts) } func testPushBlockAndState(t *testing.T, block *types.Block, receipts types.Receipts) { t.Run("Test PushBlock and PushStateNode", func(t *testing.T) { - setup(t, block, receipts) - tearDown(t) + setupMainnetIndexer(t) + defer checkTxClosure(t, 0, 0, 0) + defer tearDown(t) + + test.TestBlock(t, ind, block, receipts) }) } -func setup(t *testing.T, testBlock *types.Block, testReceipts types.Receipts) { +func setupMainnetIndexer(t *testing.T) { db, err = postgres.SetupSQLXDB() if err != nil { t.Fatal(err) } ind, err = sql.NewStateDiffIndexer(context.Background(), chainConf, 
db) - require.NoError(t, err) - var tx interfaces.Batch - tx, err = ind.PushBlock( - testBlock, - testReceipts, - testBlock.Difficulty()) - require.NoError(t, err) +} - defer func() { - if err := tx.Submit(err); err != nil { - t.Fatal(err) - } - }() - for _, node := range mocks.StateDiffs { - err = ind.PushStateNode(tx, node, testBlock.Hash().String()) - require.NoError(t, err) - } - - require.Equal(t, testBlock.Number().String(), tx.(*sql.BatchTx).BlockNumber) +func checkTxClosure(t *testing.T, idle, inUse, open int64) { + require.Equal(t, idle, db.Stats().Idle()) + require.Equal(t, inUse, db.Stats().InUse()) + require.Equal(t, open, db.Stats().Open()) } func tearDown(t *testing.T) { - require.Equal(t, int64(0), db.Stats().Idle()) - require.Equal(t, int64(0), db.Stats().InUse()) - require.Equal(t, int64(0), db.Stats().Open()) - sql.TearDownDB(t, db) - err = ind.Close() - require.NoError(t, err) + test_helpers.TearDownDB(t, db) + require.NoError(t, ind.Close()) } diff --git a/statediff/indexer/database/sql/pgx_indexer_legacy_test.go b/statediff/indexer/database/sql/pgx_indexer_legacy_test.go index bb3b36446..292548b75 100644 --- a/statediff/indexer/database/sql/pgx_indexer_legacy_test.go +++ b/statediff/indexer/database/sql/pgx_indexer_legacy_test.go @@ -20,42 +20,25 @@ import ( "context" "testing" - "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" - "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" - "github.com/ethereum/go-ethereum/statediff/indexer/ipld" + "github.com/ethereum/go-ethereum/statediff/indexer/test" ) -func setupLegacyPGX(t *testing.T) { - mockLegacyBlock = legacyData.MockBlock - legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256) - +func setupLegacyPGXIndexer(t *testing.T) { db, err = postgres.SetupPGXDB() - 
require.NoError(t, err) - - ind, err = sql.NewStateDiffIndexer(context.Background(), legacyData.Config, db) - require.NoError(t, err) - var tx interfaces.Batch - tx, err = ind.PushBlock( - mockLegacyBlock, - legacyData.MockReceipts, - legacyData.MockBlock.Difficulty()) - require.NoError(t, err) - - defer func() { - if err := tx.Submit(err); err != nil { - t.Fatal(err) - } - }() - for _, node := range legacyData.StateDiffs { - err = ind.PushStateNode(tx, node, legacyData.MockBlock.Hash().String()) - require.NoError(t, err) + if err != nil { + t.Fatal(err) } + ind, err = sql.NewStateDiffIndexer(context.Background(), test.LegacyConfig, db) + require.NoError(t, err) +} - require.Equal(t, legacyData.BlockNumber.String(), tx.(*sql.BatchTx).BlockNumber) +func setupLegacyPGX(t *testing.T) { + setupLegacyPGXIndexer(t) + test.SetupLegacyTestData(t, ind) } func TestLegacyPGXIndexer(t *testing.T) { @@ -63,27 +46,7 @@ func TestLegacyPGXIndexer(t *testing.T) { setupLegacyPGX(t) defer tearDown(t) defer checkTxClosure(t, 1, 0, 1) - pgStr := `SELECT cid, cast(td AS TEXT), cast(reward AS TEXT), block_hash, coinbase - FROM eth.header_cids - WHERE block_number = $1` - // check header was properly indexed - type res struct { - CID string - TD string - Reward string - BlockHash string `db:"block_hash"` - Coinbase string `db:"coinbase"` - } - header := new(res) - err = db.QueryRow(context.Background(), pgStr, legacyData.BlockNumber.Uint64()).Scan( - &header.CID, &header.TD, &header.Reward, &header.BlockHash, &header.Coinbase) - require.NoError(t, err) - - require.Equal(t, legacyHeaderCID.String(), header.CID) - require.Equal(t, legacyData.MockBlock.Difficulty().String(), header.TD) - require.Equal(t, "5000000000000011250", header.Reward) - require.Equal(t, legacyData.MockHeader.Coinbase.String(), header.Coinbase) - require.Nil(t, legacyData.MockHeader.BaseFee) + test.TestLegacyIndexer(t, db) }) } diff --git a/statediff/indexer/database/sql/pgx_indexer_test.go 
b/statediff/indexer/database/sql/pgx_indexer_test.go index 9087578e4..1dbf2dfa0 100644 --- a/statediff/indexer/database/sql/pgx_indexer_test.go +++ b/statediff/indexer/database/sql/pgx_indexer_test.go @@ -21,21 +21,12 @@ import ( "math/big" "testing" - "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - dshelp "github.com/ipfs/go-ipfs-ds-help" "github.com/stretchr/testify/require" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" "github.com/ethereum/go-ethereum/statediff/indexer/mocks" - "github.com/ethereum/go-ethereum/statediff/indexer/models" - "github.com/ethereum/go-ethereum/statediff/indexer/shared" - "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" - sdtypes "github.com/ethereum/go-ethereum/statediff/types" + "github.com/ethereum/go-ethereum/statediff/indexer/test" ) func setupPGXIndexer(t *testing.T) { @@ -49,12 +40,12 @@ func setupPGXIndexer(t *testing.T) { func setupPGX(t *testing.T) { setupPGXIndexer(t) - setupTestData(t) + test.SetupTestData(t, ind) } func setupPGXNonCanonical(t *testing.T) { setupPGXIndexer(t) - setupTestDataNonCanonical(t) + test.SetupTestDataNonCanonical(t, ind) } // Test indexer for a canonical block @@ -63,174 +54,16 @@ func TestPGXIndexer(t *testing.T) { setupPGX(t) defer tearDown(t) defer checkTxClosure(t, 1, 0, 1) - pgStr := `SELECT cid, cast(td AS TEXT), cast(reward AS TEXT), block_hash, coinbase - FROM eth.header_cids - WHERE block_number = $1` - // check header was properly indexed - type res struct { - CID string - TD string - Reward string - BlockHash string `db:"block_hash"` - Coinbase string `db:"coinbase"` - } - header := new(res) - err = db.QueryRow(context.Background(), pgStr, mocks.BlockNumber.Uint64()).Scan( - &header.CID, - &header.TD, - &header.Reward, - 
&header.BlockHash, - &header.Coinbase) - if err != nil { - t.Fatal(err) - } - require.Equal(t, headerCID.String(), header.CID) - require.Equal(t, mocks.MockBlock.Difficulty().String(), header.TD) - require.Equal(t, "2000000000000021250", header.Reward) - require.Equal(t, mocks.MockHeader.Coinbase.String(), header.Coinbase) - dc, err := cid.Decode(header.CID) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - var data []byte - err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.MockHeaderRlp, data) + + test.TestPublishAndIndexHeaderIPLDs(t, db) }) t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) { setupPGX(t) defer tearDown(t) defer checkTxClosure(t, 1, 0, 1) - // check that txs were properly indexed and published - trxs := make([]string, 0) - pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash) - WHERE header_cids.block_number = $1` - err = db.Select(context.Background(), &trxs, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 5, len(trxs)) - expectTrue(t, test_helpers.ListContainsString(trxs, trx1CID.String())) - expectTrue(t, test_helpers.ListContainsString(trxs, trx2CID.String())) - expectTrue(t, test_helpers.ListContainsString(trxs, trx3CID.String())) - expectTrue(t, test_helpers.ListContainsString(trxs, trx4CID.String())) - expectTrue(t, test_helpers.ListContainsString(trxs, trx5CID.String())) - transactions := mocks.MockBlock.Transactions() - type txResult struct { - TxType uint8 `db:"tx_type"` - Value string - } - for _, c := range trxs { - dc, err := cid.Decode(c) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() 
+ mhKey.String() - var data []byte - err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - txTypeAndValueStr := `SELECT tx_type, CAST(value as TEXT) FROM eth.transaction_cids WHERE cid = $1` - switch c { - case trx1CID.String(): - require.Equal(t, tx1, data) - txRes := new(txResult) - err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value) - if err != nil { - t.Fatal(err) - } - if txRes.TxType != 0 { - t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType) - } - if txRes.Value != transactions[0].Value().String() { - t.Fatalf("expected tx value %s got %s", transactions[0].Value().String(), txRes.Value) - } - case trx2CID.String(): - require.Equal(t, tx2, data) - txRes := new(txResult) - err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value) - if err != nil { - t.Fatal(err) - } - if txRes.TxType != 0 { - t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType) - } - if txRes.Value != transactions[1].Value().String() { - t.Fatalf("expected tx value %s got %s", transactions[1].Value().String(), txRes.Value) - } - case trx3CID.String(): - require.Equal(t, tx3, data) - txRes := new(txResult) - err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value) - if err != nil { - t.Fatal(err) - } - if txRes.TxType != 0 { - t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType) - } - if txRes.Value != transactions[2].Value().String() { - t.Fatalf("expected tx value %s got %s", transactions[2].Value().String(), txRes.Value) - } - case trx4CID.String(): - require.Equal(t, tx4, data) - txRes := new(txResult) - err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value) - if err != nil { - t.Fatal(err) - } - if txRes.TxType != types.AccessListTxType { - t.Fatalf("expected AccessListTxType (1), got %d", txRes.TxType) - } - if txRes.Value != 
transactions[3].Value().String() { - t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value) - } - accessListElementModels := make([]models.AccessListElementModel, 0) - pgStr = "SELECT cast(access_list_elements.block_number AS TEXT), access_list_elements.index, access_list_elements.tx_id, " + - "access_list_elements.address, access_list_elements.storage_keys FROM eth.access_list_elements " + - "INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.tx_hash) WHERE cid = $1 ORDER BY access_list_elements.index ASC" - err = db.Select(context.Background(), &accessListElementModels, pgStr, c) - if err != nil { - t.Fatal(err) - } - if len(accessListElementModels) != 2 { - t.Fatalf("expected two access list entries, got %d", len(accessListElementModels)) - } - model1 := models.AccessListElementModel{ - BlockNumber: mocks.BlockNumber.String(), - Index: accessListElementModels[0].Index, - Address: accessListElementModels[0].Address, - } - model2 := models.AccessListElementModel{ - BlockNumber: mocks.BlockNumber.String(), - Index: accessListElementModels[1].Index, - Address: accessListElementModels[1].Address, - StorageKeys: accessListElementModels[1].StorageKeys, - } - require.Equal(t, mocks.AccessListEntry1Model, model1) - require.Equal(t, mocks.AccessListEntry2Model, model2) - case trx5CID.String(): - require.Equal(t, tx5, data) - txRes := new(txResult) - err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value) - if err != nil { - t.Fatal(err) - } - if txRes.TxType != types.DynamicFeeTxType { - t.Fatalf("expected DynamicFeeTxType (2), got %d", txRes.TxType) - } - if txRes.Value != transactions[4].Value().String() { - t.Fatalf("expected tx value %s got %s", transactions[4].Value().String(), txRes.Value) - } - } - } + test.TestPublishAndIndexTransactionIPLDs(t, db) }) t.Run("Publish and index log IPLDs for multiple receipt of a specific block", func(t *testing.T) { @@ -238,57 +71,7 @@ func 
TestPGXIndexer(t *testing.T) { defer tearDown(t) defer checkTxClosure(t, 1, 0, 1) - rcts := make([]string, 0) - rctsPgStr := `SELECT receipt_cids.leaf_cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids - WHERE receipt_cids.tx_id = transaction_cids.tx_hash - AND transaction_cids.header_id = header_cids.block_hash - AND header_cids.block_number = $1 - ORDER BY transaction_cids.index` - logsPgStr := `SELECT log_cids.index, log_cids.address, log_cids.topic0, log_cids.topic1, data FROM eth.log_cids - INNER JOIN eth.receipt_cids ON (log_cids.rct_id = receipt_cids.tx_id) - INNER JOIN public.blocks ON (log_cids.leaf_mh_key = blocks.key) - WHERE receipt_cids.leaf_cid = $1 ORDER BY eth.log_cids.index ASC` - err = db.Select(context.Background(), &rcts, rctsPgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - if len(rcts) != len(mocks.MockReceipts) { - t.Fatalf("expected %d receipts, got %d", len(mocks.MockReceipts), len(rcts)) - } - - type logIPLD struct { - Index int `db:"index"` - Address string `db:"address"` - Data []byte `db:"data"` - Topic0 string `db:"topic0"` - Topic1 string `db:"topic1"` - } - for i := range rcts { - results := make([]logIPLD, 0) - err = db.Select(context.Background(), &results, logsPgStr, rcts[i]) - require.NoError(t, err) - - expectedLogs := mocks.MockReceipts[i].Logs - require.Equal(t, len(expectedLogs), len(results)) - - var nodeElements []interface{} - for idx, r := range results { - // Attempt to decode the log leaf node. - err = rlp.DecodeBytes(r.Data, &nodeElements) - require.NoError(t, err) - if len(nodeElements) == 2 { - logRaw, err := rlp.EncodeToBytes(&expectedLogs[idx]) - require.NoError(t, err) - // 2nd element of the leaf node contains the encoded log data. 
- require.Equal(t, nodeElements[1].([]byte), logRaw) - } else { - logRaw, err := rlp.EncodeToBytes(&expectedLogs[idx]) - require.NoError(t, err) - // raw log was IPLDized - require.Equal(t, r.Data, logRaw) - } - } - } + test.TestPublishAndIndexLogIPLDs(t, db) }) t.Run("Publish and index receipt IPLDs in a single tx", func(t *testing.T) { @@ -296,303 +79,23 @@ func TestPGXIndexer(t *testing.T) { defer tearDown(t) defer checkTxClosure(t, 1, 0, 1) - // check receipts were properly indexed and published - rcts := make([]string, 0) - pgStr := `SELECT receipt_cids.leaf_cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids - WHERE receipt_cids.tx_id = transaction_cids.tx_hash - AND transaction_cids.header_id = header_cids.block_hash - AND header_cids.block_number = $1 order by transaction_cids.index` - err = db.Select(context.Background(), &rcts, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 5, len(rcts)) - expectTrue(t, test_helpers.ListContainsString(rcts, rct1CID.String())) - expectTrue(t, test_helpers.ListContainsString(rcts, rct2CID.String())) - expectTrue(t, test_helpers.ListContainsString(rcts, rct3CID.String())) - expectTrue(t, test_helpers.ListContainsString(rcts, rct4CID.String())) - expectTrue(t, test_helpers.ListContainsString(rcts, rct5CID.String())) - - for idx, c := range rcts { - result := make([]models.IPLDModel, 0) - pgStr = `SELECT data - FROM eth.receipt_cids - INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key) - WHERE receipt_cids.leaf_cid = $1` - err = db.Select(context.Background(), &result, pgStr, c) - if err != nil { - t.Fatal(err) - } - - // Decode the receipt leaf node. 
- var nodeElements []interface{} - err = rlp.DecodeBytes(result[0].Data, &nodeElements) - require.NoError(t, err) - - expectedRct, err := mocks.MockReceipts[idx].MarshalBinary() - require.NoError(t, err) - - require.Equal(t, nodeElements[1].([]byte), expectedRct) - - dc, err := cid.Decode(c) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - var data []byte - err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - - postStatePgStr := `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1` - switch c { - case rct1CID.String(): - require.Equal(t, rctLeaf1, data) - var postStatus uint64 - pgStr = `SELECT post_status FROM eth.receipt_cids WHERE leaf_cid = $1` - err = db.Get(context.Background(), &postStatus, pgStr, c) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.ExpectedPostStatus, postStatus) - case rct2CID.String(): - require.Equal(t, rctLeaf2, data) - var postState string - err = db.Get(context.Background(), &postState, postStatePgStr, c) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.ExpectedPostState1, postState) - case rct3CID.String(): - require.Equal(t, rctLeaf3, data) - var postState string - err = db.Get(context.Background(), &postState, postStatePgStr, c) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.ExpectedPostState2, postState) - case rct4CID.String(): - require.Equal(t, rctLeaf4, data) - var postState string - err = db.Get(context.Background(), &postState, postStatePgStr, c) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.ExpectedPostState3, postState) - case rct5CID.String(): - require.Equal(t, rctLeaf5, data) - var postState string - err = db.Get(context.Background(), &postState, postStatePgStr, c) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.ExpectedPostState3, postState) - } - } + 
test.TestPublishAndIndexReceiptIPLDs(t, db) }) t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) { setupPGX(t) defer tearDown(t) defer checkTxClosure(t, 1, 0, 1) - // check that state nodes were properly indexed and published - stateNodes := make([]models.StateNodeModel, 0) - pgStr := `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id - FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) - WHERE header_cids.block_number = $1 AND node_type != 3` - err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 2, len(stateNodes)) - for _, stateNode := range stateNodes { - var data []byte - dc, err := cid.Decode(stateNode.CID) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - pgStr = `SELECT cast(block_number AS TEXT), header_id, state_path, cast(balance AS TEXT), nonce, code_hash, storage_root from eth.state_accounts WHERE header_id = $1 AND state_path = $2` - var account models.StateAccountModel - err = db.Get(context.Background(), &account, pgStr, stateNode.HeaderID, stateNode.Path) - if err != nil { - t.Fatal(err) - } - if stateNode.CID == state1CID.String() { - require.Equal(t, 2, stateNode.NodeType) - require.Equal(t, common.BytesToHash(mocks.ContractLeafKey).Hex(), stateNode.StateKey) - require.Equal(t, []byte{'\x06'}, stateNode.Path) - require.Equal(t, mocks.ContractLeafNode, data) - require.Equal(t, models.StateAccountModel{ - BlockNumber: mocks.BlockNumber.String(), - HeaderID: account.HeaderID, - StatePath: stateNode.Path, - Balance: "0", - CodeHash: mocks.ContractCodeHash.Bytes(), - StorageRoot: mocks.ContractRoot, - 
Nonce: 1, - }, account) - } - if stateNode.CID == state2CID.String() { - require.Equal(t, 2, stateNode.NodeType) - require.Equal(t, common.BytesToHash(mocks.AccountLeafKey).Hex(), stateNode.StateKey) - require.Equal(t, []byte{'\x0c'}, stateNode.Path) - require.Equal(t, mocks.AccountLeafNode, data) - require.Equal(t, models.StateAccountModel{ - BlockNumber: mocks.BlockNumber.String(), - HeaderID: account.HeaderID, - StatePath: stateNode.Path, - Balance: "1000", - CodeHash: mocks.AccountCodeHash.Bytes(), - StorageRoot: mocks.AccountRoot, - Nonce: 0, - }, account) - } - } - // check that Removed state nodes were properly indexed and published - stateNodes = make([]models.StateNodeModel, 0) - pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id - FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) - WHERE header_cids.block_number = $1 AND node_type = 3` - err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 2, len(stateNodes)) - for idx, stateNode := range stateNodes { - var data []byte - dc, err := cid.Decode(stateNode.CID) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - require.Equal(t, shared.RemovedNodeMhKey, prefixedKey) - err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - - if idx == 0 { - require.Equal(t, shared.RemovedNodeStateCID, stateNode.CID) - require.Equal(t, common.BytesToHash(mocks.RemovedLeafKey).Hex(), stateNode.StateKey) - require.Equal(t, []byte{'\x02'}, stateNode.Path) - require.Equal(t, []byte{}, data) - } - if idx == 1 { - require.Equal(t, shared.RemovedNodeStateCID, stateNode.CID) - require.Equal(t, common.BytesToHash(mocks.Contract2LeafKey).Hex(), 
stateNode.StateKey) - require.Equal(t, []byte{'\x07'}, stateNode.Path) - require.Equal(t, []byte{}, data) - } - } + test.TestPublishAndIndexStateIPLDs(t, db) }) t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) { setupPGX(t) defer tearDown(t) defer checkTxClosure(t, 1, 0, 1) - // check that storage nodes were properly indexed - storageNodes := make([]models.StorageNodeWithStateKeyModel, 0) - pgStr := `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path - FROM eth.storage_cids, eth.state_cids, eth.header_cids - WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id) - AND state_cids.header_id = header_cids.block_hash - AND header_cids.block_number = $1 - AND storage_cids.node_type != 3 - ORDER BY storage_path` - err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 1, len(storageNodes)) - require.Equal(t, models.StorageNodeWithStateKeyModel{ - BlockNumber: mocks.BlockNumber.String(), - CID: storageCID.String(), - NodeType: 2, - StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(), - StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(), - Path: []byte{}, - }, storageNodes[0]) - var data []byte - dc, err := cid.Decode(storageNodes[0].CID) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.StorageLeafNode, data) - // check that Removed storage nodes were properly indexed - storageNodes = make([]models.StorageNodeWithStateKeyModel, 0) - pgStr = `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, 
state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path - FROM eth.storage_cids, eth.state_cids, eth.header_cids - WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id) - AND state_cids.header_id = header_cids.block_hash - AND header_cids.block_number = $1 - AND storage_cids.node_type = 3` - err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 3, len(storageNodes)) - gotStorageNodes := make(map[string]models.StorageNodeWithStateKeyModel, 3) - for _, model := range storageNodes { - gotStorageNodes[model.StorageKey] = model - } - expectedStorageNodes := map[string]models.StorageNodeWithStateKeyModel{ - common.BytesToHash(mocks.RemovedLeafKey).Hex(): { - BlockNumber: mocks.BlockNumber.String(), - CID: shared.RemovedNodeStorageCID, - NodeType: 3, - StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(), - StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(), - Path: []byte{'\x03'}, - }, - common.BytesToHash(mocks.Storage2LeafKey).Hex(): { - BlockNumber: mocks.BlockNumber.String(), - CID: shared.RemovedNodeStorageCID, - NodeType: 3, - StorageKey: common.BytesToHash(mocks.Storage2LeafKey).Hex(), - StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(), - Path: []byte{'\x0e'}, - }, - common.BytesToHash(mocks.Storage3LeafKey).Hex(): { - BlockNumber: mocks.BlockNumber.String(), - CID: shared.RemovedNodeStorageCID, - NodeType: 3, - StorageKey: common.BytesToHash(mocks.Storage3LeafKey).Hex(), - StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(), - Path: []byte{'\x0f'}, - }, - } - for storageKey, storageNode := range gotStorageNodes { - require.Equal(t, expectedStorageNodes[storageKey], storageNode) - dc, err = cid.Decode(storageNode.CID) - if err != nil { - t.Fatal(err) - } - mhKey = dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey = blockstore.BlockPrefix.String() 
+ mhKey.String() - require.Equal(t, shared.RemovedNodeMhKey, prefixedKey) - err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, []byte{}, data) - } + test.TestPublishAndIndexStorageIPLDs(t, db) }) } @@ -603,7 +106,7 @@ func TestPGXIndexerNonCanonical(t *testing.T) { defer tearDown(t) defer checkTxClosure(t, 1, 0, 1) - testPublishAndIndexHeaderNonCanonical(t) + test.TestPublishAndIndexHeaderNonCanonical(t, db) }) t.Run("Publish and index transactions", func(t *testing.T) { @@ -611,7 +114,7 @@ func TestPGXIndexerNonCanonical(t *testing.T) { defer tearDown(t) defer checkTxClosure(t, 1, 0, 1) - testPublishAndIndexTransactionsNonCanonical(t) + test.TestPublishAndIndexTransactionsNonCanonical(t, db) }) t.Run("Publish and index receipts", func(t *testing.T) { @@ -619,7 +122,7 @@ func TestPGXIndexerNonCanonical(t *testing.T) { defer tearDown(t) defer checkTxClosure(t, 1, 0, 1) - testPublishAndIndexReceiptsNonCanonical(t) + test.TestPublishAndIndexReceiptsNonCanonical(t, db) }) t.Run("Publish and index logs", func(t *testing.T) { @@ -627,7 +130,7 @@ func TestPGXIndexerNonCanonical(t *testing.T) { defer tearDown(t) defer checkTxClosure(t, 1, 0, 1) - testPublishAndIndexLogsNonCanonical(t) + test.TestPublishAndIndexLogsNonCanonical(t, db) }) t.Run("Publish and index state nodes", func(t *testing.T) { @@ -635,7 +138,7 @@ func TestPGXIndexerNonCanonical(t *testing.T) { defer tearDown(t) defer checkTxClosure(t, 1, 0, 1) - testPublishAndIndexStateNonCanonical(t) + test.TestPublishAndIndexStateNonCanonical(t, db) }) t.Run("Publish and index storage nodes", func(t *testing.T) { @@ -643,7 +146,7 @@ func TestPGXIndexerNonCanonical(t *testing.T) { defer tearDown(t) defer checkTxClosure(t, 1, 0, 1) - testPublishAndIndexStorageNonCanonical(t) + test.TestPublishAndIndexStorageNonCanonical(t, db) }) } @@ -652,328 +155,73 @@ func TestPGXWatchAddressMethods(t *testing.T) { defer tearDown(t) 
defer checkTxClosure(t, 1, 0, 1) - type res struct { - Address string `db:"address"` - CreatedAt uint64 `db:"created_at"` - WatchedAt uint64 `db:"watched_at"` - LastFilledAt uint64 `db:"last_filled_at"` - } - pgStr := "SELECT * FROM eth_meta.watched_addresses" - t.Run("Load watched addresses (empty table)", func(t *testing.T) { - expectedData := []common.Address{} - - rows, err := ind.LoadWatchedAddresses() - require.NoError(t, err) - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestLoadEmptyWatchedAddresses(t, ind) }) t.Run("Insert watched addresses", func(t *testing.T) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - } - expectedData := []res{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - WatchedAt: watchedAt1, - LastFilledAt: lastFilledAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - WatchedAt: watchedAt1, - LastFilledAt: lastFilledAt, - }, - } - - err = ind.InsertWatchedAddresses(args, big.NewInt(int64(watchedAt1))) + args := mocks.GetInsertWatchedAddressesArgs() + err = ind.InsertWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt1))) require.NoError(t, err) - rows := []res{} - err = db.Select(context.Background(), &rows, pgStr) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestInsertWatchedAddresses(t, db) }) t.Run("Insert watched addresses (some already watched)", func(t *testing.T) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - } - expectedData := []res{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - 
WatchedAt: watchedAt1, - LastFilledAt: lastFilledAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - WatchedAt: watchedAt1, - LastFilledAt: lastFilledAt, - }, - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - WatchedAt: watchedAt2, - LastFilledAt: lastFilledAt, - }, - } - - err = ind.InsertWatchedAddresses(args, big.NewInt(int64(watchedAt2))) + args := mocks.GetInsertAlreadyWatchedAddressesArgs() + err = ind.InsertWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt2))) require.NoError(t, err) - rows := []res{} - err = db.Select(context.Background(), &rows, pgStr) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestInsertAlreadyWatchedAddresses(t, db) }) t.Run("Remove watched addresses", func(t *testing.T) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - } - expectedData := []res{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - WatchedAt: watchedAt1, - LastFilledAt: lastFilledAt, - }, - } - + args := mocks.GetRemoveWatchedAddressesArgs() err = ind.RemoveWatchedAddresses(args) require.NoError(t, err) - rows := []res{} - err = db.Select(context.Background(), &rows, pgStr) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestRemoveWatchedAddresses(t, db) }) t.Run("Remove watched addresses (some non-watched)", func(t *testing.T) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - } - expectedData := []res{} - + args := mocks.GetRemoveNonWatchedAddressesArgs() err = ind.RemoveWatchedAddresses(args) 
require.NoError(t, err) - rows := []res{} - err = db.Select(context.Background(), &rows, pgStr) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestRemoveNonWatchedAddresses(t, db) }) t.Run("Set watched addresses", func(t *testing.T) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - }, - } - expectedData := []res{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - WatchedAt: watchedAt2, - LastFilledAt: lastFilledAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - WatchedAt: watchedAt2, - LastFilledAt: lastFilledAt, - }, - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - WatchedAt: watchedAt2, - LastFilledAt: lastFilledAt, - }, - } - - err = ind.SetWatchedAddresses(args, big.NewInt(int64(watchedAt2))) + args := mocks.GetSetWatchedAddressesArgs() + err = ind.SetWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt2))) require.NoError(t, err) - rows := []res{} - err = db.Select(context.Background(), &rows, pgStr) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestSetWatchedAddresses(t, db) }) t.Run("Set watched addresses (some already watched)", func(t *testing.T) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract4Address, - CreatedAt: contract4CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - }, - } - expectedData := []res{ - { - Address: contract4Address, - CreatedAt: contract4CreatedAt, - WatchedAt: watchedAt3, - LastFilledAt: lastFilledAt, - }, - { - 
Address: contract2Address, - CreatedAt: contract2CreatedAt, - WatchedAt: watchedAt3, - LastFilledAt: lastFilledAt, - }, - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - WatchedAt: watchedAt3, - LastFilledAt: lastFilledAt, - }, - } - - err = ind.SetWatchedAddresses(args, big.NewInt(int64(watchedAt3))) + args := mocks.GetSetAlreadyWatchedAddressesArgs() + err = ind.SetWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt3))) require.NoError(t, err) - rows := []res{} - err = db.Select(context.Background(), &rows, pgStr) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestSetAlreadyWatchedAddresses(t, db) }) t.Run("Load watched addresses", func(t *testing.T) { - expectedData := []common.Address{ - common.HexToAddress(contract4Address), - common.HexToAddress(contract2Address), - common.HexToAddress(contract3Address), - } - - rows, err := ind.LoadWatchedAddresses() - require.NoError(t, err) - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestLoadWatchedAddresses(t, ind) }) t.Run("Clear watched addresses", func(t *testing.T) { - expectedData := []res{} - err = ind.ClearWatchedAddresses() require.NoError(t, err) - rows := []res{} - err = db.Select(context.Background(), &rows, pgStr) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestClearWatchedAddresses(t, db) }) t.Run("Clear watched addresses (empty table)", func(t *testing.T) { - expectedData := []res{} - err = ind.ClearWatchedAddresses() require.NoError(t, err) - rows := []res{} - err = db.Select(context.Background(), &rows, pgStr) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, 
expectedData[idx], row) - } + test.TestClearEmptyWatchedAddresses(t, db) }) } diff --git a/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go b/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go index daaa1550c..4a07b8a0e 100644 --- a/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go +++ b/statediff/indexer/database/sql/sqlx_indexer_legacy_test.go @@ -20,52 +20,25 @@ import ( "context" "testing" - "github.com/ipfs/go-cid" - "github.com/jmoiron/sqlx" - "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" - "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" - "github.com/ethereum/go-ethereum/statediff/indexer/ipld" - "github.com/ethereum/go-ethereum/statediff/indexer/mocks" + "github.com/ethereum/go-ethereum/statediff/indexer/test" ) -var ( - legacyData = mocks.NewLegacyData() - mockLegacyBlock *types.Block - legacyHeaderCID cid.Cid -) +func setupLegacySQLXIndexer(t *testing.T) { + db, err = postgres.SetupSQLXDB() + if err != nil { + t.Fatal(err) + } + ind, err = sql.NewStateDiffIndexer(context.Background(), test.LegacyConfig, db) + require.NoError(t, err) +} func setupLegacySQLX(t *testing.T) { - mockLegacyBlock = legacyData.MockBlock - legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256) - - db, err = postgres.SetupSQLXDB() - require.NoError(t, err) - - ind, err = sql.NewStateDiffIndexer(context.Background(), legacyData.Config, db) - require.NoError(t, err) - var tx interfaces.Batch - tx, err = ind.PushBlock( - mockLegacyBlock, - legacyData.MockReceipts, - legacyData.MockBlock.Difficulty()) - require.NoError(t, err) - - defer func() { - if err := tx.Submit(err); err != nil { - t.Fatal(err) - } - }() - for _, node := range legacyData.StateDiffs { - err = 
ind.PushStateNode(tx, node, mockLegacyBlock.Hash().String()) - require.NoError(t, err) - } - - require.Equal(t, legacyData.BlockNumber.String(), tx.(*sql.BatchTx).BlockNumber) + setupLegacySQLXIndexer(t) + test.SetupLegacyTestData(t, ind) } func TestLegacySQLXIndexer(t *testing.T) { @@ -73,25 +46,7 @@ func TestLegacySQLXIndexer(t *testing.T) { setupLegacySQLX(t) defer tearDown(t) defer checkTxClosure(t, 0, 0, 0) - pgStr := `SELECT cid, td, reward, block_hash, coinbase - FROM eth.header_cids - WHERE block_number = $1` - // check header was properly indexed - type res struct { - CID string - TD string - Reward string - BlockHash string `db:"block_hash"` - Coinbase string `db:"coinbase"` - } - header := new(res) - err = db.QueryRow(context.Background(), pgStr, legacyData.BlockNumber.Uint64()).(*sqlx.Row).StructScan(header) - require.NoError(t, err) - require.Equal(t, legacyHeaderCID.String(), header.CID) - require.Equal(t, legacyData.MockBlock.Difficulty().String(), header.TD) - require.Equal(t, "5000000000000011250", header.Reward) - require.Equal(t, legacyData.MockHeader.Coinbase.String(), header.Coinbase) - require.Nil(t, legacyData.MockHeader.BaseFee) + test.TestLegacyIndexer(t, db) }) } diff --git a/statediff/indexer/database/sql/sqlx_indexer_test.go b/statediff/indexer/database/sql/sqlx_indexer_test.go index f894660a7..fa8844655 100644 --- a/statediff/indexer/database/sql/sqlx_indexer_test.go +++ b/statediff/indexer/database/sql/sqlx_indexer_test.go @@ -21,22 +21,12 @@ import ( "math/big" "testing" - "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - dshelp "github.com/ipfs/go-ipfs-ds-help" - "github.com/jmoiron/sqlx" "github.com/stretchr/testify/require" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres" 
"github.com/ethereum/go-ethereum/statediff/indexer/mocks" - "github.com/ethereum/go-ethereum/statediff/indexer/models" - "github.com/ethereum/go-ethereum/statediff/indexer/shared" - "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" - sdtypes "github.com/ethereum/go-ethereum/statediff/types" + "github.com/ethereum/go-ethereum/statediff/indexer/test" ) func setupSQLXIndexer(t *testing.T) { @@ -50,12 +40,12 @@ func setupSQLXIndexer(t *testing.T) { func setupSQLX(t *testing.T) { setupSQLXIndexer(t) - setupTestData(t) + test.SetupTestData(t, ind) } func setupSQLXNonCanonical(t *testing.T) { - setupPGXIndexer(t) - setupTestDataNonCanonical(t) + setupSQLXIndexer(t) + test.SetupTestDataNonCanonical(t, ind) } // Test indexer for a canonical block @@ -64,169 +54,16 @@ func TestSQLXIndexer(t *testing.T) { setupSQLX(t) defer tearDown(t) defer checkTxClosure(t, 0, 0, 0) - pgStr := `SELECT cid, td, reward, block_hash, coinbase - FROM eth.header_cids - WHERE block_number = $1` - // check header was properly indexed - type res struct { - CID string - TD string - Reward string - BlockHash string `db:"block_hash"` - Coinbase string `db:"coinbase"` - } - header := new(res) - err = db.QueryRow(context.Background(), pgStr, mocks.BlockNumber.Uint64()).(*sqlx.Row).StructScan(header) - if err != nil { - t.Fatal(err) - } - require.Equal(t, headerCID.String(), header.CID) - require.Equal(t, mocks.MockBlock.Difficulty().String(), header.TD) - require.Equal(t, "2000000000000021250", header.Reward) - require.Equal(t, mocks.MockHeader.Coinbase.String(), header.Coinbase) - dc, err := cid.Decode(header.CID) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - var data []byte - err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.MockHeaderRlp, data) + + 
test.TestPublishAndIndexHeaderIPLDs(t, db) }) t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) { setupSQLX(t) defer tearDown(t) defer checkTxClosure(t, 0, 0, 0) - // check that txs were properly indexed and published - trxs := make([]string, 0) - pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash) - WHERE header_cids.block_number = $1` - err = db.Select(context.Background(), &trxs, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 5, len(trxs)) - expectTrue(t, test_helpers.ListContainsString(trxs, trx1CID.String())) - expectTrue(t, test_helpers.ListContainsString(trxs, trx2CID.String())) - expectTrue(t, test_helpers.ListContainsString(trxs, trx3CID.String())) - expectTrue(t, test_helpers.ListContainsString(trxs, trx4CID.String())) - expectTrue(t, test_helpers.ListContainsString(trxs, trx5CID.String())) - transactions := mocks.MockBlock.Transactions() - type txResult struct { - TxType uint8 `db:"tx_type"` - Value string - } - for _, c := range trxs { - dc, err := cid.Decode(c) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - var data []byte - err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - txTypeAndValueStr := `SELECT tx_type, value FROM eth.transaction_cids WHERE cid = $1` - switch c { - case trx1CID.String(): - require.Equal(t, tx1, data) - txRes := new(txResult) - err = db.QueryRow(context.Background(), txTypeAndValueStr, c).(*sqlx.Row).StructScan(txRes) - if err != nil { - t.Fatal(err) - } - if txRes.TxType != 0 { - t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType) - } - if txRes.Value != transactions[0].Value().String() { - t.Fatalf("expected tx value %s got %s", transactions[0].Value().String(), 
txRes.Value) - } - case trx2CID.String(): - require.Equal(t, tx2, data) - txRes := new(txResult) - err = db.QueryRow(context.Background(), txTypeAndValueStr, c).(*sqlx.Row).StructScan(txRes) - if err != nil { - t.Fatal(err) - } - if txRes.TxType != 0 { - t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType) - } - if txRes.Value != transactions[1].Value().String() { - t.Fatalf("expected tx value %s got %s", transactions[1].Value().String(), txRes.Value) - } - case trx3CID.String(): - require.Equal(t, tx3, data) - txRes := new(txResult) - err = db.QueryRow(context.Background(), txTypeAndValueStr, c).(*sqlx.Row).StructScan(txRes) - if err != nil { - t.Fatal(err) - } - if txRes.TxType != 0 { - t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType) - } - if txRes.Value != transactions[2].Value().String() { - t.Fatalf("expected tx value %s got %s", transactions[2].Value().String(), txRes.Value) - } - case trx4CID.String(): - require.Equal(t, tx4, data) - txRes := new(txResult) - err = db.QueryRow(context.Background(), txTypeAndValueStr, c).(*sqlx.Row).StructScan(txRes) - if err != nil { - t.Fatal(err) - } - if txRes.TxType != types.AccessListTxType { - t.Fatalf("expected AccessListTxType (1), got %d", txRes.TxType) - } - if txRes.Value != transactions[3].Value().String() { - t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value) - } - accessListElementModels := make([]models.AccessListElementModel, 0) - pgStr = "SELECT cast(access_list_elements.block_number AS TEXT), access_list_elements.index, access_list_elements.tx_id, " + - "access_list_elements.address, access_list_elements.storage_keys FROM eth.access_list_elements " + - "INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.tx_hash) WHERE cid = $1 ORDER BY access_list_elements.index ASC" - err = db.Select(context.Background(), &accessListElementModels, pgStr, c) - if err != nil { - t.Fatal(err) - } - if len(accessListElementModels) != 2 { - t.Fatalf("expected two 
access list entries, got %d", len(accessListElementModels)) - } - model1 := models.AccessListElementModel{ - BlockNumber: mocks.BlockNumber.String(), - Index: accessListElementModels[0].Index, - Address: accessListElementModels[0].Address, - } - model2 := models.AccessListElementModel{ - BlockNumber: mocks.BlockNumber.String(), - Index: accessListElementModels[1].Index, - Address: accessListElementModels[1].Address, - StorageKeys: accessListElementModels[1].StorageKeys, - } - require.Equal(t, mocks.AccessListEntry1Model, model1) - require.Equal(t, mocks.AccessListEntry2Model, model2) - case trx5CID.String(): - require.Equal(t, tx5, data) - txRes := new(txResult) - err = db.QueryRow(context.Background(), txTypeAndValueStr, c).(*sqlx.Row).StructScan(txRes) - if err != nil { - t.Fatal(err) - } - if txRes.TxType != types.DynamicFeeTxType { - t.Fatalf("expected DynamicFeeTxType (2), got %d", txRes.TxType) - } - if txRes.Value != transactions[4].Value().String() { - t.Fatalf("expected tx value %s got %s", transactions[4].Value().String(), txRes.Value) - } - } - } + test.TestPublishAndIndexTransactionIPLDs(t, db) }) t.Run("Publish and index log IPLDs for multiple receipt of a specific block", func(t *testing.T) { @@ -234,55 +71,7 @@ func TestSQLXIndexer(t *testing.T) { defer tearDown(t) defer checkTxClosure(t, 0, 0, 0) - rcts := make([]string, 0) - rctsPgStr := `SELECT receipt_cids.leaf_cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids - WHERE receipt_cids.tx_id = transaction_cids.tx_hash - AND transaction_cids.header_id = header_cids.block_hash - AND header_cids.block_number = $1 - ORDER BY transaction_cids.index` - logsPgStr := `SELECT log_cids.index, log_cids.address, log_cids.topic0, log_cids.topic1, data FROM eth.log_cids - INNER JOIN eth.receipt_cids ON (log_cids.rct_id = receipt_cids.tx_id) - INNER JOIN public.blocks ON (log_cids.leaf_mh_key = blocks.key) - WHERE receipt_cids.leaf_cid = $1 ORDER BY eth.log_cids.index ASC` - err = 
db.Select(context.Background(), &rcts, rctsPgStr, mocks.BlockNumber.Uint64()) - require.NoError(t, err) - if len(rcts) != len(mocks.MockReceipts) { - t.Fatalf("expected %d receipts, got %d", len(mocks.MockReceipts), len(rcts)) - } - - type logIPLD struct { - Index int `db:"index"` - Address string `db:"address"` - Data []byte `db:"data"` - Topic0 string `db:"topic0"` - Topic1 string `db:"topic1"` - } - for i := range rcts { - results := make([]logIPLD, 0) - err = db.Select(context.Background(), &results, logsPgStr, rcts[i]) - require.NoError(t, err) - - expectedLogs := mocks.MockReceipts[i].Logs - require.Equal(t, len(expectedLogs), len(results)) - - var nodeElements []interface{} - for idx, r := range results { - // Attempt to decode the log leaf node. - err = rlp.DecodeBytes(r.Data, &nodeElements) - require.NoError(t, err) - if len(nodeElements) == 2 { - logRaw, err := rlp.EncodeToBytes(&expectedLogs[idx]) - require.NoError(t, err) - // 2nd element of the leaf node contains the encoded log data. 
- require.Equal(t, nodeElements[1].([]byte), logRaw) - } else { - logRaw, err := rlp.EncodeToBytes(&expectedLogs[idx]) - require.NoError(t, err) - // raw log was IPLDized - require.Equal(t, r.Data, logRaw) - } - } - } + test.TestPublishAndIndexLogIPLDs(t, db) }) t.Run("Publish and index receipt IPLDs in a single tx", func(t *testing.T) { @@ -290,302 +79,23 @@ func TestSQLXIndexer(t *testing.T) { defer tearDown(t) defer checkTxClosure(t, 0, 0, 0) - // check receipts were properly indexed and published - rcts := make([]string, 0) - pgStr := `SELECT receipt_cids.leaf_cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids - WHERE receipt_cids.tx_id = transaction_cids.tx_hash - AND transaction_cids.header_id = header_cids.block_hash - AND header_cids.block_number = $1 order by transaction_cids.index` - err = db.Select(context.Background(), &rcts, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 5, len(rcts)) - expectTrue(t, test_helpers.ListContainsString(rcts, rct1CID.String())) - expectTrue(t, test_helpers.ListContainsString(rcts, rct2CID.String())) - expectTrue(t, test_helpers.ListContainsString(rcts, rct3CID.String())) - expectTrue(t, test_helpers.ListContainsString(rcts, rct4CID.String())) - expectTrue(t, test_helpers.ListContainsString(rcts, rct5CID.String())) - - for idx, c := range rcts { - result := make([]models.IPLDModel, 0) - pgStr = `SELECT data - FROM eth.receipt_cids - INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key) - WHERE receipt_cids.leaf_cid = $1` - err = db.Select(context.Background(), &result, pgStr, c) - if err != nil { - t.Fatal(err) - } - - // Decode the receipt leaf node. 
- var nodeElements []interface{} - err = rlp.DecodeBytes(result[0].Data, &nodeElements) - require.NoError(t, err) - - expectedRct, err := mocks.MockReceipts[idx].MarshalBinary() - require.NoError(t, err) - - require.Equal(t, nodeElements[1].([]byte), expectedRct) - - dc, err := cid.Decode(c) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - var data []byte - err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - postStatePgStr := `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1` - switch c { - case rct1CID.String(): - require.Equal(t, rctLeaf1, data) - var postStatus uint64 - pgStr = `SELECT post_status FROM eth.receipt_cids WHERE leaf_cid = $1` - err = db.Get(context.Background(), &postStatus, pgStr, c) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.ExpectedPostStatus, postStatus) - case rct2CID.String(): - require.Equal(t, rctLeaf2, data) - var postState string - err = db.Get(context.Background(), &postState, postStatePgStr, c) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.ExpectedPostState1, postState) - case rct3CID.String(): - require.Equal(t, rctLeaf3, data) - var postState string - err = db.Get(context.Background(), &postState, postStatePgStr, c) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.ExpectedPostState2, postState) - case rct4CID.String(): - require.Equal(t, rctLeaf4, data) - var postState string - err = db.Get(context.Background(), &postState, postStatePgStr, c) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.ExpectedPostState3, postState) - case rct5CID.String(): - require.Equal(t, rctLeaf5, data) - var postState string - err = db.Get(context.Background(), &postState, postStatePgStr, c) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.ExpectedPostState3, postState) - } - } + 
test.TestPublishAndIndexReceiptIPLDs(t, db) }) t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) { setupSQLX(t) defer tearDown(t) defer checkTxClosure(t, 0, 0, 0) - // check that state nodes were properly indexed and published - stateNodes := make([]models.StateNodeModel, 0) - pgStr := `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id - FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) - WHERE header_cids.block_number = $1 AND node_type != 3` - err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 2, len(stateNodes)) - for _, stateNode := range stateNodes { - var data []byte - dc, err := cid.Decode(stateNode.CID) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - pgStr = `SELECT * from eth.state_accounts WHERE header_id = $1 AND state_path = $2` - var account models.StateAccountModel - err = db.Get(context.Background(), &account, pgStr, stateNode.HeaderID, stateNode.Path) - if err != nil { - t.Fatal(err) - } - if stateNode.CID == state1CID.String() { - require.Equal(t, 2, stateNode.NodeType) - require.Equal(t, common.BytesToHash(mocks.ContractLeafKey).Hex(), stateNode.StateKey) - require.Equal(t, []byte{'\x06'}, stateNode.Path) - require.Equal(t, mocks.ContractLeafNode, data) - require.Equal(t, models.StateAccountModel{ - BlockNumber: mocks.BlockNumber.String(), - HeaderID: account.HeaderID, - StatePath: stateNode.Path, - Balance: "0", - CodeHash: mocks.ContractCodeHash.Bytes(), - StorageRoot: mocks.ContractRoot, - Nonce: 1, - }, account) - } - if stateNode.CID == state2CID.String() { - require.Equal(t, 2, 
stateNode.NodeType) - require.Equal(t, common.BytesToHash(mocks.AccountLeafKey).Hex(), stateNode.StateKey) - require.Equal(t, []byte{'\x0c'}, stateNode.Path) - require.Equal(t, mocks.AccountLeafNode, data) - require.Equal(t, models.StateAccountModel{ - BlockNumber: mocks.BlockNumber.String(), - HeaderID: account.HeaderID, - StatePath: stateNode.Path, - Balance: "1000", - CodeHash: mocks.AccountCodeHash.Bytes(), - StorageRoot: mocks.AccountRoot, - Nonce: 0, - }, account) - } - } - // check that Removed state nodes were properly indexed and published - stateNodes = make([]models.StateNodeModel, 0) - pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id - FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) - WHERE header_cids.block_number = $1 AND node_type = 3` - err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 2, len(stateNodes)) - for idx, stateNode := range stateNodes { - var data []byte - dc, err := cid.Decode(stateNode.CID) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - require.Equal(t, shared.RemovedNodeMhKey, prefixedKey) - err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - - if idx == 0 { - require.Equal(t, shared.RemovedNodeStateCID, stateNode.CID) - require.Equal(t, common.BytesToHash(mocks.RemovedLeafKey).Hex(), stateNode.StateKey) - require.Equal(t, []byte{'\x02'}, stateNode.Path) - require.Equal(t, []byte{}, data) - } - if idx == 1 { - require.Equal(t, shared.RemovedNodeStateCID, stateNode.CID) - require.Equal(t, common.BytesToHash(mocks.Contract2LeafKey).Hex(), stateNode.StateKey) - require.Equal(t, []byte{'\x07'}, stateNode.Path) - require.Equal(t, []byte{}, data) 
- } - } + test.TestPublishAndIndexStateIPLDs(t, db) }) t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) { setupSQLX(t) defer tearDown(t) defer checkTxClosure(t, 0, 0, 0) - // check that storage nodes were properly indexed - storageNodes := make([]models.StorageNodeWithStateKeyModel, 0) - pgStr := `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path - FROM eth.storage_cids, eth.state_cids, eth.header_cids - WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id) - AND state_cids.header_id = header_cids.block_hash - AND header_cids.block_number = $1 - AND storage_cids.node_type != 3 - ORDER BY storage_path` - err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 1, len(storageNodes)) - require.Equal(t, models.StorageNodeWithStateKeyModel{ - BlockNumber: mocks.BlockNumber.String(), - CID: storageCID.String(), - NodeType: 2, - StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(), - StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(), - Path: []byte{}, - }, storageNodes[0]) - var data []byte - dc, err := cid.Decode(storageNodes[0].CID) - if err != nil { - t.Fatal(err) - } - mhKey := dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() - err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, mocks.StorageLeafNode, data) - // check that Removed storage nodes were properly indexed - storageNodes = make([]models.StorageNodeWithStateKeyModel, 0) - pgStr = `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path - FROM 
eth.storage_cids, eth.state_cids, eth.header_cids - WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id) - AND state_cids.header_id = header_cids.block_hash - AND header_cids.block_number = $1 - AND storage_cids.node_type = 3` - err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 3, len(storageNodes)) - gotStorageNodes := make(map[string]models.StorageNodeWithStateKeyModel, 3) - for _, model := range storageNodes { - gotStorageNodes[model.StorageKey] = model - } - expectedStorageNodes := map[string]models.StorageNodeWithStateKeyModel{ - common.BytesToHash(mocks.RemovedLeafKey).Hex(): { - BlockNumber: mocks.BlockNumber.String(), - CID: shared.RemovedNodeStorageCID, - NodeType: 3, - StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(), - StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(), - Path: []byte{'\x03'}, - }, - common.BytesToHash(mocks.Storage2LeafKey).Hex(): { - BlockNumber: mocks.BlockNumber.String(), - CID: shared.RemovedNodeStorageCID, - NodeType: 3, - StorageKey: common.BytesToHash(mocks.Storage2LeafKey).Hex(), - StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(), - Path: []byte{'\x0e'}, - }, - common.BytesToHash(mocks.Storage3LeafKey).Hex(): { - BlockNumber: mocks.BlockNumber.String(), - CID: shared.RemovedNodeStorageCID, - NodeType: 3, - StorageKey: common.BytesToHash(mocks.Storage3LeafKey).Hex(), - StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(), - Path: []byte{'\x0f'}, - }, - } - for storageKey, storageNode := range gotStorageNodes { - require.Equal(t, expectedStorageNodes[storageKey], storageNode) - dc, err = cid.Decode(storageNode.CID) - if err != nil { - t.Fatal(err) - } - mhKey = dshelp.MultihashToDsKey(dc.Hash()) - prefixedKey = blockstore.BlockPrefix.String() + mhKey.String() - require.Equal(t, shared.RemovedNodeMhKey, prefixedKey) - err = db.Get(context.Background(), 
&data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) - if err != nil { - t.Fatal(err) - } - require.Equal(t, []byte{}, data) - } + test.TestPublishAndIndexStorageIPLDs(t, db) }) } @@ -594,49 +104,49 @@ func TestSQLXIndexerNonCanonical(t *testing.T) { t.Run("Publish and index header", func(t *testing.T) { setupSQLXNonCanonical(t) defer tearDown(t) - defer checkTxClosure(t, 1, 0, 1) + defer checkTxClosure(t, 0, 0, 0) - testPublishAndIndexHeaderNonCanonical(t) + test.TestPublishAndIndexHeaderNonCanonical(t, db) }) t.Run("Publish and index transactions", func(t *testing.T) { setupSQLXNonCanonical(t) defer tearDown(t) - defer checkTxClosure(t, 1, 0, 1) + defer checkTxClosure(t, 0, 0, 0) - testPublishAndIndexTransactionsNonCanonical(t) + test.TestPublishAndIndexTransactionsNonCanonical(t, db) }) t.Run("Publish and index receipts", func(t *testing.T) { setupSQLXNonCanonical(t) defer tearDown(t) - defer checkTxClosure(t, 1, 0, 1) + defer checkTxClosure(t, 0, 0, 0) - testPublishAndIndexReceiptsNonCanonical(t) + test.TestPublishAndIndexReceiptsNonCanonical(t, db) }) t.Run("Publish and index logs", func(t *testing.T) { setupSQLXNonCanonical(t) defer tearDown(t) - defer checkTxClosure(t, 1, 0, 1) + defer checkTxClosure(t, 0, 0, 0) - testPublishAndIndexLogsNonCanonical(t) + test.TestPublishAndIndexLogsNonCanonical(t, db) }) t.Run("Publish and index state nodes", func(t *testing.T) { setupSQLXNonCanonical(t) defer tearDown(t) - defer checkTxClosure(t, 1, 0, 1) + defer checkTxClosure(t, 0, 0, 0) - testPublishAndIndexStateNonCanonical(t) + test.TestPublishAndIndexStateNonCanonical(t, db) }) t.Run("Publish and index storage nodes", func(t *testing.T) { setupSQLXNonCanonical(t) defer tearDown(t) - defer checkTxClosure(t, 1, 0, 1) + defer checkTxClosure(t, 0, 0, 0) - testPublishAndIndexStorageNonCanonical(t) + test.TestPublishAndIndexStorageNonCanonical(t, db) }) } @@ -645,328 +155,73 @@ func TestSQLXWatchAddressMethods(t *testing.T) { defer tearDown(t) defer checkTxClosure(t, 
0, 0, 0) - type res struct { - Address string `db:"address"` - CreatedAt uint64 `db:"created_at"` - WatchedAt uint64 `db:"watched_at"` - LastFilledAt uint64 `db:"last_filled_at"` - } - pgStr := "SELECT * FROM eth_meta.watched_addresses" - t.Run("Load watched addresses (empty table)", func(t *testing.T) { - expectedData := []common.Address{} - - rows, err := ind.LoadWatchedAddresses() - require.NoError(t, err) - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestLoadEmptyWatchedAddresses(t, ind) }) t.Run("Insert watched addresses", func(t *testing.T) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - } - expectedData := []res{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - WatchedAt: watchedAt1, - LastFilledAt: lastFilledAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - WatchedAt: watchedAt1, - LastFilledAt: lastFilledAt, - }, - } - - err = ind.InsertWatchedAddresses(args, big.NewInt(int64(watchedAt1))) + args := mocks.GetInsertWatchedAddressesArgs() + err = ind.InsertWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt1))) require.NoError(t, err) - rows := []res{} - err = db.Select(context.Background(), &rows, pgStr) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestInsertWatchedAddresses(t, db) }) t.Run("Insert watched addresses (some already watched)", func(t *testing.T) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - } - expectedData := []res{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - WatchedAt: watchedAt1, - 
LastFilledAt: lastFilledAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - WatchedAt: watchedAt1, - LastFilledAt: lastFilledAt, - }, - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - WatchedAt: watchedAt2, - LastFilledAt: lastFilledAt, - }, - } - - err = ind.InsertWatchedAddresses(args, big.NewInt(int64(watchedAt2))) + args := mocks.GetInsertAlreadyWatchedAddressesArgs() + err = ind.InsertWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt2))) require.NoError(t, err) - rows := []res{} - err = db.Select(context.Background(), &rows, pgStr) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestInsertAlreadyWatchedAddresses(t, db) }) t.Run("Remove watched addresses", func(t *testing.T) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - } - expectedData := []res{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - WatchedAt: watchedAt1, - LastFilledAt: lastFilledAt, - }, - } - + args := mocks.GetRemoveWatchedAddressesArgs() err = ind.RemoveWatchedAddresses(args) require.NoError(t, err) - rows := []res{} - err = db.Select(context.Background(), &rows, pgStr) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestRemoveWatchedAddresses(t, db) }) t.Run("Remove watched addresses (some non-watched)", func(t *testing.T) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - } - expectedData := []res{} - + args := mocks.GetRemoveNonWatchedAddressesArgs() err = ind.RemoveWatchedAddresses(args) require.NoError(t, err) - 
rows := []res{} - err = db.Select(context.Background(), &rows, pgStr) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestRemoveNonWatchedAddresses(t, db) }) t.Run("Set watched addresses", func(t *testing.T) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - }, - } - expectedData := []res{ - { - Address: contract1Address, - CreatedAt: contract1CreatedAt, - WatchedAt: watchedAt2, - LastFilledAt: lastFilledAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - WatchedAt: watchedAt2, - LastFilledAt: lastFilledAt, - }, - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - WatchedAt: watchedAt2, - LastFilledAt: lastFilledAt, - }, - } - - err = ind.SetWatchedAddresses(args, big.NewInt(int64(watchedAt2))) + args := mocks.GetSetWatchedAddressesArgs() + err = ind.SetWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt2))) require.NoError(t, err) - rows := []res{} - err = db.Select(context.Background(), &rows, pgStr) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestSetWatchedAddresses(t, db) }) t.Run("Set watched addresses (some already watched)", func(t *testing.T) { - args := []sdtypes.WatchAddressArg{ - { - Address: contract4Address, - CreatedAt: contract4CreatedAt, - }, - { - Address: contract2Address, - CreatedAt: contract2CreatedAt, - }, - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - }, - } - expectedData := []res{ - { - Address: contract4Address, - CreatedAt: contract4CreatedAt, - WatchedAt: watchedAt3, - LastFilledAt: lastFilledAt, - }, - { - Address: 
contract2Address, - CreatedAt: contract2CreatedAt, - WatchedAt: watchedAt3, - LastFilledAt: lastFilledAt, - }, - { - Address: contract3Address, - CreatedAt: contract3CreatedAt, - WatchedAt: watchedAt3, - LastFilledAt: lastFilledAt, - }, - } - - err = ind.SetWatchedAddresses(args, big.NewInt(int64(watchedAt3))) + args := mocks.GetSetAlreadyWatchedAddressesArgs() + err = ind.SetWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt3))) require.NoError(t, err) - rows := []res{} - err = db.Select(context.Background(), &rows, pgStr) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestSetAlreadyWatchedAddresses(t, db) }) t.Run("Load watched addresses", func(t *testing.T) { - expectedData := []common.Address{ - common.HexToAddress(contract4Address), - common.HexToAddress(contract2Address), - common.HexToAddress(contract3Address), - } - - rows, err := ind.LoadWatchedAddresses() - require.NoError(t, err) - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestLoadWatchedAddresses(t, ind) }) t.Run("Clear watched addresses", func(t *testing.T) { - expectedData := []res{} - err = ind.ClearWatchedAddresses() require.NoError(t, err) - rows := []res{} - err = db.Select(context.Background(), &rows, pgStr) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, expectedData[idx], row) - } + test.TestClearWatchedAddresses(t, db) }) t.Run("Clear watched addresses (empty table)", func(t *testing.T) { - expectedData := []res{} - err = ind.ClearWatchedAddresses() require.NoError(t, err) - rows := []res{} - err = db.Select(context.Background(), &rows, pgStr) - if err != nil { - t.Fatal(err) - } - - expectTrue(t, len(rows) == len(expectedData)) - for idx, row := range rows { - require.Equal(t, 
expectedData[idx], row) - } + test.TestClearEmptyWatchedAddresses(t, db) }) } diff --git a/statediff/indexer/database/sql/test_helpers.go b/statediff/indexer/database/sql/test_helpers.go deleted file mode 100644 index 398258a0e..000000000 --- a/statediff/indexer/database/sql/test_helpers.go +++ /dev/null @@ -1,84 +0,0 @@ -// VulcanizeDB -// Copyright © 2019 Vulcanize - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. - -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
- -package sql - -import ( - "context" - "testing" -) - -// TearDownDB is used to tear down the watcher dbs after tests -func TearDownDB(t *testing.T, db Database) { - ctx := context.Background() - tx, err := db.Begin(ctx) - if err != nil { - t.Fatal(err) - } - - _, err = tx.Exec(ctx, `DELETE FROM eth.header_cids`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(ctx, `DELETE FROM eth.uncle_cids`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(ctx, `DELETE FROM eth.transaction_cids`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(ctx, `DELETE FROM eth.receipt_cids`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(ctx, `DELETE FROM eth.state_cids`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(ctx, `DELETE FROM eth.storage_cids`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(ctx, `DELETE FROM eth.state_accounts`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(ctx, `DELETE FROM eth.access_list_elements`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(ctx, `DELETE FROM eth.log_cids`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(ctx, `DELETE FROM blocks`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(ctx, `DELETE FROM nodes`) - if err != nil { - t.Fatal(err) - } - _, err = tx.Exec(ctx, `DELETE FROM eth_meta.watched_addresses`) - if err != nil { - t.Fatal(err) - } - err = tx.Commit(ctx) - if err != nil { - t.Fatal(err) - } -} diff --git a/statediff/indexer/mocks/test_data.go b/statediff/indexer/mocks/test_data.go index ceb45233c..eaa16c1fc 100644 --- a/statediff/indexer/mocks/test_data.go +++ b/statediff/indexer/mocks/test_data.go @@ -255,6 +255,21 @@ var ( }, }, } + + // Mock data for testing watched addresses methods + Contract1Address = "0x5d663F5269090bD2A7DC2390c911dF6083D7b28F" + Contract2Address = "0x6Eb7e5C66DB8af2E96159AC440cbc8CDB7fbD26B" + Contract3Address = "0xcfeB164C328CA13EFd3C77E1980d94975aDfedfc" + Contract4Address = "0x0Edf0c4f393a628DE4828B228C48175b3EA297fc" + 
Contract1CreatedAt = uint64(1) + Contract2CreatedAt = uint64(2) + Contract3CreatedAt = uint64(3) + Contract4CreatedAt = uint64(4) + + LastFilledAt = uint64(0) + WatchedAt1 = uint64(10) + WatchedAt2 = uint64(15) + WatchedAt3 = uint64(20) ) type LegacyData struct { @@ -284,8 +299,7 @@ type LegacyData struct { StateDiffs []sdtypes.StateNode } -func NewLegacyData() *LegacyData { - config := params.MainnetChainConfig +func NewLegacyData(config *params.ChainConfig) *LegacyData { // Block number before london fork. blockNumber := config.EIP155Block @@ -506,3 +520,90 @@ func createNonCanonicalBlockReceipts(config *params.ChainConfig, blockNumber *bi return types.Receipts{mockReceipt0, mockReceipt1} } + +// Helper methods for testing watched addresses methods +func GetInsertWatchedAddressesArgs() []sdtypes.WatchAddressArg { + return []sdtypes.WatchAddressArg{ + { + Address: Contract1Address, + CreatedAt: Contract1CreatedAt, + }, + { + Address: Contract2Address, + CreatedAt: Contract2CreatedAt, + }, + } +} + +func GetInsertAlreadyWatchedAddressesArgs() []sdtypes.WatchAddressArg { + return []sdtypes.WatchAddressArg{ + { + Address: Contract3Address, + CreatedAt: Contract3CreatedAt, + }, + { + Address: Contract2Address, + CreatedAt: Contract2CreatedAt, + }, + } +} + +func GetRemoveWatchedAddressesArgs() []sdtypes.WatchAddressArg { + return []sdtypes.WatchAddressArg{ + { + Address: Contract3Address, + CreatedAt: Contract3CreatedAt, + }, + { + Address: Contract2Address, + CreatedAt: Contract2CreatedAt, + }, + } +} + +func GetRemoveNonWatchedAddressesArgs() []sdtypes.WatchAddressArg { + return []sdtypes.WatchAddressArg{ + { + Address: Contract1Address, + CreatedAt: Contract1CreatedAt, + }, + { + Address: Contract2Address, + CreatedAt: Contract2CreatedAt, + }, + } +} + +func GetSetWatchedAddressesArgs() []sdtypes.WatchAddressArg { + return []sdtypes.WatchAddressArg{ + { + Address: Contract1Address, + CreatedAt: Contract1CreatedAt, + }, + { + Address: Contract2Address, + CreatedAt: 
Contract2CreatedAt, + }, + { + Address: Contract3Address, + CreatedAt: Contract3CreatedAt, + }, + } +} + +func GetSetAlreadyWatchedAddressesArgs() []sdtypes.WatchAddressArg { + return []sdtypes.WatchAddressArg{ + { + Address: Contract4Address, + CreatedAt: Contract4CreatedAt, + }, + { + Address: Contract2Address, + CreatedAt: Contract2CreatedAt, + }, + { + Address: Contract3Address, + CreatedAt: Contract3CreatedAt, + }, + } +} diff --git a/statediff/indexer/test/test.go b/statediff/indexer/test/test.go new file mode 100644 index 000000000..dedcd3655 --- /dev/null +++ b/statediff/indexer/test/test.go @@ -0,0 +1,1274 @@ +// VulcanizeDB +// Copyright © 2022 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package test + +import ( + "bytes" + "context" + "sort" + "testing" + + "github.com/ipfs/go-cid" + blockstore "github.com/ipfs/go-ipfs-blockstore" + dshelp "github.com/ipfs/go-ipfs-ds-help" + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/statediff/indexer/database/file" + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" + "github.com/ethereum/go-ethereum/statediff/indexer/mocks" + "github.com/ethereum/go-ethereum/statediff/indexer/models" + "github.com/ethereum/go-ethereum/statediff/indexer/shared" + "github.com/ethereum/go-ethereum/statediff/indexer/test_helpers" +) + +// SetupTestData indexes a single mock block along with it's state nodes +func SetupTestData(t *testing.T, ind interfaces.StateDiffIndexer) { + var tx interfaces.Batch + tx, err = ind.PushBlock( + mockBlock, + mocks.MockReceipts, + mocks.MockBlock.Difficulty()) + if err != nil { + t.Fatal(err) + } + defer func() { + if err := tx.Submit(err); err != nil { + t.Fatal(err) + } + }() + for _, node := range mocks.StateDiffs { + err = ind.PushStateNode(tx, node, mockBlock.Hash().String()) + require.NoError(t, err) + } + + if batchTx, ok := tx.(*sql.BatchTx); ok { + require.Equal(t, mocks.BlockNumber.String(), batchTx.BlockNumber) + } else if batchTx, ok := tx.(*file.BatchTx); ok { + require.Equal(t, mocks.BlockNumber.String(), batchTx.BlockNumber) + } +} + +func TestPublishAndIndexHeaderIPLDs(t *testing.T, db sql.Database) { + pgStr := `SELECT cid, cast(td AS TEXT), cast(reward AS TEXT), block_hash, coinbase + FROM eth.header_cids + WHERE block_number = $1` + // check header was properly indexed + type res struct { + CID string + TD string + Reward string + BlockHash string `db:"block_hash"` + Coinbase string `db:"coinbase"` + } + header := new(res) + err = 
db.QueryRow(context.Background(), pgStr, mocks.BlockNumber.Uint64()).Scan( + &header.CID, + &header.TD, + &header.Reward, + &header.BlockHash, + &header.Coinbase) + if err != nil { + t.Fatal(err) + } + require.Equal(t, headerCID.String(), header.CID) + require.Equal(t, mocks.MockBlock.Difficulty().String(), header.TD) + require.Equal(t, "2000000000000021250", header.Reward) + require.Equal(t, mocks.MockHeader.Coinbase.String(), header.Coinbase) + dc, err := cid.Decode(header.CID) + if err != nil { + t.Fatal(err) + } + mhKey := dshelp.MultihashToDsKey(dc.Hash()) + prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() + var data []byte + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, mocks.MockHeaderRlp, data) +} + +func TestPublishAndIndexTransactionIPLDs(t *testing.T, db sql.Database) { + // check that txs were properly indexed and published + trxs := make([]string, 0) + pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash) + WHERE header_cids.block_number = $1` + err = db.Select(context.Background(), &trxs, pgStr, mocks.BlockNumber.Uint64()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, 5, len(trxs)) + expectTrue(t, test_helpers.ListContainsString(trxs, trx1CID.String())) + expectTrue(t, test_helpers.ListContainsString(trxs, trx2CID.String())) + expectTrue(t, test_helpers.ListContainsString(trxs, trx3CID.String())) + expectTrue(t, test_helpers.ListContainsString(trxs, trx4CID.String())) + expectTrue(t, test_helpers.ListContainsString(trxs, trx5CID.String())) + + transactions := mocks.MockBlock.Transactions() + type txResult struct { + TxType uint8 `db:"tx_type"` + Value string + } + for _, c := range trxs { + dc, err := cid.Decode(c) + if err != nil { + t.Fatal(err) + } + mhKey := dshelp.MultihashToDsKey(dc.Hash()) + prefixedKey := 
blockstore.BlockPrefix.String() + mhKey.String() + var data []byte + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) + if err != nil { + t.Fatal(err) + } + txTypeAndValueStr := `SELECT tx_type, CAST(value as TEXT) FROM eth.transaction_cids WHERE cid = $1` + switch c { + case trx1CID.String(): + require.Equal(t, tx1, data) + txRes := new(txResult) + err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value) + if err != nil { + t.Fatal(err) + } + if txRes.TxType != 0 { + t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType) + } + if txRes.Value != transactions[0].Value().String() { + t.Fatalf("expected tx value %s got %s", transactions[0].Value().String(), txRes.Value) + } + case trx2CID.String(): + require.Equal(t, tx2, data) + txRes := new(txResult) + err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value) + if err != nil { + t.Fatal(err) + } + if txRes.TxType != 0 { + t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType) + } + if txRes.Value != transactions[1].Value().String() { + t.Fatalf("expected tx value %s got %s", transactions[1].Value().String(), txRes.Value) + } + case trx3CID.String(): + require.Equal(t, tx3, data) + txRes := new(txResult) + err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value) + if err != nil { + t.Fatal(err) + } + if txRes.TxType != 0 { + t.Fatalf("expected LegacyTxType (0), got %d", txRes.TxType) + } + if txRes.Value != transactions[2].Value().String() { + t.Fatalf("expected tx value %s got %s", transactions[2].Value().String(), txRes.Value) + } + case trx4CID.String(): + require.Equal(t, tx4, data) + txRes := new(txResult) + err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value) + if err != nil { + t.Fatal(err) + } + if txRes.TxType != types.AccessListTxType { + t.Fatalf("expected AccessListTxType (1), got %d", 
txRes.TxType) + } + if txRes.Value != transactions[3].Value().String() { + t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value) + } + accessListElementModels := make([]models.AccessListElementModel, 0) + pgStr = "SELECT cast(access_list_elements.block_number AS TEXT), access_list_elements.index, access_list_elements.tx_id, " + + "access_list_elements.address, access_list_elements.storage_keys FROM eth.access_list_elements " + + "INNER JOIN eth.transaction_cids ON (tx_id = transaction_cids.tx_hash) WHERE cid = $1 ORDER BY access_list_elements.index ASC" + err = db.Select(context.Background(), &accessListElementModels, pgStr, c) + if err != nil { + t.Fatal(err) + } + if len(accessListElementModels) != 2 { + t.Fatalf("expected two access list entries, got %d", len(accessListElementModels)) + } + model1 := models.AccessListElementModel{ + BlockNumber: mocks.BlockNumber.String(), + Index: accessListElementModels[0].Index, + Address: accessListElementModels[0].Address, + } + model2 := models.AccessListElementModel{ + BlockNumber: mocks.BlockNumber.String(), + Index: accessListElementModels[1].Index, + Address: accessListElementModels[1].Address, + StorageKeys: accessListElementModels[1].StorageKeys, + } + require.Equal(t, mocks.AccessListEntry1Model, model1) + require.Equal(t, mocks.AccessListEntry2Model, model2) + case trx5CID.String(): + require.Equal(t, tx5, data) + txRes := new(txResult) + err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value) + if err != nil { + t.Fatal(err) + } + if txRes.TxType != types.DynamicFeeTxType { + t.Fatalf("expected DynamicFeeTxType (2), got %d", txRes.TxType) + } + if txRes.Value != transactions[4].Value().String() { + t.Fatalf("expected tx value %s got %s", transactions[4].Value().String(), txRes.Value) + } + } + } +} + +func TestPublishAndIndexLogIPLDs(t *testing.T, db sql.Database) { + rcts := make([]string, 0) + rctsPgStr := `SELECT receipt_cids.leaf_cid FROM 
eth.receipt_cids, eth.transaction_cids, eth.header_cids + WHERE receipt_cids.tx_id = transaction_cids.tx_hash + AND transaction_cids.header_id = header_cids.block_hash + AND header_cids.block_number = $1 + ORDER BY transaction_cids.index` + logsPgStr := `SELECT log_cids.index, log_cids.address, log_cids.topic0, log_cids.topic1, data FROM eth.log_cids + INNER JOIN eth.receipt_cids ON (log_cids.rct_id = receipt_cids.tx_id) + INNER JOIN public.blocks ON (log_cids.leaf_mh_key = blocks.key) + WHERE receipt_cids.leaf_cid = $1 ORDER BY eth.log_cids.index ASC` + err = db.Select(context.Background(), &rcts, rctsPgStr, mocks.BlockNumber.Uint64()) + if err != nil { + t.Fatal(err) + } + if len(rcts) != len(mocks.MockReceipts) { + t.Fatalf("expected %d receipts, got %d", len(mocks.MockReceipts), len(rcts)) + } + + type logIPLD struct { + Index int `db:"index"` + Address string `db:"address"` + Data []byte `db:"data"` + Topic0 string `db:"topic0"` + Topic1 string `db:"topic1"` + } + for i := range rcts { + results := make([]logIPLD, 0) + err = db.Select(context.Background(), &results, logsPgStr, rcts[i]) + require.NoError(t, err) + + expectedLogs := mocks.MockReceipts[i].Logs + require.Equal(t, len(expectedLogs), len(results)) + + var nodeElements []interface{} + for idx, r := range results { + // Attempt to decode the log leaf node. + err = rlp.DecodeBytes(r.Data, &nodeElements) + require.NoError(t, err) + if len(nodeElements) == 2 { + logRaw, err := rlp.EncodeToBytes(&expectedLogs[idx]) + require.NoError(t, err) + // 2nd element of the leaf node contains the encoded log data. 
+ require.Equal(t, nodeElements[1].([]byte), logRaw) + } else { + logRaw, err := rlp.EncodeToBytes(&expectedLogs[idx]) + require.NoError(t, err) + // raw log was IPLDized + require.Equal(t, r.Data, logRaw) + } + } + } +} + +func TestPublishAndIndexReceiptIPLDs(t *testing.T, db sql.Database) { + // check receipts were properly indexed and published + rcts := make([]string, 0) + pgStr := `SELECT receipt_cids.leaf_cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids + WHERE receipt_cids.tx_id = transaction_cids.tx_hash + AND transaction_cids.header_id = header_cids.block_hash + AND header_cids.block_number = $1 order by transaction_cids.index` + err = db.Select(context.Background(), &rcts, pgStr, mocks.BlockNumber.Uint64()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, 5, len(rcts)) + expectTrue(t, test_helpers.ListContainsString(rcts, rct1CID.String())) + expectTrue(t, test_helpers.ListContainsString(rcts, rct2CID.String())) + expectTrue(t, test_helpers.ListContainsString(rcts, rct3CID.String())) + expectTrue(t, test_helpers.ListContainsString(rcts, rct4CID.String())) + expectTrue(t, test_helpers.ListContainsString(rcts, rct5CID.String())) + + for idx, c := range rcts { + result := make([]models.IPLDModel, 0) + pgStr = `SELECT data + FROM eth.receipt_cids + INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key) + WHERE receipt_cids.leaf_cid = $1` + err = db.Select(context.Background(), &result, pgStr, c) + if err != nil { + t.Fatal(err) + } + + // Decode the receipt leaf node. 
+ var nodeElements []interface{} + err = rlp.DecodeBytes(result[0].Data, &nodeElements) + require.NoError(t, err) + + expectedRct, err := mocks.MockReceipts[idx].MarshalBinary() + require.NoError(t, err) + + require.Equal(t, nodeElements[1].([]byte), expectedRct) + + dc, err := cid.Decode(c) + if err != nil { + t.Fatal(err) + } + mhKey := dshelp.MultihashToDsKey(dc.Hash()) + prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() + var data []byte + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) + if err != nil { + t.Fatal(err) + } + + postStatePgStr := `SELECT post_state FROM eth.receipt_cids WHERE leaf_cid = $1` + switch c { + case rct1CID.String(): + require.Equal(t, rctLeaf1, data) + var postStatus uint64 + pgStr = `SELECT post_status FROM eth.receipt_cids WHERE leaf_cid = $1` + err = db.Get(context.Background(), &postStatus, pgStr, c) + if err != nil { + t.Fatal(err) + } + require.Equal(t, mocks.ExpectedPostStatus, postStatus) + case rct2CID.String(): + require.Equal(t, rctLeaf2, data) + var postState string + err = db.Get(context.Background(), &postState, postStatePgStr, c) + if err != nil { + t.Fatal(err) + } + require.Equal(t, mocks.ExpectedPostState1, postState) + case rct3CID.String(): + require.Equal(t, rctLeaf3, data) + var postState string + err = db.Get(context.Background(), &postState, postStatePgStr, c) + if err != nil { + t.Fatal(err) + } + require.Equal(t, mocks.ExpectedPostState2, postState) + case rct4CID.String(): + require.Equal(t, rctLeaf4, data) + var postState string + err = db.Get(context.Background(), &postState, postStatePgStr, c) + if err != nil { + t.Fatal(err) + } + require.Equal(t, mocks.ExpectedPostState3, postState) + case rct5CID.String(): + require.Equal(t, rctLeaf5, data) + var postState string + err = db.Get(context.Background(), &postState, postStatePgStr, c) + if err != nil { + t.Fatal(err) + } + require.Equal(t, mocks.ExpectedPostState3, postState) + } + } +} + +func 
TestPublishAndIndexStateIPLDs(t *testing.T, db sql.Database) { + // check that state nodes were properly indexed and published + stateNodes := make([]models.StateNodeModel, 0) + pgStr := `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id + FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) + WHERE header_cids.block_number = $1 AND node_type != 3` + err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, 2, len(stateNodes)) + for _, stateNode := range stateNodes { + var data []byte + dc, err := cid.Decode(stateNode.CID) + if err != nil { + t.Fatal(err) + } + mhKey := dshelp.MultihashToDsKey(dc.Hash()) + prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) + if err != nil { + t.Fatal(err) + } + pgStr = `SELECT cast(block_number AS TEXT), header_id, state_path, cast(balance AS TEXT), nonce, code_hash, storage_root from eth.state_accounts WHERE header_id = $1 AND state_path = $2` + var account models.StateAccountModel + err = db.Get(context.Background(), &account, pgStr, stateNode.HeaderID, stateNode.Path) + if err != nil { + t.Fatal(err) + } + if stateNode.CID == state1CID.String() { + require.Equal(t, 2, stateNode.NodeType) + require.Equal(t, common.BytesToHash(mocks.ContractLeafKey).Hex(), stateNode.StateKey) + require.Equal(t, []byte{'\x06'}, stateNode.Path) + require.Equal(t, mocks.ContractLeafNode, data) + require.Equal(t, models.StateAccountModel{ + BlockNumber: mocks.BlockNumber.String(), + HeaderID: account.HeaderID, + StatePath: stateNode.Path, + Balance: "0", + CodeHash: mocks.ContractCodeHash.Bytes(), + StorageRoot: mocks.ContractRoot, + Nonce: 1, + }, account) + } + if stateNode.CID == state2CID.String() { + require.Equal(t, 2, stateNode.NodeType) + 
require.Equal(t, common.BytesToHash(mocks.AccountLeafKey).Hex(), stateNode.StateKey) + require.Equal(t, []byte{'\x0c'}, stateNode.Path) + require.Equal(t, mocks.AccountLeafNode, data) + require.Equal(t, models.StateAccountModel{ + BlockNumber: mocks.BlockNumber.String(), + HeaderID: account.HeaderID, + StatePath: stateNode.Path, + Balance: "1000", + CodeHash: mocks.AccountCodeHash.Bytes(), + StorageRoot: mocks.AccountRoot, + Nonce: 0, + }, account) + } + } + + // check that Removed state nodes were properly indexed and published + stateNodes = make([]models.StateNodeModel, 0) + pgStr = `SELECT state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id + FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) + WHERE header_cids.block_number = $1 AND node_type = 3 + ORDER BY state_path` + err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, 2, len(stateNodes)) + for idx, stateNode := range stateNodes { + var data []byte + dc, err := cid.Decode(stateNode.CID) + if err != nil { + t.Fatal(err) + } + mhKey := dshelp.MultihashToDsKey(dc.Hash()) + prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() + require.Equal(t, shared.RemovedNodeMhKey, prefixedKey) + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) + if err != nil { + t.Fatal(err) + } + + if idx == 0 { + require.Equal(t, shared.RemovedNodeStateCID, stateNode.CID) + require.Equal(t, common.BytesToHash(mocks.RemovedLeafKey).Hex(), stateNode.StateKey) + require.Equal(t, []byte{'\x02'}, stateNode.Path) + require.Equal(t, []byte{}, data) + } + if idx == 1 { + require.Equal(t, shared.RemovedNodeStateCID, stateNode.CID) + require.Equal(t, common.BytesToHash(mocks.Contract2LeafKey).Hex(), stateNode.StateKey) + require.Equal(t, []byte{'\x07'}, stateNode.Path) + require.Equal(t, []byte{}, 
data) + } + } +} + +func TestPublishAndIndexStorageIPLDs(t *testing.T, db sql.Database) { + // check that storage nodes were properly indexed + storageNodes := make([]models.StorageNodeWithStateKeyModel, 0) + pgStr := `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path + FROM eth.storage_cids, eth.state_cids, eth.header_cids + WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id) + AND state_cids.header_id = header_cids.block_hash + AND header_cids.block_number = $1 + AND storage_cids.node_type != 3 + ORDER BY storage_path` + err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, 1, len(storageNodes)) + require.Equal(t, models.StorageNodeWithStateKeyModel{ + BlockNumber: mocks.BlockNumber.String(), + CID: storageCID.String(), + NodeType: 2, + StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(), + StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(), + Path: []byte{}, + }, storageNodes[0]) + var data []byte + dc, err := cid.Decode(storageNodes[0].CID) + if err != nil { + t.Fatal(err) + } + mhKey := dshelp.MultihashToDsKey(dc.Hash()) + prefixedKey := blockstore.BlockPrefix.String() + mhKey.String() + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, mocks.StorageLeafNode, data) + + // check that Removed storage nodes were properly indexed + storageNodes = make([]models.StorageNodeWithStateKeyModel, 0) + pgStr = `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path + FROM eth.storage_cids, eth.state_cids, eth.header_cids + WHERE (storage_cids.state_path, storage_cids.header_id) = 
(state_cids.state_path, state_cids.header_id) + AND state_cids.header_id = header_cids.block_hash + AND header_cids.block_number = $1 + AND storage_cids.node_type = 3 + ORDER BY storage_path` + err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, 3, len(storageNodes)) + expectedStorageNodes := []models.StorageNodeWithStateKeyModel{ + { + BlockNumber: mocks.BlockNumber.String(), + CID: shared.RemovedNodeStorageCID, + NodeType: 3, + StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(), + StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(), + Path: []byte{'\x03'}, + }, + { + BlockNumber: mocks.BlockNumber.String(), + CID: shared.RemovedNodeStorageCID, + NodeType: 3, + StorageKey: common.BytesToHash(mocks.Storage2LeafKey).Hex(), + StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(), + Path: []byte{'\x0e'}, + }, + { + BlockNumber: mocks.BlockNumber.String(), + CID: shared.RemovedNodeStorageCID, + NodeType: 3, + StorageKey: common.BytesToHash(mocks.Storage3LeafKey).Hex(), + StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(), + Path: []byte{'\x0f'}, + }, + } + for idx, storageNode := range storageNodes { + require.Equal(t, expectedStorageNodes[idx], storageNode) + dc, err = cid.Decode(storageNode.CID) + if err != nil { + t.Fatal(err) + } + mhKey = dshelp.MultihashToDsKey(dc.Hash()) + prefixedKey = blockstore.BlockPrefix.String() + mhKey.String() + require.Equal(t, shared.RemovedNodeMhKey, prefixedKey) + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, []byte{}, data) + } +} + +// SetupTestDataNonCanonical indexes a mock block and a non-canonical mock block at London height +// and a non-canonical block at London height + 1 +// along with their state nodes +func SetupTestDataNonCanonical(t *testing.T, ind interfaces.StateDiffIndexer) { + // index a canonical 
block at London height + var tx1 interfaces.Batch + tx1, err = ind.PushBlock( + mockBlock, + mocks.MockReceipts, + mocks.MockBlock.Difficulty()) + if err != nil { + t.Fatal(err) + } + for _, node := range mocks.StateDiffs { + err = ind.PushStateNode(tx1, node, mockBlock.Hash().String()) + require.NoError(t, err) + } + + if batchTx, ok := tx1.(*sql.BatchTx); ok { + require.Equal(t, mocks.BlockNumber.String(), batchTx.BlockNumber) + } else if batchTx, ok := tx1.(*file.BatchTx); ok { + require.Equal(t, mocks.BlockNumber.String(), batchTx.BlockNumber) + } + + if err := tx1.Submit(err); err != nil { + t.Fatal(err) + } + + // index a non-canonical block at London height + // has transactions overlapping with that of the canonical block + var tx2 interfaces.Batch + tx2, err = ind.PushBlock( + mockNonCanonicalBlock, + mocks.MockNonCanonicalBlockReceipts, + mockNonCanonicalBlock.Difficulty()) + if err != nil { + t.Fatal(err) + } + for _, node := range mocks.StateDiffs { + err = ind.PushStateNode(tx2, node, mockNonCanonicalBlock.Hash().String()) + require.NoError(t, err) + } + + if tx, ok := tx2.(*sql.BatchTx); ok { + require.Equal(t, mocks.BlockNumber.String(), tx.BlockNumber) + } else if tx, ok := tx2.(*file.BatchTx); ok { + require.Equal(t, mocks.BlockNumber.String(), tx.BlockNumber) + } + + if err := tx2.Submit(err); err != nil { + t.Fatal(err) + } + + // index a non-canonical block at London height + 1 + // has transactions overlapping with that of the canonical block + var tx3 interfaces.Batch + tx3, err = ind.PushBlock( + mockNonCanonicalBlock2, + mocks.MockNonCanonicalBlock2Receipts, + mockNonCanonicalBlock2.Difficulty()) + if err != nil { + t.Fatal(err) + } + for _, node := range mocks.StateDiffs[:2] { + err = ind.PushStateNode(tx3, node, mockNonCanonicalBlock2.Hash().String()) + require.NoError(t, err) + } + + if batchTx, ok := tx3.(*sql.BatchTx); ok { + require.Equal(t, mocks.Block2Number.String(), batchTx.BlockNumber) + } else if batchTx, ok := 
tx3.(*file.BatchTx); ok { + require.Equal(t, mocks.Block2Number.String(), batchTx.BlockNumber) + } + + if err := tx3.Submit(err); err != nil { + t.Fatal(err) + } +} + +func TestPublishAndIndexHeaderNonCanonical(t *testing.T, db sql.Database) { + // check indexed headers + pgStr := `SELECT CAST(block_number as TEXT), block_hash, cid, cast(td AS TEXT), cast(reward AS TEXT), + tx_root, receipt_root, uncle_root, coinbase + FROM eth.header_cids + ORDER BY block_number` + headerRes := make([]models.HeaderModel, 0) + err = db.Select(context.Background(), &headerRes, pgStr) + if err != nil { + t.Fatal(err) + } + + // expect three blocks to be indexed + // a canonical and a non-canonical block at London height, + // non-canonical block at London height + 1 + expectedRes := []models.HeaderModel{ + { + BlockNumber: mockBlock.Number().String(), + BlockHash: mockBlock.Hash().String(), + CID: headerCID.String(), + TotalDifficulty: mockBlock.Difficulty().String(), + TxRoot: mockBlock.TxHash().String(), + RctRoot: mockBlock.ReceiptHash().String(), + UncleRoot: mockBlock.UncleHash().String(), + Coinbase: mocks.MockHeader.Coinbase.String(), + }, + { + BlockNumber: mockNonCanonicalBlock.Number().String(), + BlockHash: mockNonCanonicalBlock.Hash().String(), + CID: mockNonCanonicalHeaderCID.String(), + TotalDifficulty: mockNonCanonicalBlock.Difficulty().String(), + TxRoot: mockNonCanonicalBlock.TxHash().String(), + RctRoot: mockNonCanonicalBlock.ReceiptHash().String(), + UncleRoot: mockNonCanonicalBlock.UncleHash().String(), + Coinbase: mocks.MockNonCanonicalHeader.Coinbase.String(), + }, + { + BlockNumber: mockNonCanonicalBlock2.Number().String(), + BlockHash: mockNonCanonicalBlock2.Hash().String(), + CID: mockNonCanonicalHeader2CID.String(), + TotalDifficulty: mockNonCanonicalBlock2.Difficulty().String(), + TxRoot: mockNonCanonicalBlock2.TxHash().String(), + RctRoot: mockNonCanonicalBlock2.ReceiptHash().String(), + UncleRoot: mockNonCanonicalBlock2.UncleHash().String(), + Coinbase: 
mocks.MockNonCanonicalHeader2.Coinbase.String(), + }, + } + expectedRes[0].Reward = shared.CalcEthBlockReward(mockBlock.Header(), mockBlock.Uncles(), mockBlock.Transactions(), mocks.MockReceipts).String() + expectedRes[1].Reward = shared.CalcEthBlockReward(mockNonCanonicalBlock.Header(), mockNonCanonicalBlock.Uncles(), mockNonCanonicalBlock.Transactions(), mocks.MockNonCanonicalBlockReceipts).String() + expectedRes[2].Reward = shared.CalcEthBlockReward(mockNonCanonicalBlock2.Header(), mockNonCanonicalBlock2.Uncles(), mockNonCanonicalBlock2.Transactions(), mocks.MockNonCanonicalBlock2Receipts).String() + + require.Equal(t, len(expectedRes), len(headerRes)) + require.ElementsMatch(t, + []string{mockBlock.Hash().String(), mockNonCanonicalBlock.Hash().String(), mockNonCanonicalBlock2.Hash().String()}, + []string{headerRes[0].BlockHash, headerRes[1].BlockHash, headerRes[2].BlockHash}, + ) + + if headerRes[0].BlockHash == mockBlock.Hash().String() { + require.Equal(t, expectedRes[0], headerRes[0]) + require.Equal(t, expectedRes[1], headerRes[1]) + require.Equal(t, expectedRes[2], headerRes[2]) + } else { + require.Equal(t, expectedRes[1], headerRes[0]) + require.Equal(t, expectedRes[0], headerRes[1]) + require.Equal(t, expectedRes[2], headerRes[2]) + } + + // check indexed IPLD blocks + headerCIDs := []cid.Cid{headerCID, mockNonCanonicalHeaderCID, mockNonCanonicalHeader2CID} + blockNumbers := []uint64{mocks.BlockNumber.Uint64(), mocks.BlockNumber.Uint64(), mocks.Block2Number.Uint64()} + headerRLPs := [][]byte{mocks.MockHeaderRlp, mocks.MockNonCanonicalHeaderRlp, mocks.MockNonCanonicalHeader2Rlp} + for i := range expectedRes { + var data []byte + prefixedKey := shared.MultihashKeyFromCID(headerCIDs[i]) + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, blockNumbers[i]) + if err != nil { + t.Fatal(err) + } + require.Equal(t, headerRLPs[i], data) + } +} + +func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database) { + // check indexed 
transactions + pgStr := `SELECT CAST(block_number as TEXT), header_id, tx_hash, cid, dst, src, index, + tx_data, tx_type, CAST(value as TEXT) + FROM eth.transaction_cids + ORDER BY block_number, index` + txRes := make([]models.TxModel, 0) + err = db.Select(context.Background(), &txRes, pgStr) + if err != nil { + t.Fatal(err) + } + + // expected transactions in the canonical block + mockBlockTxs := mocks.MockBlock.Transactions() + expectedBlockTxs := []models.TxModel{ + { + BlockNumber: mockBlock.Number().String(), + HeaderID: mockBlock.Hash().String(), + TxHash: mockBlockTxs[0].Hash().String(), + CID: trx1CID.String(), + Dst: shared.HandleZeroAddrPointer(mockBlockTxs[0].To()), + Src: mocks.SenderAddr.String(), + Index: 0, + Data: mockBlockTxs[0].Data(), + Type: mockBlockTxs[0].Type(), + Value: mockBlockTxs[0].Value().String(), + }, + { + BlockNumber: mockBlock.Number().String(), + HeaderID: mockBlock.Hash().String(), + TxHash: mockBlockTxs[1].Hash().String(), + CID: trx2CID.String(), + Dst: shared.HandleZeroAddrPointer(mockBlockTxs[1].To()), + Src: mocks.SenderAddr.String(), + Index: 1, + Data: mockBlockTxs[1].Data(), + Type: mockBlockTxs[1].Type(), + Value: mockBlockTxs[1].Value().String(), + }, + { + BlockNumber: mockBlock.Number().String(), + HeaderID: mockBlock.Hash().String(), + TxHash: mockBlockTxs[2].Hash().String(), + CID: trx3CID.String(), + Dst: shared.HandleZeroAddrPointer(mockBlockTxs[2].To()), + Src: mocks.SenderAddr.String(), + Index: 2, + Data: mockBlockTxs[2].Data(), + Type: mockBlockTxs[2].Type(), + Value: mockBlockTxs[2].Value().String(), + }, + { + BlockNumber: mockBlock.Number().String(), + HeaderID: mockBlock.Hash().String(), + TxHash: mockBlockTxs[3].Hash().String(), + CID: trx4CID.String(), + Dst: shared.HandleZeroAddrPointer(mockBlockTxs[3].To()), + Src: mocks.SenderAddr.String(), + Index: 3, + Data: mockBlockTxs[3].Data(), + Type: mockBlockTxs[3].Type(), + Value: mockBlockTxs[3].Value().String(), + }, + { + BlockNumber: 
mockBlock.Number().String(), + HeaderID: mockBlock.Hash().String(), + TxHash: mockBlockTxs[4].Hash().String(), + CID: trx5CID.String(), + Dst: shared.HandleZeroAddrPointer(mockBlockTxs[4].To()), + Src: mocks.SenderAddr.String(), + Index: 4, + Data: mockBlockTxs[4].Data(), + Type: mockBlockTxs[4].Type(), + Value: mockBlockTxs[4].Value().String(), + }, + } + + // expected transactions in the non-canonical block at London height + mockNonCanonicalBlockTxs := mockNonCanonicalBlock.Transactions() + expectedNonCanonicalBlockTxs := []models.TxModel{ + { + BlockNumber: mockNonCanonicalBlock.Number().String(), + HeaderID: mockNonCanonicalBlock.Hash().String(), + TxHash: mockNonCanonicalBlockTxs[0].Hash().String(), + CID: trx2CID.String(), + Dst: mockNonCanonicalBlockTxs[0].To().String(), + Src: mocks.SenderAddr.String(), + Index: 0, + Data: mockNonCanonicalBlockTxs[0].Data(), + Type: mockNonCanonicalBlockTxs[0].Type(), + Value: mockNonCanonicalBlockTxs[0].Value().String(), + }, + { + BlockNumber: mockNonCanonicalBlock.Number().String(), + HeaderID: mockNonCanonicalBlock.Hash().String(), + TxHash: mockNonCanonicalBlockTxs[1].Hash().String(), + CID: trx5CID.String(), + Dst: mockNonCanonicalBlockTxs[1].To().String(), + Src: mocks.SenderAddr.String(), + Index: 1, + Data: mockNonCanonicalBlockTxs[1].Data(), + Type: mockNonCanonicalBlockTxs[1].Type(), + Value: mockNonCanonicalBlockTxs[1].Value().String(), + }, + } + + // expected transactions in the non-canonical block at London height + 1 + mockNonCanonicalBlock2Txs := mockNonCanonicalBlock2.Transactions() + expectedNonCanonicalBlock2Txs := []models.TxModel{ + { + BlockNumber: mockNonCanonicalBlock2.Number().String(), + HeaderID: mockNonCanonicalBlock2.Hash().String(), + TxHash: mockNonCanonicalBlock2Txs[0].Hash().String(), + CID: trx3CID.String(), + Dst: "", + Src: mocks.SenderAddr.String(), + Index: 0, + Data: mockNonCanonicalBlock2Txs[0].Data(), + Type: mockNonCanonicalBlock2Txs[0].Type(), + Value: 
mockNonCanonicalBlock2Txs[0].Value().String(), + }, + { + BlockNumber: mockNonCanonicalBlock2.Number().String(), + HeaderID: mockNonCanonicalBlock2.Hash().String(), + TxHash: mockNonCanonicalBlock2Txs[1].Hash().String(), + CID: trx5CID.String(), + Dst: mockNonCanonicalBlock2Txs[1].To().String(), + Src: mocks.SenderAddr.String(), + Index: 1, + Data: mockNonCanonicalBlock2Txs[1].Data(), + Type: mockNonCanonicalBlock2Txs[1].Type(), + Value: mockNonCanonicalBlock2Txs[1].Value().String(), + }, + } + + require.Equal(t, len(expectedBlockTxs)+len(expectedNonCanonicalBlockTxs)+len(expectedNonCanonicalBlock2Txs), len(txRes)) + + // sort results such that non-canonical block transactions come after canonical block ones + sort.SliceStable(txRes, func(i, j int) bool { + if txRes[i].BlockNumber < txRes[j].BlockNumber { + return true + } else if txRes[i].HeaderID == txRes[j].HeaderID { + return txRes[i].Index < txRes[j].Index + } else if txRes[i].HeaderID == mockBlock.Hash().String() { + return true + } else { + return false + } + }) + + for i, expectedTx := range expectedBlockTxs { + require.Equal(t, expectedTx, txRes[i]) + } + for i, expectedTx := range expectedNonCanonicalBlockTxs { + require.Equal(t, expectedTx, txRes[len(expectedBlockTxs)+i]) + } + for i, expectedTx := range expectedNonCanonicalBlock2Txs { + require.Equal(t, expectedTx, txRes[len(expectedBlockTxs)+len(expectedNonCanonicalBlockTxs)+i]) + } + + // check indexed IPLD blocks + var data []byte + var prefixedKey string + + txCIDs := []cid.Cid{trx1CID, trx2CID, trx3CID, trx4CID, trx5CID} + txRLPs := [][]byte{tx1, tx2, tx3, tx4, tx5} + for i, txCID := range txCIDs { + prefixedKey = shared.MultihashKeyFromCID(txCID) + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, txRLPs[i], data) + } +} + +func TestPublishAndIndexReceiptsNonCanonical(t *testing.T, db sql.Database) { + // check indexed receipts + pgStr := `SELECT 
CAST(block_number as TEXT), header_id, tx_id, leaf_cid, leaf_mh_key, post_status, post_state, contract, contract_hash, log_root + FROM eth.receipt_cids + ORDER BY block_number` + rctRes := make([]models.ReceiptModel, 0) + err = db.Select(context.Background(), &rctRes, pgStr) + if err != nil { + t.Fatal(err) + } + + // expected receipts in the canonical block + rctCids := []cid.Cid{rct1CID, rct2CID, rct3CID, rct4CID, rct5CID} + expectedBlockRctsMap := make(map[string]models.ReceiptModel, len(mocks.MockReceipts)) + for i, mockBlockRct := range mocks.MockReceipts { + rctModel := createRctModel(mockBlockRct, rctCids[i], mockBlock.Number().String()) + expectedBlockRctsMap[rctCids[i].String()] = rctModel + } + + // expected receipts in the non-canonical block at London height + nonCanonicalBlockRctCids := []cid.Cid{nonCanonicalBlockRct1CID, nonCanonicalBlockRct2CID} + expectedNonCanonicalBlockRctsMap := make(map[string]models.ReceiptModel, len(mocks.MockNonCanonicalBlockReceipts)) + for i, mockNonCanonicalBlockRct := range mocks.MockNonCanonicalBlockReceipts { + rctModel := createRctModel(mockNonCanonicalBlockRct, nonCanonicalBlockRctCids[i], mockNonCanonicalBlock.Number().String()) + expectedNonCanonicalBlockRctsMap[nonCanonicalBlockRctCids[i].String()] = rctModel + } + + // expected receipts in the non-canonical block at London height + 1 + nonCanonicalBlock2RctCids := []cid.Cid{nonCanonicalBlock2Rct1CID, nonCanonicalBlock2Rct2CID} + expectedNonCanonicalBlock2RctsMap := make(map[string]models.ReceiptModel, len(mocks.MockNonCanonicalBlock2Receipts)) + for i, mockNonCanonicalBlock2Rct := range mocks.MockNonCanonicalBlock2Receipts { + rctModel := createRctModel(mockNonCanonicalBlock2Rct, nonCanonicalBlock2RctCids[i], mockNonCanonicalBlock2.Number().String()) + expectedNonCanonicalBlock2RctsMap[nonCanonicalBlock2RctCids[i].String()] = rctModel + } + + require.Equal(t, len(expectedBlockRctsMap)+len(expectedNonCanonicalBlockRctsMap)+len(expectedNonCanonicalBlock2RctsMap), 
len(rctRes)) + + // sort results such that non-canonical block receipts come after canonical block ones + sort.SliceStable(rctRes, func(i, j int) bool { + if rctRes[i].BlockNumber < rctRes[j].BlockNumber { + return true + } else if rctRes[i].HeaderID == rctRes[j].HeaderID { + return false + } else if rctRes[i].HeaderID == mockBlock.Hash().String() { + return true + } else { + return false + } + }) + + for i := 0; i < len(expectedBlockRctsMap); i++ { + rct := rctRes[i] + require.Contains(t, expectedBlockRctsMap, rct.LeafCID) + require.Equal(t, expectedBlockRctsMap[rct.LeafCID], rct) + } + + for i := 0; i < len(expectedNonCanonicalBlockRctsMap); i++ { + rct := rctRes[len(expectedBlockRctsMap)+i] + require.Contains(t, expectedNonCanonicalBlockRctsMap, rct.LeafCID) + require.Equal(t, expectedNonCanonicalBlockRctsMap[rct.LeafCID], rct) + } + + for i := 0; i < len(expectedNonCanonicalBlock2RctsMap); i++ { + rct := rctRes[len(expectedBlockRctsMap)+len(expectedNonCanonicalBlockRctsMap)+i] + require.Contains(t, expectedNonCanonicalBlock2RctsMap, rct.LeafCID) + require.Equal(t, expectedNonCanonicalBlock2RctsMap[rct.LeafCID], rct) + } + + // check indexed rct IPLD blocks + var data []byte + var prefixedKey string + + rctRLPs := [][]byte{ + rctLeaf1, rctLeaf2, rctLeaf3, rctLeaf4, rctLeaf5, + nonCanonicalBlockRctLeaf1, nonCanonicalBlockRctLeaf2, + } + for i, rctCid := range append(rctCids, nonCanonicalBlockRctCids...) 
{ + prefixedKey = shared.MultihashKeyFromCID(rctCid) + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, rctRLPs[i], data) + } + + nonCanonicalBlock2RctRLPs := [][]byte{nonCanonicalBlock2RctLeaf1, nonCanonicalBlock2RctLeaf2} + for i, rctCid := range nonCanonicalBlock2RctCids { + prefixedKey = shared.MultihashKeyFromCID(rctCid) + err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.Block2Number.Uint64()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, nonCanonicalBlock2RctRLPs[i], data) + } +} + +func TestPublishAndIndexLogsNonCanonical(t *testing.T, db sql.Database) { + // check indexed logs + pgStr := `SELECT address, log_data, topic0, topic1, topic2, topic3, data + FROM eth.log_cids + INNER JOIN public.blocks ON (log_cids.block_number = blocks.block_number AND log_cids.leaf_mh_key = blocks.key) + WHERE log_cids.block_number = $1 AND header_id = $2 AND rct_id = $3 + ORDER BY log_cids.index ASC` + + type rctWithBlockHash struct { + rct *types.Receipt + blockHash string + blockNumber uint64 + } + mockRcts := make([]rctWithBlockHash, 0) + + // logs in the canonical block + for _, mockBlockRct := range mocks.MockReceipts { + mockRcts = append(mockRcts, rctWithBlockHash{ + mockBlockRct, + mockBlock.Hash().String(), + mockBlock.NumberU64(), + }) + } + + // logs in the non-canonical block at London height + for _, mockBlockRct := range mocks.MockNonCanonicalBlockReceipts { + mockRcts = append(mockRcts, rctWithBlockHash{ + mockBlockRct, + mockNonCanonicalBlock.Hash().String(), + mockNonCanonicalBlock.NumberU64(), + }) + } + + // logs in the non-canonical block at London height + 1 + for _, mockBlockRct := range mocks.MockNonCanonicalBlock2Receipts { + mockRcts = append(mockRcts, rctWithBlockHash{ + mockBlockRct, + mockNonCanonicalBlock2.Hash().String(), + mockNonCanonicalBlock2.NumberU64(), + }) + } + + for _, mockRct := range mockRcts { + type 
logWithIPLD struct { + models.LogsModel + IPLDData []byte `db:"data"` + } + logRes := make([]logWithIPLD, 0) + err = db.Select(context.Background(), &logRes, pgStr, mockRct.blockNumber, mockRct.blockHash, mockRct.rct.TxHash.String()) + require.NoError(t, err) + require.Equal(t, len(mockRct.rct.Logs), len(logRes)) + + for i, log := range mockRct.rct.Logs { + topicSet := make([]string, 4) + for ti, topic := range log.Topics { + topicSet[ti] = topic.Hex() + } + + expectedLog := models.LogsModel{ + Address: log.Address.String(), + Data: log.Data, + Topic0: topicSet[0], + Topic1: topicSet[1], + Topic2: topicSet[2], + Topic3: topicSet[3], + } + require.Equal(t, expectedLog, logRes[i].LogsModel) + + // check indexed log IPLD block + var nodeElements []interface{} + err = rlp.DecodeBytes(logRes[i].IPLDData, &nodeElements) + require.NoError(t, err) + + if len(nodeElements) == 2 { + logRaw, err := rlp.EncodeToBytes(log) + require.NoError(t, err) + // 2nd element of the leaf node contains the encoded log data. 
+ require.Equal(t, nodeElements[1].([]byte), logRaw) + } else { + logRaw, err := rlp.EncodeToBytes(log) + require.NoError(t, err) + // raw log was IPLDized + require.Equal(t, logRes[i].IPLDData, logRaw) + } + } + } +} + +func TestPublishAndIndexStateNonCanonical(t *testing.T, db sql.Database) { + // check indexed state nodes + pgStr := `SELECT state_path, state_leaf_key, node_type, cid, mh_key, diff + FROM eth.state_cids + WHERE block_number = $1 + AND header_id = $2 + ORDER BY state_path` + + removedNodeCID, _ := cid.Decode(shared.RemovedNodeStateCID) + stateNodeCIDs := []cid.Cid{state1CID, state2CID, removedNodeCID, removedNodeCID} + + // expected state nodes in the canonical and the non-canonical block at London height + expectedStateNodes := make([]models.StateNodeModel, 0) + for i, stateDiff := range mocks.StateDiffs { + expectedStateNodes = append(expectedStateNodes, models.StateNodeModel{ + Path: stateDiff.Path, + StateKey: common.BytesToHash(stateDiff.LeafKey).Hex(), + NodeType: stateDiff.NodeType.Int(), + CID: stateNodeCIDs[i].String(), + MhKey: shared.MultihashKeyFromCID(stateNodeCIDs[i]), + Diff: true, + }) + } + sort.Slice(expectedStateNodes, func(i, j int) bool { + if bytes.Compare(expectedStateNodes[i].Path, expectedStateNodes[j].Path) < 0 { + return true + } else { + return false + } + }) + + // expected state nodes in the non-canonical block at London height + 1 + expectedNonCanonicalBlock2StateNodes := make([]models.StateNodeModel, 0) + for i, stateDiff := range mocks.StateDiffs[:2] { + expectedNonCanonicalBlock2StateNodes = append(expectedNonCanonicalBlock2StateNodes, models.StateNodeModel{ + Path: stateDiff.Path, + StateKey: common.BytesToHash(stateDiff.LeafKey).Hex(), + NodeType: stateDiff.NodeType.Int(), + CID: stateNodeCIDs[i].String(), + MhKey: shared.MultihashKeyFromCID(stateNodeCIDs[i]), + Diff: true, + }) + } + + // check state nodes for canonical block + stateNodes := make([]models.StateNodeModel, 0) + err = 
db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64(), mockBlock.Hash().String()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, len(expectedStateNodes), len(stateNodes)) + + for i, expectedStateNode := range expectedStateNodes { + require.Equal(t, expectedStateNode, stateNodes[i]) + } + + // check state nodes for non-canonical block at London height + stateNodes = make([]models.StateNodeModel, 0) + err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64(), mockNonCanonicalBlock.Hash().String()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, len(expectedStateNodes), len(stateNodes)) + + for i, expectedStateNode := range expectedStateNodes { + require.Equal(t, expectedStateNode, stateNodes[i]) + } + + // check state nodes for non-canonical block at London height + 1 + stateNodes = make([]models.StateNodeModel, 0) + err = db.Select(context.Background(), &stateNodes, pgStr, mocks.Block2Number.Uint64(), mockNonCanonicalBlock2.Hash().String()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, len(expectedNonCanonicalBlock2StateNodes), len(stateNodes)) + + for i, expectedStateNode := range expectedNonCanonicalBlock2StateNodes { + require.Equal(t, expectedStateNode, stateNodes[i]) + } +} + +func TestPublishAndIndexStorageNonCanonical(t *testing.T, db sql.Database) { + // check indexed storage nodes + pgStr := `SELECT state_path, storage_path, storage_leaf_key, node_type, cid, mh_key, diff + FROM eth.storage_cids + WHERE block_number = $1 + AND header_id = $2 + ORDER BY state_path, storage_path` + + removedNodeCID, _ := cid.Decode(shared.RemovedNodeStorageCID) + storageNodeCIDs := []cid.Cid{storageCID, removedNodeCID, removedNodeCID, removedNodeCID} + + // expected storage nodes in the canonical and the non-canonical block at London height + expectedStorageNodes := make([]models.StorageNodeModel, 0) + storageNodeIndex := 0 + for _, stateDiff := range mocks.StateDiffs { + for _, storageNode := range 
stateDiff.StorageNodes { + expectedStorageNodes = append(expectedStorageNodes, models.StorageNodeModel{ + StatePath: stateDiff.Path, + Path: storageNode.Path, + StorageKey: common.BytesToHash(storageNode.LeafKey).Hex(), + NodeType: storageNode.NodeType.Int(), + CID: storageNodeCIDs[storageNodeIndex].String(), + MhKey: shared.MultihashKeyFromCID(storageNodeCIDs[storageNodeIndex]), + Diff: true, + }) + storageNodeIndex++ + } + } + sort.Slice(expectedStorageNodes, func(i, j int) bool { + if bytes.Compare(expectedStorageNodes[i].Path, expectedStorageNodes[j].Path) < 0 { + return true + } else { + return false + } + }) + + // expected storage nodes in the non-canonical block at London height + 1 + expectedNonCanonicalBlock2StorageNodes := make([]models.StorageNodeModel, 0) + storageNodeIndex = 0 + for _, stateDiff := range mocks.StateDiffs[:2] { + for _, storageNode := range stateDiff.StorageNodes { + expectedNonCanonicalBlock2StorageNodes = append(expectedNonCanonicalBlock2StorageNodes, models.StorageNodeModel{ + StatePath: stateDiff.Path, + Path: storageNode.Path, + StorageKey: common.BytesToHash(storageNode.LeafKey).Hex(), + NodeType: storageNode.NodeType.Int(), + CID: storageNodeCIDs[storageNodeIndex].String(), + MhKey: shared.MultihashKeyFromCID(storageNodeCIDs[storageNodeIndex]), + Diff: true, + }) + storageNodeIndex++ + } + } + + // check storage nodes for canonical block + storageNodes := make([]models.StorageNodeModel, 0) + err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64(), mockBlock.Hash().String()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, len(expectedStorageNodes), len(storageNodes)) + + for i, expectedStorageNode := range expectedStorageNodes { + require.Equal(t, expectedStorageNode, storageNodes[i]) + } + + // check storage nodes for non-canonical block at London height + storageNodes = make([]models.StorageNodeModel, 0) + err = db.Select(context.Background(), &storageNodes, pgStr, 
mocks.BlockNumber.Uint64(), mockNonCanonicalBlock.Hash().String()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, len(expectedStorageNodes), len(storageNodes)) + + for i, expectedStorageNode := range expectedStorageNodes { + require.Equal(t, expectedStorageNode, storageNodes[i]) + } + + // check storage nodes for non-canonical block at London height + 1 + storageNodes = make([]models.StorageNodeModel, 0) + err = db.Select(context.Background(), &storageNodes, pgStr, mockNonCanonicalBlock2.NumberU64(), mockNonCanonicalBlock2.Hash().String()) + if err != nil { + t.Fatal(err) + } + require.Equal(t, len(expectedNonCanonicalBlock2StorageNodes), len(storageNodes)) + + for i, expectedStorageNode := range expectedNonCanonicalBlock2StorageNodes { + require.Equal(t, expectedStorageNode, storageNodes[i]) + } +} diff --git a/statediff/indexer/test/test_init.go b/statediff/indexer/test/test_init.go new file mode 100644 index 000000000..283d3e0b0 --- /dev/null +++ b/statediff/indexer/test/test_init.go @@ -0,0 +1,248 @@ +// VulcanizeDB +// Copyright © 2022 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package test + +import ( + "bytes" + "fmt" + "os" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/statediff/indexer/ipld" + "github.com/ethereum/go-ethereum/statediff/indexer/mocks" + "github.com/ethereum/go-ethereum/statediff/indexer/models" + "github.com/ethereum/go-ethereum/statediff/indexer/shared" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" +) + +var ( + err error + ipfsPgGet = `SELECT data FROM public.blocks + WHERE key = $1 AND block_number = $2` + watchedAddressesPgGet = `SELECT * + FROM eth_meta.watched_addresses` + tx1, tx2, tx3, tx4, tx5, rct1, rct2, rct3, rct4, rct5 []byte + nonCanonicalBlockRct1, nonCanonicalBlockRct2 []byte + nonCanonicalBlock2Rct1, nonCanonicalBlock2Rct2 []byte + mockBlock, mockNonCanonicalBlock, mockNonCanonicalBlock2 *types.Block + headerCID, mockNonCanonicalHeaderCID, mockNonCanonicalHeader2CID cid.Cid + trx1CID, trx2CID, trx3CID, trx4CID, trx5CID cid.Cid + rct1CID, rct2CID, rct3CID, rct4CID, rct5CID cid.Cid + nonCanonicalBlockRct1CID, nonCanonicalBlockRct2CID cid.Cid + nonCanonicalBlock2Rct1CID, nonCanonicalBlock2Rct2CID cid.Cid + rctLeaf1, rctLeaf2, rctLeaf3, rctLeaf4, rctLeaf5 []byte + nonCanonicalBlockRctLeaf1, nonCanonicalBlockRctLeaf2 []byte + nonCanonicalBlock2RctLeaf1, nonCanonicalBlock2RctLeaf2 []byte + state1CID, state2CID, storageCID cid.Cid +) + +func init() { + if os.Getenv("MODE") != "statediff" { + fmt.Println("Skipping statediff test") + os.Exit(0) + } + + // canonical block at LondonBlock height + mockBlock = mocks.MockBlock + txs, rcts := mocks.MockBlock.Transactions(), mocks.MockReceipts + + // non-canonical block at LondonBlock height + mockNonCanonicalBlock = mocks.MockNonCanonicalBlock + nonCanonicalBlockRcts := mocks.MockNonCanonicalBlockReceipts + + // non-canonical block at LondonBlock height + 1 + 
mockNonCanonicalBlock2 = mocks.MockNonCanonicalBlock2 + nonCanonicalBlock2Rcts := mocks.MockNonCanonicalBlock2Receipts + + // encode mock receipts + buf := new(bytes.Buffer) + txs.EncodeIndex(0, buf) + tx1 = make([]byte, buf.Len()) + copy(tx1, buf.Bytes()) + buf.Reset() + + txs.EncodeIndex(1, buf) + tx2 = make([]byte, buf.Len()) + copy(tx2, buf.Bytes()) + buf.Reset() + + txs.EncodeIndex(2, buf) + tx3 = make([]byte, buf.Len()) + copy(tx3, buf.Bytes()) + buf.Reset() + + txs.EncodeIndex(3, buf) + tx4 = make([]byte, buf.Len()) + copy(tx4, buf.Bytes()) + buf.Reset() + + txs.EncodeIndex(4, buf) + tx5 = make([]byte, buf.Len()) + copy(tx5, buf.Bytes()) + buf.Reset() + + rcts.EncodeIndex(0, buf) + rct1 = make([]byte, buf.Len()) + copy(rct1, buf.Bytes()) + buf.Reset() + + rcts.EncodeIndex(1, buf) + rct2 = make([]byte, buf.Len()) + copy(rct2, buf.Bytes()) + buf.Reset() + + rcts.EncodeIndex(2, buf) + rct3 = make([]byte, buf.Len()) + copy(rct3, buf.Bytes()) + buf.Reset() + + rcts.EncodeIndex(3, buf) + rct4 = make([]byte, buf.Len()) + copy(rct4, buf.Bytes()) + buf.Reset() + + rcts.EncodeIndex(4, buf) + rct5 = make([]byte, buf.Len()) + copy(rct5, buf.Bytes()) + buf.Reset() + + // encode mock receipts for non-canonical blocks + nonCanonicalBlockRcts.EncodeIndex(0, buf) + nonCanonicalBlockRct1 = make([]byte, buf.Len()) + copy(nonCanonicalBlockRct1, buf.Bytes()) + buf.Reset() + + nonCanonicalBlockRcts.EncodeIndex(1, buf) + nonCanonicalBlockRct2 = make([]byte, buf.Len()) + copy(nonCanonicalBlockRct2, buf.Bytes()) + buf.Reset() + + nonCanonicalBlock2Rcts.EncodeIndex(0, buf) + nonCanonicalBlock2Rct1 = make([]byte, buf.Len()) + copy(nonCanonicalBlock2Rct1, buf.Bytes()) + buf.Reset() + + nonCanonicalBlock2Rcts.EncodeIndex(1, buf) + nonCanonicalBlock2Rct2 = make([]byte, buf.Len()) + copy(nonCanonicalBlock2Rct2, buf.Bytes()) + buf.Reset() + + headerCID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockHeaderRlp, multihash.KECCAK_256) + mockNonCanonicalHeaderCID, _ = 
ipld.RawdataToCid(ipld.MEthHeader, mocks.MockNonCanonicalHeaderRlp, multihash.KECCAK_256) + mockNonCanonicalHeader2CID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockNonCanonicalHeader2Rlp, multihash.KECCAK_256) + trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx1, multihash.KECCAK_256) + trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx2, multihash.KECCAK_256) + trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx3, multihash.KECCAK_256) + trx4CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx4, multihash.KECCAK_256) + trx5CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx5, multihash.KECCAK_256) + state1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.ContractLeafNode, multihash.KECCAK_256) + state2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.AccountLeafNode, multihash.KECCAK_256) + storageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, mocks.StorageLeafNode, multihash.KECCAK_256) + + // create raw receipts + rawRctLeafNodes, rctleafNodeCids := createRctTrie([][]byte{rct1, rct2, rct3, rct4, rct5}) + + rct1CID = rctleafNodeCids[0] + rct2CID = rctleafNodeCids[1] + rct3CID = rctleafNodeCids[2] + rct4CID = rctleafNodeCids[3] + rct5CID = rctleafNodeCids[4] + + rctLeaf1 = rawRctLeafNodes[0] + rctLeaf2 = rawRctLeafNodes[1] + rctLeaf3 = rawRctLeafNodes[2] + rctLeaf4 = rawRctLeafNodes[3] + rctLeaf5 = rawRctLeafNodes[4] + + // create raw receipts for non-canonical blocks + nonCanonicalBlockRawRctLeafNodes, nonCanonicalBlockRctLeafNodeCids := createRctTrie([][]byte{nonCanonicalBlockRct1, nonCanonicalBlockRct2}) + + nonCanonicalBlockRct1CID = nonCanonicalBlockRctLeafNodeCids[0] + nonCanonicalBlockRct2CID = nonCanonicalBlockRctLeafNodeCids[1] + + nonCanonicalBlockRctLeaf1 = nonCanonicalBlockRawRctLeafNodes[0] + nonCanonicalBlockRctLeaf2 = nonCanonicalBlockRawRctLeafNodes[1] + + nonCanonicalBlock2RawRctLeafNodes, nonCanonicalBlock2RctLeafNodeCids := createRctTrie([][]byte{nonCanonicalBlock2Rct1, nonCanonicalBlock2Rct2}) + + nonCanonicalBlock2Rct1CID = nonCanonicalBlock2RctLeafNodeCids[0] + 
nonCanonicalBlock2Rct2CID = nonCanonicalBlock2RctLeafNodeCids[1] + + nonCanonicalBlock2RctLeaf1 = nonCanonicalBlock2RawRctLeafNodes[0] + nonCanonicalBlock2RctLeaf2 = nonCanonicalBlock2RawRctLeafNodes[1] +} + +// createRctTrie creates a receipt trie from the given raw receipts +// returns receipt leaf nodes and their CIDs +func createRctTrie(rcts [][]byte) ([][]byte, []cid.Cid) { + receiptTrie := ipld.NewRctTrie() + + for i, rct := range rcts { + receiptTrie.Add(i, rct) + } + rctLeafNodes, keys, _ := receiptTrie.GetLeafNodes() + + rctleafNodeCids := make([]cid.Cid, len(rctLeafNodes)) + orderedRctLeafNodes := make([][]byte, len(rctLeafNodes)) + for i, rln := range rctLeafNodes { + var idx uint + + r := bytes.NewReader(keys[i].TrieKey) + rlp.Decode(r, &idx) + rctleafNodeCids[idx] = rln.Cid() + orderedRctLeafNodes[idx] = rln.RawData() + } + + return orderedRctLeafNodes, rctleafNodeCids +} + +// createRctModel creates a models.ReceiptModel object from a given ethereum receipt +func createRctModel(rct *types.Receipt, cid cid.Cid, blockNumber string) models.ReceiptModel { + rctModel := models.ReceiptModel{ + BlockNumber: blockNumber, + HeaderID: rct.BlockHash.String(), + TxID: rct.TxHash.String(), + LeafCID: cid.String(), + LeafMhKey: shared.MultihashKeyFromCID(cid), + LogRoot: rct.LogRoot.String(), + } + + contract := shared.HandleZeroAddr(rct.ContractAddress) + rctModel.Contract = contract + if contract != "" { + rctModel.ContractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String() + } + + if len(rct.PostState) == 0 { + rctModel.PostStatus = rct.Status + } else { + rctModel.PostState = common.Bytes2Hex(rct.PostState) + } + + return rctModel +} + +func expectTrue(t *testing.T, value bool) { + if !value { + t.Fatalf("Assertion failed") + } +} diff --git a/statediff/indexer/test/test_legacy.go b/statediff/indexer/test/test_legacy.go new file mode 100644 index 000000000..5838fea71 --- /dev/null +++ b/statediff/indexer/test/test_legacy.go @@ -0,0 +1,96 
@@ +// VulcanizeDB +// Copyright © 2022 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package test + +import ( + "context" + "testing" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/statediff/indexer/database/file" + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" + "github.com/ethereum/go-ethereum/statediff/indexer/ipld" + "github.com/ethereum/go-ethereum/statediff/indexer/mocks" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/require" +) + +var ( + LegacyConfig = params.MainnetChainConfig + legacyData = mocks.NewLegacyData(LegacyConfig) + mockLegacyBlock *types.Block + legacyHeaderCID cid.Cid +) + +func SetupLegacyTestData(t *testing.T, ind interfaces.StateDiffIndexer) { + mockLegacyBlock = legacyData.MockBlock + legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256) + + var tx interfaces.Batch + tx, err = ind.PushBlock( + mockLegacyBlock, + legacyData.MockReceipts, + legacyData.MockBlock.Difficulty()) + require.NoError(t, err) + + defer func() { + if err := tx.Submit(err); err != nil { + t.Fatal(err) + } + }() + for _, node := range legacyData.StateDiffs { + err = ind.PushStateNode(tx, node, 
mockLegacyBlock.Hash().String()) + require.NoError(t, err) + } + + if batchTx, ok := tx.(*sql.BatchTx); ok { + require.Equal(t, legacyData.BlockNumber.String(), batchTx.BlockNumber) + } else if batchTx, ok := tx.(*file.BatchTx); ok { + require.Equal(t, legacyData.BlockNumber.String(), batchTx.BlockNumber) + } +} + +func TestLegacyIndexer(t *testing.T, db sql.Database) { + pgStr := `SELECT cid, cast(td AS TEXT), cast(reward AS TEXT), block_hash, coinbase + FROM eth.header_cids + WHERE block_number = $1` + // check header was properly indexed + type res struct { + CID string + TD string + Reward string + BlockHash string `db:"block_hash"` + Coinbase string `db:"coinbase"` + } + header := new(res) + err = db.QueryRow(context.Background(), pgStr, legacyData.BlockNumber.Uint64()).Scan( + &header.CID, + &header.TD, + &header.Reward, + &header.BlockHash, + &header.Coinbase) + require.NoError(t, err) + + require.Equal(t, legacyHeaderCID.String(), header.CID) + require.Equal(t, legacyData.MockBlock.Difficulty().String(), header.TD) + require.Equal(t, "5000000000000011250", header.Reward) + require.Equal(t, legacyData.MockHeader.Coinbase.String(), header.Coinbase) + require.Nil(t, legacyData.MockHeader.BaseFee) +} diff --git a/statediff/indexer/test/test_mainnet.go b/statediff/indexer/test/test_mainnet.go new file mode 100644 index 000000000..24f74eb96 --- /dev/null +++ b/statediff/indexer/test/test_mainnet.go @@ -0,0 +1,53 @@ +// VulcanizeDB +// Copyright © 2022 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/statediff/indexer/database/file" + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" + "github.com/ethereum/go-ethereum/statediff/indexer/mocks" + "github.com/stretchr/testify/require" +) + +func TestBlock(t *testing.T, ind interfaces.StateDiffIndexer, testBlock *types.Block, testReceipts types.Receipts) { + var tx interfaces.Batch + tx, err = ind.PushBlock( + testBlock, + testReceipts, + testBlock.Difficulty()) + require.NoError(t, err) + + defer func() { + if err := tx.Submit(err); err != nil { + t.Fatal(err) + } + }() + for _, node := range mocks.StateDiffs { + err = ind.PushStateNode(tx, node, testBlock.Hash().String()) + require.NoError(t, err) + } + + if batchTx, ok := tx.(*sql.BatchTx); ok { + require.Equal(t, testBlock.Number().String(), batchTx.BlockNumber) + } else if batchTx, ok := tx.(*file.BatchTx); ok { + require.Equal(t, testBlock.Number().String(), batchTx.BlockNumber) + } +} diff --git a/statediff/indexer/test/test_watched_addresses.go b/statediff/indexer/test/test_watched_addresses.go new file mode 100644 index 000000000..02949e927 --- /dev/null +++ b/statediff/indexer/test/test_watched_addresses.go @@ -0,0 +1,258 @@ +// VulcanizeDB +// Copyright © 2022 Vulcanize + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. + +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package test + +import ( + "context" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" + "github.com/ethereum/go-ethereum/statediff/indexer/interfaces" + "github.com/ethereum/go-ethereum/statediff/indexer/mocks" + "github.com/stretchr/testify/require" +) + +type res struct { + Address string `db:"address"` + CreatedAt uint64 `db:"created_at"` + WatchedAt uint64 `db:"watched_at"` + LastFilledAt uint64 `db:"last_filled_at"` +} + +func TestLoadEmptyWatchedAddresses(t *testing.T, ind interfaces.StateDiffIndexer) { + expectedData := []common.Address{} + + rows, err := ind.LoadWatchedAddresses() + require.NoError(t, err) + + require.Equal(t, len(expectedData), len(rows)) + for idx, row := range rows { + require.Equal(t, expectedData[idx], row) + } +} + +func TestInsertWatchedAddresses(t *testing.T, db sql.Database) { + expectedData := []res{ + { + Address: mocks.Contract1Address, + CreatedAt: mocks.Contract1CreatedAt, + WatchedAt: mocks.WatchedAt1, + LastFilledAt: mocks.LastFilledAt, + }, + { + Address: mocks.Contract2Address, + CreatedAt: mocks.Contract2CreatedAt, + WatchedAt: mocks.WatchedAt1, + LastFilledAt: mocks.LastFilledAt, + }, + } + + rows := []res{} + err = db.Select(context.Background(), &rows, watchedAddressesPgGet) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, len(expectedData), len(rows)) + for idx, row := range rows { + require.Equal(t, expectedData[idx], row) + } +} + +func TestInsertAlreadyWatchedAddresses(t *testing.T, db sql.Database) { + expectedData := []res{ + { + Address: 
mocks.Contract1Address, + CreatedAt: mocks.Contract1CreatedAt, + WatchedAt: mocks.WatchedAt1, + LastFilledAt: mocks.LastFilledAt, + }, + { + Address: mocks.Contract2Address, + CreatedAt: mocks.Contract2CreatedAt, + WatchedAt: mocks.WatchedAt1, + LastFilledAt: mocks.LastFilledAt, + }, + { + Address: mocks.Contract3Address, + CreatedAt: mocks.Contract3CreatedAt, + WatchedAt: mocks.WatchedAt2, + LastFilledAt: mocks.LastFilledAt, + }, + } + + rows := []res{} + err = db.Select(context.Background(), &rows, watchedAddressesPgGet) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, len(expectedData), len(rows)) + for idx, row := range rows { + require.Equal(t, expectedData[idx], row) + } +} + +func TestRemoveWatchedAddresses(t *testing.T, db sql.Database) { + expectedData := []res{ + { + Address: mocks.Contract1Address, + CreatedAt: mocks.Contract1CreatedAt, + WatchedAt: mocks.WatchedAt1, + LastFilledAt: mocks.LastFilledAt, + }, + } + + rows := []res{} + err = db.Select(context.Background(), &rows, watchedAddressesPgGet) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, len(expectedData), len(rows)) + for idx, row := range rows { + require.Equal(t, expectedData[idx], row) + } +} + +func TestRemoveNonWatchedAddresses(t *testing.T, db sql.Database) { + expectedData := []res{} + + rows := []res{} + err = db.Select(context.Background(), &rows, watchedAddressesPgGet) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, len(expectedData), len(rows)) + for idx, row := range rows { + require.Equal(t, expectedData[idx], row) + } +} + +func TestSetWatchedAddresses(t *testing.T, db sql.Database) { + expectedData := []res{ + { + Address: mocks.Contract1Address, + CreatedAt: mocks.Contract1CreatedAt, + WatchedAt: mocks.WatchedAt2, + LastFilledAt: mocks.LastFilledAt, + }, + { + Address: mocks.Contract2Address, + CreatedAt: mocks.Contract2CreatedAt, + WatchedAt: mocks.WatchedAt2, + LastFilledAt: mocks.LastFilledAt, + }, + { + Address: mocks.Contract3Address, + 
CreatedAt: mocks.Contract3CreatedAt, + WatchedAt: mocks.WatchedAt2, + LastFilledAt: mocks.LastFilledAt, + }, + } + + rows := []res{} + err = db.Select(context.Background(), &rows, watchedAddressesPgGet) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, len(expectedData), len(rows)) + for idx, row := range rows { + require.Equal(t, expectedData[idx], row) + } +} + +func TestSetAlreadyWatchedAddresses(t *testing.T, db sql.Database) { + expectedData := []res{ + { + Address: mocks.Contract4Address, + CreatedAt: mocks.Contract4CreatedAt, + WatchedAt: mocks.WatchedAt3, + LastFilledAt: mocks.LastFilledAt, + }, + { + Address: mocks.Contract2Address, + CreatedAt: mocks.Contract2CreatedAt, + WatchedAt: mocks.WatchedAt3, + LastFilledAt: mocks.LastFilledAt, + }, + { + Address: mocks.Contract3Address, + CreatedAt: mocks.Contract3CreatedAt, + WatchedAt: mocks.WatchedAt3, + LastFilledAt: mocks.LastFilledAt, + }, + } + + rows := []res{} + err = db.Select(context.Background(), &rows, watchedAddressesPgGet) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, len(expectedData), len(rows)) + for idx, row := range rows { + require.Equal(t, expectedData[idx], row) + } +} + +func TestLoadWatchedAddresses(t *testing.T, ind interfaces.StateDiffIndexer) { + expectedData := []common.Address{ + common.HexToAddress(mocks.Contract4Address), + common.HexToAddress(mocks.Contract2Address), + common.HexToAddress(mocks.Contract3Address), + } + + rows, err := ind.LoadWatchedAddresses() + require.NoError(t, err) + + require.Equal(t, len(expectedData), len(rows)) + for idx, row := range rows { + require.Equal(t, expectedData[idx], row) + } +} + +func TestClearWatchedAddresses(t *testing.T, db sql.Database) { + expectedData := []res{} + rows := []res{} + err = db.Select(context.Background(), &rows, watchedAddressesPgGet) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, len(expectedData), len(rows)) + for idx, row := range rows { + require.Equal(t, expectedData[idx], row) + } +} + 
+func TestClearEmptyWatchedAddresses(t *testing.T, db sql.Database) { + expectedData := []res{} + rows := []res{} + err = db.Select(context.Background(), &rows, watchedAddressesPgGet) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, len(expectedData), len(rows)) + for idx, row := range rows { + require.Equal(t, expectedData[idx], row) + } +} diff --git a/statediff/indexer/test_helpers/mainnet_test_helpers.go b/statediff/indexer/test_helpers/mainnet_test_helpers.go index 141bb10fd..faedee5b8 100644 --- a/statediff/indexer/test_helpers/mainnet_test_helpers.go +++ b/statediff/indexer/test_helpers/mainnet_test_helpers.go @@ -62,6 +62,18 @@ var DefaultTestConfig = TestConfig{ LocalCache: true, } +func GetTestConfig() TestConfig { + conf := DefaultTestConfig + rawURL := os.Getenv(TEST_RAW_URL) + if rawURL == "" { + fmt.Printf("Warning: no raw url configured for statediffing mainnet tests, will look for local file and"+ + "then try default endpoint (%s)\r\n", DefaultTestConfig.RawURL) + } else { + conf.RawURL = rawURL + } + return conf +} + // TestBlockAndReceiptsFromEnv retrieves the block and receipts using env variables to override default config block number func TestBlockAndReceiptsFromEnv(conf TestConfig) (*types.Block, types.Receipts, error) { blockNumberStr := os.Getenv(TEST_BLOCK_NUMBER) diff --git a/statediff/indexer/test_helpers/test_helpers.go b/statediff/indexer/test_helpers/test_helpers.go index 6073db434..1b5335b78 100644 --- a/statediff/indexer/test_helpers/test_helpers.go +++ b/statediff/indexer/test_helpers/test_helpers.go @@ -16,6 +16,15 @@ package test_helpers +import ( + "bufio" + "context" + "os" + "testing" + + "github.com/ethereum/go-ethereum/statediff/indexer/database/sql" +) + // ListContainsString used to check if a list of strings contains a particular string func ListContainsString(sss []string, s string) bool { for _, str := range sss { @@ -25,3 +34,98 @@ func ListContainsString(sss []string, s string) bool { } return false } + +// 
DedupFile removes duplicates from the given file +func DedupFile(filePath string) error { + f, err := os.OpenFile(filePath, os.O_CREATE|os.O_RDONLY, os.ModePerm) + if err != nil { + return err + } + + stmts := make(map[string]struct{}, 0) + sc := bufio.NewScanner(f) + + for sc.Scan() { + s := sc.Text() + stmts[s] = struct{}{} + } + if err != nil { + return err + } + + f.Close() + + f, err = os.Create(filePath) + if err != nil { + return err + } + defer f.Close() + + for stmt := range stmts { + f.Write([]byte(stmt + "\n")) + } + + return nil +} + +// TearDownDB is used to tear down the watcher dbs after tests +func TearDownDB(t *testing.T, db sql.Database) { + ctx := context.Background() + tx, err := db.Begin(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = tx.Exec(ctx, `DELETE FROM eth.header_cids`) + if err != nil { + t.Fatal(err) + } + _, err = tx.Exec(ctx, `DELETE FROM eth.uncle_cids`) + if err != nil { + t.Fatal(err) + } + _, err = tx.Exec(ctx, `DELETE FROM eth.transaction_cids`) + if err != nil { + t.Fatal(err) + } + _, err = tx.Exec(ctx, `DELETE FROM eth.receipt_cids`) + if err != nil { + t.Fatal(err) + } + _, err = tx.Exec(ctx, `DELETE FROM eth.state_cids`) + if err != nil { + t.Fatal(err) + } + _, err = tx.Exec(ctx, `DELETE FROM eth.storage_cids`) + if err != nil { + t.Fatal(err) + } + _, err = tx.Exec(ctx, `DELETE FROM eth.state_accounts`) + if err != nil { + t.Fatal(err) + } + _, err = tx.Exec(ctx, `DELETE FROM eth.access_list_elements`) + if err != nil { + t.Fatal(err) + } + _, err = tx.Exec(ctx, `DELETE FROM eth.log_cids`) + if err != nil { + t.Fatal(err) + } + _, err = tx.Exec(ctx, `DELETE FROM blocks`) + if err != nil { + t.Fatal(err) + } + _, err = tx.Exec(ctx, `DELETE FROM nodes`) + if err != nil { + t.Fatal(err) + } + _, err = tx.Exec(ctx, `DELETE FROM eth_meta.watched_addresses`) + if err != nil { + t.Fatal(err) + } + err = tx.Commit(ctx) + if err != nil { + t.Fatal(err) + } +}