Add indexer tests for handling non-canonical blocks #254
.github/workflows/tests.yml (vendored)
@@ -4,8 +4,8 @@ on:
 workflow_call:

 env:
-stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref || '382aca8e42bc5e33f301f77cdd2e09cc80602fc3'}}
+stack-orchestrator-ref: ${{ github.event.inputs.stack-orchestrator-ref || 'f2fd766f5400fcb9eb47b50675d2e3b1f2753702'}}
-ipld-eth-db-ref: ${{ github.event.inputs.ipld-ethcl-db-ref || '65b7bee7a6757c1fc527c8bfdc4f99ab915fcf36' }}
+ipld-eth-db-ref: ${{ github.event.inputs.ipld-ethcl-db-ref || 'be345e0733d2c025e4082c5154e441317ae94cf7' }}
 GOPATH: /tmp/go

 jobs:
@@ -99,6 +99,7 @@ jobs:
 echo vulcanize_ipld_eth_db=$GITHUB_WORKSPACE/ipld-eth-db/ > $GITHUB_WORKSPACE/config.sh
 echo vulcanize_go_ethereum=$GITHUB_WORKSPACE/go-ethereum/ >> $GITHUB_WORKSPACE/config.sh
 echo db_write=true >> $GITHUB_WORKSPACE/config.sh
+echo genesis_file_path=start-up-files/go-ethereum/genesis.json >> $GITHUB_WORKSPACE/config.sh
 cat $GITHUB_WORKSPACE/config.sh

 - name: Compile Geth
@@ -5,7 +5,7 @@ services:
 restart: on-failure
 depends_on:
 - ipld-eth-db
-image: vulcanize/ipld-eth-db:v4.2.0-alpha
+image: vulcanize/ipld-eth-db:v4.2.1-alpha
 environment:
 DATABASE_USER: "vdbm"
 DATABASE_NAME: "vulcanize_testing"
@@ -2,20 +2,26 @@ package sql_test

 import (
     "bytes"
+    "context"
     "fmt"
     "os"
+    "sort"
     "testing"

     "github.com/ipfs/go-cid"
     "github.com/multiformats/go-multihash"
     "github.com/stretchr/testify/require"

+    "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
+    "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
     "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
     "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
     "github.com/ethereum/go-ethereum/statediff/indexer/mocks"
+    "github.com/ethereum/go-ethereum/statediff/indexer/models"
+    "github.com/ethereum/go-ethereum/statediff/indexer/shared"
 )

 var (
@@ -25,10 +31,17 @@ var (
     ipfsPgGet = `SELECT data FROM public.blocks
         WHERE key = $1 AND block_number = $2`
     tx1, tx2, tx3, tx4, tx5, rct1, rct2, rct3, rct4, rct5 []byte
-    mockBlock *types.Block
-    headerCID, trx1CID, trx2CID, trx3CID, trx4CID, trx5CID cid.Cid
+    nonCanonicalBlockRct1, nonCanonicalBlockRct2 []byte
+    nonCanonicalBlock2Rct1, nonCanonicalBlock2Rct2 []byte
+    mockBlock, mockNonCanonicalBlock, mockNonCanonicalBlock2 *types.Block
+    headerCID, mockNonCanonicalHeaderCID, mockNonCanonicalHeader2CID cid.Cid
+    trx1CID, trx2CID, trx3CID, trx4CID, trx5CID cid.Cid
     rct1CID, rct2CID, rct3CID, rct4CID, rct5CID cid.Cid
+    nonCanonicalBlockRct1CID, nonCanonicalBlockRct2CID cid.Cid
+    nonCanonicalBlock2Rct1CID, nonCanonicalBlock2Rct2CID cid.Cid
     rctLeaf1, rctLeaf2, rctLeaf3, rctLeaf4, rctLeaf5 []byte
+    nonCanonicalBlockRctLeaf1, nonCanonicalBlockRctLeaf2 []byte
+    nonCanonicalBlock2RctLeaf1, nonCanonicalBlock2RctLeaf2 []byte
     state1CID, state2CID, storageCID cid.Cid
     contract1Address, contract2Address, contract3Address, contract4Address string
     contract1CreatedAt, contract2CreatedAt, contract3CreatedAt, contract4CreatedAt uint64
@@ -41,9 +54,19 @@ func init() {
         os.Exit(0)
     }

+    // canonical block at LondonBlock height
     mockBlock = mocks.MockBlock
     txs, rcts := mocks.MockBlock.Transactions(), mocks.MockReceipts

+    // non-canonical block at LondonBlock height
+    mockNonCanonicalBlock = mocks.MockNonCanonicalBlock
+    nonCanonicalBlockRcts := mocks.MockNonCanonicalBlockReceipts
+
+    // non-canonical block at LondonBlock height + 1
+    mockNonCanonicalBlock2 = mocks.MockNonCanonicalBlock2
+    nonCanonicalBlock2Rcts := mocks.MockNonCanonicalBlock2Receipts
+
+    // encode mock receipts
     buf := new(bytes.Buffer)
     txs.EncodeIndex(0, buf)
     tx1 = make([]byte, buf.Len())
@@ -95,7 +118,30 @@ func init() {
     copy(rct5, buf.Bytes())
     buf.Reset()

+    // encode mock receipts for non-canonical blocks
+    nonCanonicalBlockRcts.EncodeIndex(0, buf)
+    nonCanonicalBlockRct1 = make([]byte, buf.Len())
+    copy(nonCanonicalBlockRct1, buf.Bytes())
+    buf.Reset()
+
+    nonCanonicalBlockRcts.EncodeIndex(1, buf)
+    nonCanonicalBlockRct2 = make([]byte, buf.Len())
+    copy(nonCanonicalBlockRct2, buf.Bytes())
+    buf.Reset()
+
+    nonCanonicalBlock2Rcts.EncodeIndex(0, buf)
+    nonCanonicalBlock2Rct1 = make([]byte, buf.Len())
+    copy(nonCanonicalBlock2Rct1, buf.Bytes())
+    buf.Reset()
+
+    nonCanonicalBlock2Rcts.EncodeIndex(1, buf)
+    nonCanonicalBlock2Rct2 = make([]byte, buf.Len())
+    copy(nonCanonicalBlock2Rct2, buf.Bytes())
+    buf.Reset()
+
     headerCID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockHeaderRlp, multihash.KECCAK_256)
+    mockNonCanonicalHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockNonCanonicalHeaderRlp, multihash.KECCAK_256)
+    mockNonCanonicalHeader2CID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockNonCanonicalHeader2Rlp, multihash.KECCAK_256)
     trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx1, multihash.KECCAK_256)
     trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx2, multihash.KECCAK_256)
     trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx3, multihash.KECCAK_256)
@@ -105,26 +151,8 @@ func init() {
     state2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.AccountLeafNode, multihash.KECCAK_256)
     storageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, mocks.StorageLeafNode, multihash.KECCAK_256)

-    receiptTrie := ipld.NewRctTrie()
-    receiptTrie.Add(0, rct1)
-    receiptTrie.Add(1, rct2)
-    receiptTrie.Add(2, rct3)
-    receiptTrie.Add(3, rct4)
-    receiptTrie.Add(4, rct5)
-
-    rctLeafNodes, keys, _ := receiptTrie.GetLeafNodes()
-
-    rctleafNodeCids := make([]cid.Cid, len(rctLeafNodes))
-    orderedRctLeafNodes := make([][]byte, len(rctLeafNodes))
-    for i, rln := range rctLeafNodes {
-        var idx uint
-
-        r := bytes.NewReader(keys[i].TrieKey)
-        rlp.Decode(r, &idx)
-        rctleafNodeCids[idx] = rln.Cid()
-        orderedRctLeafNodes[idx] = rln.RawData()
-    }
-
+    // create raw receipts
+    rawRctLeafNodes, rctleafNodeCids := createRctTrie([][]byte{rct1, rct2, rct3, rct4, rct5})
+
     rct1CID = rctleafNodeCids[0]
     rct2CID = rctleafNodeCids[1]
@@ -132,11 +160,28 @@ func init() {
     rct4CID = rctleafNodeCids[3]
     rct5CID = rctleafNodeCids[4]

-    rctLeaf1 = orderedRctLeafNodes[0]
-    rctLeaf2 = orderedRctLeafNodes[1]
-    rctLeaf3 = orderedRctLeafNodes[2]
-    rctLeaf4 = orderedRctLeafNodes[3]
-    rctLeaf5 = orderedRctLeafNodes[4]
+    rctLeaf1 = rawRctLeafNodes[0]
+    rctLeaf2 = rawRctLeafNodes[1]
+    rctLeaf3 = rawRctLeafNodes[2]
+    rctLeaf4 = rawRctLeafNodes[3]
+    rctLeaf5 = rawRctLeafNodes[4]
+
+    // create raw receipts for non-canonical blocks
+    nonCanonicalBlockRawRctLeafNodes, nonCanonicalBlockRctLeafNodeCids := createRctTrie([][]byte{nonCanonicalBlockRct1, nonCanonicalBlockRct2})
+
+    nonCanonicalBlockRct1CID = nonCanonicalBlockRctLeafNodeCids[0]
+    nonCanonicalBlockRct2CID = nonCanonicalBlockRctLeafNodeCids[1]
+
+    nonCanonicalBlockRctLeaf1 = nonCanonicalBlockRawRctLeafNodes[0]
+    nonCanonicalBlockRctLeaf2 = nonCanonicalBlockRawRctLeafNodes[1]
+
+    nonCanonicalBlock2RawRctLeafNodes, nonCanonicalBlock2RctLeafNodeCids := createRctTrie([][]byte{nonCanonicalBlockRct1, nonCanonicalBlockRct2})
+
+    nonCanonicalBlock2Rct1CID = nonCanonicalBlock2RctLeafNodeCids[0]
+    nonCanonicalBlock2Rct2CID = nonCanonicalBlock2RctLeafNodeCids[1]
+
+    nonCanonicalBlock2RctLeaf1 = nonCanonicalBlock2RawRctLeafNodes[0]
+    nonCanonicalBlock2RctLeaf2 = nonCanonicalBlock2RawRctLeafNodes[1]
+
     contract1Address = "0x5d663F5269090bD2A7DC2390c911dF6083D7b28F"
     contract2Address = "0x6Eb7e5C66DB8af2E96159AC440cbc8CDB7fbD26B"
@@ -153,6 +198,56 @@ func init() {
     watchedAt3 = uint64(20)
 }

+// createRctTrie creates a receipt trie from the given raw receipts
+// returns receipt leaf nodes and their CIDs
+func createRctTrie(rcts [][]byte) ([][]byte, []cid.Cid) {
+    receiptTrie := ipld.NewRctTrie()
+
+    for i, rct := range rcts {
+        receiptTrie.Add(i, rct)
+    }
+    rctLeafNodes, keys, _ := receiptTrie.GetLeafNodes()
+
+    rctleafNodeCids := make([]cid.Cid, len(rctLeafNodes))
+    orderedRctLeafNodes := make([][]byte, len(rctLeafNodes))
+    for i, rln := range rctLeafNodes {
+        var idx uint
+
+        r := bytes.NewReader(keys[i].TrieKey)
+        rlp.Decode(r, &idx)
+        rctleafNodeCids[idx] = rln.Cid()
+        orderedRctLeafNodes[idx] = rln.RawData()
+    }
+
+    return orderedRctLeafNodes, rctleafNodeCids
+}
+
+// createRctModel creates a models.ReceiptModel object from a given ethereum receipt
+func createRctModel(rct *types.Receipt, cid cid.Cid, blockNumber string) models.ReceiptModel {
+    rctModel := models.ReceiptModel{
+        BlockNumber: blockNumber,
+        HeaderID: rct.BlockHash.String(),
+        TxID: rct.TxHash.String(),
+        LeafCID: cid.String(),
+        LeafMhKey: shared.MultihashKeyFromCID(cid),
+        LogRoot: rct.LogRoot.String(),
+    }
+
+    contract := shared.HandleZeroAddr(rct.ContractAddress)
+    rctModel.Contract = contract
+    if contract != "" {
+        rctModel.ContractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
+    }
+
+    if len(rct.PostState) == 0 {
+        rctModel.PostStatus = rct.Status
+    } else {
+        rctModel.PostState = common.Bytes2Hex(rct.PostState)
+    }
+
+    return rctModel
+}
+
 func expectTrue(t *testing.T, value bool) {
     if !value {
         t.Fatalf("Assertion failed")
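Note on the createRctTrie helper added above: the trie keys returned by GetLeafNodes are RLP-encoded receipt indices, which is why the helper decodes each key before putting the leaf nodes back into receipt order. The following is a standalone sketch of just that decoding step; it is an illustration only, not part of this diff, and assumes nothing beyond the go-ethereum rlp package.

package main

import (
    "bytes"
    "fmt"

    "github.com/ethereum/go-ethereum/rlp"
)

func main() {
    // A receipt trie key is the RLP encoding of the receipt's index within the block.
    key, err := rlp.EncodeToBytes(uint(2))
    if err != nil {
        panic(err)
    }

    // createRctTrie does the inverse: decode the key to recover the index,
    // then place the leaf node and its CID at that position in the returned slices.
    var idx uint
    if err := rlp.Decode(bytes.NewReader(key), &idx); err != nil {
        panic(err)
    }
    fmt.Println(idx) // prints 2
}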
@@ -170,3 +265,704 @@ func tearDown(t *testing.T) {
     err := ind.Close()
     require.NoError(t, err)
 }
+
+// setupTestData indexes a single mock block along with it's state nodes
+func setupTestData(t *testing.T) {
+    var tx interfaces.Batch
+    tx, err = ind.PushBlock(
+        mockBlock,
+        mocks.MockReceipts,
+        mocks.MockBlock.Difficulty())
+    if err != nil {
+        t.Fatal(err)
+    }
+    defer func() {
+        if err := tx.Submit(err); err != nil {
+            t.Fatal(err)
+        }
+    }()
+    for _, node := range mocks.StateDiffs {
+        err = ind.PushStateNode(tx, node, mockBlock.Hash().String())
+        require.NoError(t, err)
+    }
+
+    require.Equal(t, mocks.BlockNumber.String(), tx.(*sql.BatchTx).BlockNumber)
+}
+
+// setupTestData indexes a mock block and a non-canonical mock block at London height
+// and a non-canonical block at London height + 1
+// along with their state nodes
+func setupTestDataNonCanonical(t *testing.T) {
+    // index a canonical block at London height
+    var tx1 interfaces.Batch
+    tx1, err = ind.PushBlock(
+        mockBlock,
+        mocks.MockReceipts,
+        mocks.MockBlock.Difficulty())
+    if err != nil {
+        t.Fatal(err)
+    }
+    for _, node := range mocks.StateDiffs {
+        err = ind.PushStateNode(tx1, node, mockBlock.Hash().String())
+        require.NoError(t, err)
+    }
+
+    require.Equal(t, mocks.BlockNumber.String(), tx1.(*sql.BatchTx).BlockNumber)
+    if err := tx1.Submit(err); err != nil {
+        t.Fatal(err)
+    }
+
+    // index a non-canonical block at London height
+    // has transactions overlapping with that of the canonical block
+    var tx2 interfaces.Batch
+    tx2, err = ind.PushBlock(
+        mockNonCanonicalBlock,
+        mocks.MockNonCanonicalBlockReceipts,
+        mockNonCanonicalBlock.Difficulty())
+    if err != nil {
+        t.Fatal(err)
+    }
+    for _, node := range mocks.StateDiffs {
+        err = ind.PushStateNode(tx2, node, mockNonCanonicalBlock.Hash().String())
+        require.NoError(t, err)
+    }
+
+    require.Equal(t, mocks.BlockNumber.String(), tx2.(*sql.BatchTx).BlockNumber)
+    if err := tx2.Submit(err); err != nil {
+        t.Fatal(err)
+    }
+
+    // index a non-canonical block at London height + 1
+    // has transactions overlapping with that of the canonical block
+    var tx3 interfaces.Batch
+    tx3, err = ind.PushBlock(
+        mockNonCanonicalBlock2,
+        mocks.MockNonCanonicalBlock2Receipts,
+        mockNonCanonicalBlock2.Difficulty())
+    if err != nil {
+        t.Fatal(err)
+    }
+    for _, node := range mocks.StateDiffs[:2] {
+        err = ind.PushStateNode(tx3, node, mockNonCanonicalBlock2.Hash().String())
+        require.NoError(t, err)
+    }
+
+    require.Equal(t, mocks.Block2Number.String(), tx3.(*sql.BatchTx).BlockNumber)
+    if err := tx3.Submit(err); err != nil {
+        t.Fatal(err)
+    }
+}
+
+func testPublishAndIndexHeaderNonCanonical(t *testing.T) {
+    // check indexed headers
+    pgStr := `SELECT CAST(block_number as TEXT), block_hash, cid, cast(td AS TEXT), cast(reward AS TEXT),
+        tx_root, receipt_root, uncle_root, coinbase
+        FROM eth.header_cids
+        ORDER BY block_number`
+    headerRes := make([]models.HeaderModel, 0)
+    err = db.Select(context.Background(), &headerRes, pgStr)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // expect three blocks to be indexed
+    // a canonical and a non-canonical block at London height,
+    // non-canonical block at London height + 1
+    expectedRes := []models.HeaderModel{
+        {
+            BlockNumber: mockBlock.Number().String(),
+            BlockHash: mockBlock.Hash().String(),
+            CID: headerCID.String(),
+            TotalDifficulty: mockBlock.Difficulty().String(),
+            TxRoot: mockBlock.TxHash().String(),
+            RctRoot: mockBlock.ReceiptHash().String(),
+            UncleRoot: mockBlock.UncleHash().String(),
+            Coinbase: mocks.MockHeader.Coinbase.String(),
+        },
+        {
+            BlockNumber: mockNonCanonicalBlock.Number().String(),
+            BlockHash: mockNonCanonicalBlock.Hash().String(),
+            CID: mockNonCanonicalHeaderCID.String(),
+            TotalDifficulty: mockNonCanonicalBlock.Difficulty().String(),
+            TxRoot: mockNonCanonicalBlock.TxHash().String(),
+            RctRoot: mockNonCanonicalBlock.ReceiptHash().String(),
+            UncleRoot: mockNonCanonicalBlock.UncleHash().String(),
+            Coinbase: mocks.MockNonCanonicalHeader.Coinbase.String(),
+        },
+        {
+            BlockNumber: mockNonCanonicalBlock2.Number().String(),
+            BlockHash: mockNonCanonicalBlock2.Hash().String(),
+            CID: mockNonCanonicalHeader2CID.String(),
+            TotalDifficulty: mockNonCanonicalBlock2.Difficulty().String(),
+            TxRoot: mockNonCanonicalBlock2.TxHash().String(),
+            RctRoot: mockNonCanonicalBlock2.ReceiptHash().String(),
+            UncleRoot: mockNonCanonicalBlock2.UncleHash().String(),
+            Coinbase: mocks.MockNonCanonicalHeader2.Coinbase.String(),
+        },
+    }
+    expectedRes[0].Reward = shared.CalcEthBlockReward(mockBlock.Header(), mockBlock.Uncles(), mockBlock.Transactions(), mocks.MockReceipts).String()
+    expectedRes[1].Reward = shared.CalcEthBlockReward(mockNonCanonicalBlock.Header(), mockNonCanonicalBlock.Uncles(), mockNonCanonicalBlock.Transactions(), mocks.MockNonCanonicalBlockReceipts).String()
+    expectedRes[2].Reward = shared.CalcEthBlockReward(mockNonCanonicalBlock2.Header(), mockNonCanonicalBlock2.Uncles(), mockNonCanonicalBlock2.Transactions(), mocks.MockNonCanonicalBlock2Receipts).String()
+
+    require.Equal(t, len(expectedRes), len(headerRes))
+    require.ElementsMatch(t,
+        []string{mockBlock.Hash().String(), mockNonCanonicalBlock.Hash().String(), mockNonCanonicalBlock2.Hash().String()},
+        []string{headerRes[0].BlockHash, headerRes[1].BlockHash, headerRes[2].BlockHash},
+    )
+
+    if headerRes[0].BlockHash == mockBlock.Hash().String() {
+        require.Equal(t, expectedRes[0], headerRes[0])
+        require.Equal(t, expectedRes[1], headerRes[1])
+        require.Equal(t, expectedRes[2], headerRes[2])
+    } else {
+        require.Equal(t, expectedRes[1], headerRes[0])
+        require.Equal(t, expectedRes[0], headerRes[1])
+        require.Equal(t, expectedRes[2], headerRes[2])
+    }
+
+    // check indexed IPLD blocks
+    headerCIDs := []cid.Cid{headerCID, mockNonCanonicalHeaderCID, mockNonCanonicalHeader2CID}
+    blockNumbers := []uint64{mocks.BlockNumber.Uint64(), mocks.BlockNumber.Uint64(), mocks.Block2Number.Uint64()}
+    headerRLPs := [][]byte{mocks.MockHeaderRlp, mocks.MockNonCanonicalHeaderRlp, mocks.MockNonCanonicalHeader2Rlp}
+    for i := range expectedRes {
+        var data []byte
+        prefixedKey := shared.MultihashKeyFromCID(headerCIDs[i])
+        err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, blockNumbers[i])
+        if err != nil {
+            t.Fatal(err)
+        }
+        require.Equal(t, headerRLPs[i], data)
+    }
+}
+
+func testPublishAndIndexTransactionsNonCanonical(t *testing.T) {
+    // check indexed transactions
+    pgStr := `SELECT CAST(block_number as TEXT), header_id, tx_hash, cid, dst, src, index,
+        tx_data, tx_type, CAST(value as TEXT)
+        FROM eth.transaction_cids
+        ORDER BY block_number, index`
+    txRes := make([]models.TxModel, 0)
+    err = db.Select(context.Background(), &txRes, pgStr)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // expected transactions in the canonical block
+    mockBlockTxs := mocks.MockBlock.Transactions()
+    expectedBlockTxs := []models.TxModel{
+        {
+            BlockNumber: mockBlock.Number().String(),
+            HeaderID: mockBlock.Hash().String(),
+            TxHash: mockBlockTxs[0].Hash().String(),
+            CID: trx1CID.String(),
+            Dst: shared.HandleZeroAddrPointer(mockBlockTxs[0].To()),
+            Src: mocks.SenderAddr.String(),
+            Index: 0,
+            Data: mockBlockTxs[0].Data(),
+            Type: mockBlockTxs[0].Type(),
+            Value: mockBlockTxs[0].Value().String(),
+        },
+        {
+            BlockNumber: mockBlock.Number().String(),
+            HeaderID: mockBlock.Hash().String(),
+            TxHash: mockBlockTxs[1].Hash().String(),
+            CID: trx2CID.String(),
+            Dst: shared.HandleZeroAddrPointer(mockBlockTxs[1].To()),
+            Src: mocks.SenderAddr.String(),
+            Index: 1,
+            Data: mockBlockTxs[1].Data(),
+            Type: mockBlockTxs[1].Type(),
+            Value: mockBlockTxs[1].Value().String(),
+        },
+        {
+            BlockNumber: mockBlock.Number().String(),
+            HeaderID: mockBlock.Hash().String(),
+            TxHash: mockBlockTxs[2].Hash().String(),
+            CID: trx3CID.String(),
+            Dst: shared.HandleZeroAddrPointer(mockBlockTxs[2].To()),
+            Src: mocks.SenderAddr.String(),
+            Index: 2,
+            Data: mockBlockTxs[2].Data(),
+            Type: mockBlockTxs[2].Type(),
+            Value: mockBlockTxs[2].Value().String(),
+        },
+        {
+            BlockNumber: mockBlock.Number().String(),
+            HeaderID: mockBlock.Hash().String(),
+            TxHash: mockBlockTxs[3].Hash().String(),
+            CID: trx4CID.String(),
+            Dst: shared.HandleZeroAddrPointer(mockBlockTxs[3].To()),
+            Src: mocks.SenderAddr.String(),
+            Index: 3,
+            Data: mockBlockTxs[3].Data(),
+            Type: mockBlockTxs[3].Type(),
+            Value: mockBlockTxs[3].Value().String(),
+        },
+        {
+            BlockNumber: mockBlock.Number().String(),
+            HeaderID: mockBlock.Hash().String(),
+            TxHash: mockBlockTxs[4].Hash().String(),
+            CID: trx5CID.String(),
+            Dst: shared.HandleZeroAddrPointer(mockBlockTxs[4].To()),
+            Src: mocks.SenderAddr.String(),
+            Index: 4,
+            Data: mockBlockTxs[4].Data(),
+            Type: mockBlockTxs[4].Type(),
+            Value: mockBlockTxs[4].Value().String(),
+        },
+    }
+
+    // expected transactions in the canonical block at London height
+    mockNonCanonicalBlockTxs := mockNonCanonicalBlock.Transactions()
+    expectedNonCanonicalBlockTxs := []models.TxModel{
+        {
+            BlockNumber: mockNonCanonicalBlock.Number().String(),
+            HeaderID: mockNonCanonicalBlock.Hash().String(),
+            TxHash: mockNonCanonicalBlockTxs[0].Hash().String(),
+            CID: trx2CID.String(),
+            Dst: mockNonCanonicalBlockTxs[0].To().String(),
+            Src: mocks.SenderAddr.String(),
+            Index: 0,
+            Data: mockNonCanonicalBlockTxs[0].Data(),
+            Type: mockNonCanonicalBlockTxs[0].Type(),
+            Value: mockNonCanonicalBlockTxs[0].Value().String(),
+        },
+        {
+            BlockNumber: mockNonCanonicalBlock.Number().String(),
+            HeaderID: mockNonCanonicalBlock.Hash().String(),
+            TxHash: mockNonCanonicalBlockTxs[1].Hash().String(),
+            CID: trx5CID.String(),
+            Dst: mockNonCanonicalBlockTxs[1].To().String(),
+            Src: mocks.SenderAddr.String(),
+            Index: 1,
+            Data: mockNonCanonicalBlockTxs[1].Data(),
+            Type: mockNonCanonicalBlockTxs[1].Type(),
+            Value: mockNonCanonicalBlockTxs[1].Value().String(),
+        },
+    }
+
+    // expected transactions in the canonical block at London height + 1
+    mockNonCanonicalBlock2Txs := mockNonCanonicalBlock2.Transactions()
+    expectedNonCanonicalBlock2Txs := []models.TxModel{
+        {
+            BlockNumber: mockNonCanonicalBlock2.Number().String(),
+            HeaderID: mockNonCanonicalBlock2.Hash().String(),
+            TxHash: mockNonCanonicalBlock2Txs[0].Hash().String(),
+            CID: trx3CID.String(),
+            Dst: "",
+            Src: mocks.SenderAddr.String(),
+            Index: 0,
+            Data: mockNonCanonicalBlock2Txs[0].Data(),
+            Type: mockNonCanonicalBlock2Txs[0].Type(),
+            Value: mockNonCanonicalBlock2Txs[0].Value().String(),
+        },
+        {
+            BlockNumber: mockNonCanonicalBlock2.Number().String(),
+            HeaderID: mockNonCanonicalBlock2.Hash().String(),
+            TxHash: mockNonCanonicalBlock2Txs[1].Hash().String(),
+            CID: trx5CID.String(),
+            Dst: mockNonCanonicalBlock2Txs[1].To().String(),
+            Src: mocks.SenderAddr.String(),
+            Index: 1,
+            Data: mockNonCanonicalBlock2Txs[1].Data(),
+            Type: mockNonCanonicalBlock2Txs[1].Type(),
+            Value: mockNonCanonicalBlock2Txs[1].Value().String(),
+        },
+    }
+
+    require.Equal(t, len(expectedBlockTxs)+len(expectedNonCanonicalBlockTxs)+len(expectedNonCanonicalBlock2Txs), len(txRes))
+
+    // sort results such that non-canonical block transactions come after canonical block ones
+    sort.SliceStable(txRes, func(i, j int) bool {
+        if txRes[i].BlockNumber < txRes[j].BlockNumber {
+            return true
+        } else if txRes[i].HeaderID == txRes[j].HeaderID {
+            return txRes[i].Index < txRes[j].Index
+        } else if txRes[i].HeaderID == mockBlock.Hash().String() {
+            return true
+        } else {
+            return false
+        }
+    })
+
+    for i, expectedTx := range expectedBlockTxs {
+        require.Equal(t, expectedTx, txRes[i])
+    }
+    for i, expectedTx := range expectedNonCanonicalBlockTxs {
+        require.Equal(t, expectedTx, txRes[len(expectedBlockTxs)+i])
+    }
+    for i, expectedTx := range expectedNonCanonicalBlock2Txs {
+        require.Equal(t, expectedTx, txRes[len(expectedBlockTxs)+len(expectedNonCanonicalBlockTxs)+i])
+    }
+
+    // check indexed IPLD blocks
+    var data []byte
+    var prefixedKey string
+
+    txCIDs := []cid.Cid{trx1CID, trx2CID, trx3CID, trx4CID, trx5CID}
+    txRLPs := [][]byte{tx1, tx2, tx3, tx4, tx5}
+    for i, txCID := range txCIDs {
+        prefixedKey = shared.MultihashKeyFromCID(txCID)
+        err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
+        if err != nil {
+            t.Fatal(err)
+        }
+        require.Equal(t, txRLPs[i], data)
+    }
+}
+
+func testPublishAndIndexReceiptsNonCanonical(t *testing.T) {
+    // check indexed receipts
+    pgStr := `SELECT CAST(block_number as TEXT), header_id, tx_id, leaf_cid, leaf_mh_key, post_status, post_state, contract, contract_hash, log_root
+        FROM eth.receipt_cids
+        ORDER BY block_number`
+    rctRes := make([]models.ReceiptModel, 0)
+    err = db.Select(context.Background(), &rctRes, pgStr)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // expected receipts in the canonical block
+    rctCids := []cid.Cid{rct1CID, rct2CID, rct3CID, rct4CID, rct5CID}
+    expectedBlockRctsMap := make(map[string]models.ReceiptModel, len(mocks.MockReceipts))
+    for i, mockBlockRct := range mocks.MockReceipts {
+        rctModel := createRctModel(mockBlockRct, rctCids[i], mockBlock.Number().String())
+        expectedBlockRctsMap[rctCids[i].String()] = rctModel
+    }
+
+    // expected receipts in the canonical block at London height + 1
+    nonCanonicalBlockRctCids := []cid.Cid{nonCanonicalBlockRct1CID, nonCanonicalBlockRct2CID}
+    expectedNonCanonicalBlockRctsMap := make(map[string]models.ReceiptModel, len(mocks.MockNonCanonicalBlockReceipts))
+    for i, mockNonCanonicalBlockRct := range mocks.MockNonCanonicalBlockReceipts {
+        rctModel := createRctModel(mockNonCanonicalBlockRct, nonCanonicalBlockRctCids[i], mockNonCanonicalBlock.Number().String())
+        expectedNonCanonicalBlockRctsMap[nonCanonicalBlockRctCids[i].String()] = rctModel
+    }
+
+    // expected receipts in the canonical block at London height + 1
+    nonCanonicalBlock2RctCids := []cid.Cid{nonCanonicalBlock2Rct1CID, nonCanonicalBlock2Rct2CID}
+    expectedNonCanonicalBlock2RctsMap := make(map[string]models.ReceiptModel, len(mocks.MockNonCanonicalBlock2Receipts))
+    for i, mockNonCanonicalBlock2Rct := range mocks.MockNonCanonicalBlock2Receipts {
+        rctModel := createRctModel(mockNonCanonicalBlock2Rct, nonCanonicalBlock2RctCids[i], mockNonCanonicalBlock2.Number().String())
+        expectedNonCanonicalBlock2RctsMap[nonCanonicalBlock2RctCids[i].String()] = rctModel
+    }
+
+    require.Equal(t, len(expectedBlockRctsMap)+len(expectedNonCanonicalBlockRctsMap)+len(expectedNonCanonicalBlock2RctsMap), len(rctRes))
+
+    // sort results such that non-canonical block reciepts come after canonical block ones
+    sort.SliceStable(rctRes, func(i, j int) bool {
+        if rctRes[i].BlockNumber < rctRes[j].BlockNumber {
+            return true
+        } else if rctRes[i].HeaderID == rctRes[j].HeaderID {
+            return false
+        } else if rctRes[i].HeaderID == mockBlock.Hash().String() {
+            return true
+        } else {
+            return false
+        }
+    })
+
+    for i := 0; i < len(expectedBlockRctsMap); i++ {
+        rct := rctRes[i]
+        require.Contains(t, expectedBlockRctsMap, rct.LeafCID)
+        require.Equal(t, expectedBlockRctsMap[rct.LeafCID], rct)
+    }
+
+    for i := 0; i < len(expectedNonCanonicalBlockRctsMap); i++ {
+        rct := rctRes[len(expectedBlockRctsMap)+i]
+        require.Contains(t, expectedNonCanonicalBlockRctsMap, rct.LeafCID)
+        require.Equal(t, expectedNonCanonicalBlockRctsMap[rct.LeafCID], rct)
+    }
+
+    for i := 0; i < len(expectedNonCanonicalBlock2RctsMap); i++ {
+        rct := rctRes[len(expectedBlockRctsMap)+len(expectedNonCanonicalBlockRctsMap)+i]
+        require.Contains(t, expectedNonCanonicalBlock2RctsMap, rct.LeafCID)
+        require.Equal(t, expectedNonCanonicalBlock2RctsMap[rct.LeafCID], rct)
+    }
+
+    // check indexed rct IPLD blocks
+    var data []byte
+    var prefixedKey string
+
+    rctRLPs := [][]byte{
+        rctLeaf1, rctLeaf2, rctLeaf3, rctLeaf4, rctLeaf5,
+        nonCanonicalBlockRctLeaf1, nonCanonicalBlockRctLeaf2,
+    }
+    for i, rctCid := range append(rctCids, nonCanonicalBlockRctCids...) {
+        prefixedKey = shared.MultihashKeyFromCID(rctCid)
+        err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
+        if err != nil {
+            t.Fatal(err)
+        }
+        require.Equal(t, rctRLPs[i], data)
+    }
+
+    nonCanonicalBlock2RctRLPs := [][]byte{nonCanonicalBlock2RctLeaf1, nonCanonicalBlock2RctLeaf2}
+    for i, rctCid := range nonCanonicalBlock2RctCids {
+        prefixedKey = shared.MultihashKeyFromCID(rctCid)
+        err = db.Get(context.Background(), &data, ipfsPgGet, prefixedKey, mocks.Block2Number.Uint64())
+        if err != nil {
+            t.Fatal(err)
+        }
+        require.Equal(t, nonCanonicalBlock2RctRLPs[i], data)
+    }
+}
+
+func testPublishAndIndexLogsNonCanonical(t *testing.T) {
+    // check indexed logs
+    pgStr := `SELECT address, log_data, topic0, topic1, topic2, topic3, data
+        FROM eth.log_cids
+        INNER JOIN public.blocks ON (log_cids.block_number = blocks.block_number AND log_cids.leaf_mh_key = blocks.key)
+        WHERE log_cids.block_number = $1 AND header_id = $2 AND rct_id = $3
+        ORDER BY log_cids.index ASC`
+
+    type rctWithBlockHash struct {
+        rct *types.Receipt
+        blockHash string
+        blockNumber uint64
+    }
+    mockRcts := make([]rctWithBlockHash, 0)
+
+    // logs in the canonical block
+    for _, mockBlockRct := range mocks.MockReceipts {
+        mockRcts = append(mockRcts, rctWithBlockHash{
+            mockBlockRct,
+            mockBlock.Hash().String(),
+            mockBlock.NumberU64(),
+        })
+    }
+
+    // logs in the canonical block at London height + 1
+    for _, mockBlockRct := range mocks.MockNonCanonicalBlockReceipts {
+        mockRcts = append(mockRcts, rctWithBlockHash{
+            mockBlockRct,
+            mockNonCanonicalBlock.Hash().String(),
+            mockNonCanonicalBlock.NumberU64(),
+        })
+    }
+
+    // logs in the canonical block at London height + 1
+    for _, mockBlockRct := range mocks.MockNonCanonicalBlock2Receipts {
+        mockRcts = append(mockRcts, rctWithBlockHash{
+            mockBlockRct,
+            mockNonCanonicalBlock2.Hash().String(),
+            mockNonCanonicalBlock2.NumberU64(),
+        })
+    }
+
+    for _, mockRct := range mockRcts {
+        type logWithIPLD struct {
+            models.LogsModel
+            IPLDData []byte `db:"data"`
+        }
+        logRes := make([]logWithIPLD, 0)
+        err = db.Select(context.Background(), &logRes, pgStr, mockRct.blockNumber, mockRct.blockHash, mockRct.rct.TxHash.String())
+        require.NoError(t, err)
+        require.Equal(t, len(mockRct.rct.Logs), len(logRes))
+
+        for i, log := range mockRct.rct.Logs {
+            topicSet := make([]string, 4)
+            for ti, topic := range log.Topics {
+                topicSet[ti] = topic.Hex()
+            }
+
+            expectedLog := models.LogsModel{
+                Address: log.Address.String(),
+                Data: log.Data,
+                Topic0: topicSet[0],
+                Topic1: topicSet[1],
+                Topic2: topicSet[2],
+                Topic3: topicSet[3],
+            }
+            require.Equal(t, expectedLog, logRes[i].LogsModel)
+
+            // check indexed log IPLD block
+            var nodeElements []interface{}
+            err = rlp.DecodeBytes(logRes[i].IPLDData, &nodeElements)
+            require.NoError(t, err)
+
+            if len(nodeElements) == 2 {
+                logRaw, err := rlp.EncodeToBytes(log)
+                require.NoError(t, err)
+                // 2nd element of the leaf node contains the encoded log data.
+                require.Equal(t, nodeElements[1].([]byte), logRaw)
+            } else {
+                logRaw, err := rlp.EncodeToBytes(log)
+                require.NoError(t, err)
+                // raw log was IPLDized
+                require.Equal(t, logRes[i].IPLDData, logRaw)
+            }
+        }
+    }
+}
+
+func testPublishAndIndexStateNonCanonical(t *testing.T) {
+    // check indexed state nodes
+    pgStr := `SELECT state_path, state_leaf_key, node_type, cid, mh_key, diff
+        FROM eth.state_cids
+        WHERE block_number = $1
+        AND header_id = $2
+        ORDER BY state_path`
+
+    removedNodeCID, _ := cid.Decode(shared.RemovedNodeStateCID)
+    stateNodeCIDs := []cid.Cid{state1CID, state2CID, removedNodeCID, removedNodeCID}
+
+    // expected state nodes in the canonical and the non-canonical block at London height
+    expectedStateNodes := make([]models.StateNodeModel, 0)
+    for i, stateDiff := range mocks.StateDiffs {
+        expectedStateNodes = append(expectedStateNodes, models.StateNodeModel{
+            Path: stateDiff.Path,
+            StateKey: common.BytesToHash(stateDiff.LeafKey).Hex(),
+            NodeType: stateDiff.NodeType.Int(),
+            CID: stateNodeCIDs[i].String(),
+            MhKey: shared.MultihashKeyFromCID(stateNodeCIDs[i]),
+            Diff: true,
+        })
+    }
+    sort.Slice(expectedStateNodes, func(i, j int) bool {
+        if bytes.Compare(expectedStateNodes[i].Path, expectedStateNodes[j].Path) < 0 {
+            return true
+        } else {
+            return false
+        }
+    })
+
+    // expected state nodes in the non-canonical block at London height + 1
+    expectedNonCanonicalBlock2StateNodes := make([]models.StateNodeModel, 0)
+    for i, stateDiff := range mocks.StateDiffs[:2] {
+        expectedNonCanonicalBlock2StateNodes = append(expectedNonCanonicalBlock2StateNodes, models.StateNodeModel{
+            Path: stateDiff.Path,
+            StateKey: common.BytesToHash(stateDiff.LeafKey).Hex(),
+            NodeType: stateDiff.NodeType.Int(),
+            CID: stateNodeCIDs[i].String(),
+            MhKey: shared.MultihashKeyFromCID(stateNodeCIDs[i]),
+            Diff: true,
+        })
+    }
+
+    // check state nodes for canonical block
+    stateNodes := make([]models.StateNodeModel, 0)
+    err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64(), mockBlock.Hash().String())
+    if err != nil {
+        t.Fatal(err)
+    }
+    require.Equal(t, len(expectedStateNodes), len(stateNodes))
+
+    for i, expectedStateNode := range expectedStateNodes {
+        require.Equal(t, expectedStateNode, stateNodes[i])
+    }
+
+    // check state nodes for non-canonical block at London height
+    stateNodes = make([]models.StateNodeModel, 0)
+    err = db.Select(context.Background(), &stateNodes, pgStr, mocks.BlockNumber.Uint64(), mockNonCanonicalBlock.Hash().String())
+    if err != nil {
+        t.Fatal(err)
+    }
+    require.Equal(t, len(expectedStateNodes), len(stateNodes))

+    for i, expectedStateNode := range expectedStateNodes {
+        require.Equal(t, expectedStateNode, stateNodes[i])
+    }
+
+    // check state nodes for non-canonical block at London height + 1
+    stateNodes = make([]models.StateNodeModel, 0)
+    err = db.Select(context.Background(), &stateNodes, pgStr, mocks.Block2Number.Uint64(), mockNonCanonicalBlock2.Hash().String())
+    if err != nil {
+        t.Fatal(err)
+    }
+    require.Equal(t, len(expectedNonCanonicalBlock2StateNodes), len(stateNodes))
+
+    for i, expectedStateNode := range expectedNonCanonicalBlock2StateNodes {
+        require.Equal(t, expectedStateNode, stateNodes[i])
+    }
+}
+
+func testPublishAndIndexStorageNonCanonical(t *testing.T) {
+    // check indexed storage nodes
+    pgStr := `SELECT state_path, storage_path, storage_leaf_key, node_type, cid, mh_key, diff
+        FROM eth.storage_cids
+        WHERE block_number = $1
+        AND header_id = $2
+        ORDER BY state_path, storage_path`
+
+    removedNodeCID, _ := cid.Decode(shared.RemovedNodeStorageCID)
+    storageNodeCIDs := []cid.Cid{storageCID, removedNodeCID, removedNodeCID, removedNodeCID}
+
+    // expected state nodes in the canonical and the non-canonical block at London height
+    expectedStorageNodes := make([]models.StorageNodeModel, 0)
+    storageNodeIndex := 0
+    for _, stateDiff := range mocks.StateDiffs {
+        for _, storageNode := range stateDiff.StorageNodes {
+            expectedStorageNodes = append(expectedStorageNodes, models.StorageNodeModel{
+                StatePath: stateDiff.Path,
+                Path: storageNode.Path,
+                StorageKey: common.BytesToHash(storageNode.LeafKey).Hex(),
+                NodeType: storageNode.NodeType.Int(),
+                CID: storageNodeCIDs[storageNodeIndex].String(),
+                MhKey: shared.MultihashKeyFromCID(storageNodeCIDs[storageNodeIndex]),
+                Diff: true,
+            })
+            storageNodeIndex++
+        }
+    }
+    sort.Slice(expectedStorageNodes, func(i, j int) bool {
+        if bytes.Compare(expectedStorageNodes[i].Path, expectedStorageNodes[j].Path) < 0 {
+            return true
+        } else {
+            return false
+        }
+    })
+
+    // expected state nodes in the non-canonical block at London height + 1
+    expectedNonCanonicalBlock2StorageNodes := make([]models.StorageNodeModel, 0)
+    storageNodeIndex = 0
+    for _, stateDiff := range mocks.StateDiffs[:2] {
+        for _, storageNode := range stateDiff.StorageNodes {
+            expectedNonCanonicalBlock2StorageNodes = append(expectedNonCanonicalBlock2StorageNodes, models.StorageNodeModel{
+                StatePath: stateDiff.Path,
+                Path: storageNode.Path,
+                StorageKey: common.BytesToHash(storageNode.LeafKey).Hex(),
+                NodeType: storageNode.NodeType.Int(),
+                CID: storageNodeCIDs[storageNodeIndex].String(),
+                MhKey: shared.MultihashKeyFromCID(storageNodeCIDs[storageNodeIndex]),
+                Diff: true,
+            })
+            storageNodeIndex++
+        }
+    }
+
+    // check storage nodes for canonical block
+    storageNodes := make([]models.StorageNodeModel, 0)
+    err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64(), mockBlock.Hash().String())
+    if err != nil {
+        t.Fatal(err)
+    }
+    require.Equal(t, len(expectedStorageNodes), len(storageNodes))
+
+    for i, expectedStorageNode := range expectedStorageNodes {
+        require.Equal(t, expectedStorageNode, storageNodes[i])
+    }
+
+    // check storage nodes for non-canonical block at London height
+    storageNodes = make([]models.StorageNodeModel, 0)
+    err = db.Select(context.Background(), &storageNodes, pgStr, mocks.BlockNumber.Uint64(), mockNonCanonicalBlock.Hash().String())
+    if err != nil {
+        t.Fatal(err)
+    }
+    require.Equal(t, len(expectedStorageNodes), len(storageNodes))
+
+    for i, expectedStorageNode := range expectedStorageNodes {
+        require.Equal(t, expectedStorageNode, storageNodes[i])
+    }
+
+    // check storage nodes for non-canonical block at London height + 1
+    storageNodes = make([]models.StorageNodeModel, 0)
+    err = db.Select(context.Background(), &storageNodes, pgStr, mockNonCanonicalBlock2.NumberU64(), mockNonCanonicalBlock2.Hash().String())
+    if err != nil {
+        t.Fatal(err)
+    }
+    require.Equal(t, len(expectedNonCanonicalBlock2StorageNodes), len(storageNodes))
+
+    for i, expectedStorageNode := range expectedNonCanonicalBlock2StorageNodes {
+        require.Equal(t, expectedStorageNode, storageNodes[i])
+    }
+}
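The test helpers above re-sort the query results so that rows belonging to the canonical block come before rows of the non-canonical blocks before comparing against expectations. The following is a self-contained sketch of that ordering rule on toy rows; it is an illustration only, not part of this diff, and the struct and hash strings below are made up.

package main

import (
    "fmt"
    "sort"
)

// row stands in for the few models.TxModel fields the comparator looks at.
type row struct {
    BlockNumber string
    HeaderID    string
    Index       int64
}

func main() {
    canonicalHash, nonCanonicalHash := "0xcanonical", "0xnoncanonical"
    rows := []row{
        {"6", nonCanonicalHash, 0},
        {"6", canonicalHash, 1},
        {"6", canonicalHash, 0},
        {"7", nonCanonicalHash, 0},
    }
    // Same ordering rule as the tests: lower block number first, rows of the
    // canonical block before rows of a non-canonical block at the same height,
    // and rows within one block ordered by index.
    sort.SliceStable(rows, func(i, j int) bool {
        if rows[i].BlockNumber < rows[j].BlockNumber {
            return true
        } else if rows[i].HeaderID == rows[j].HeaderID {
            return rows[i].Index < rows[j].Index
        } else if rows[i].HeaderID == canonicalHash {
            return true
        }
        return false
    })
    fmt.Println(rows) // canonical rows for block 6 first, then the non-canonical ones
}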
@@ -31,7 +31,6 @@ import (
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
     "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
-    "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
     "github.com/ethereum/go-ethereum/statediff/indexer/mocks"
     "github.com/ethereum/go-ethereum/statediff/indexer/models"
     "github.com/ethereum/go-ethereum/statediff/indexer/shared"
@@ -50,27 +49,15 @@ func setupPGXIndexer(t *testing.T) {

 func setupPGX(t *testing.T) {
     setupPGXIndexer(t)
-    var tx interfaces.Batch
-    tx, err = ind.PushBlock(
-        mockBlock,
-        mocks.MockReceipts,
-        mocks.MockBlock.Difficulty())
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer func() {
-        if err := tx.Submit(err); err != nil {
-            t.Fatal(err)
-        }
-    }()
-    for _, node := range mocks.StateDiffs {
-        err = ind.PushStateNode(tx, node, mockBlock.Hash().String())
-        require.NoError(t, err)
-    }
-
-    require.Equal(t, mocks.BlockNumber.String(), tx.(*sql.BatchTx).BlockNumber)
+    setupTestData(t)
+}
+
+func setupPGXNonCanonical(t *testing.T) {
+    setupPGXIndexer(t)
+    setupTestDataNonCanonical(t)
 }

+// Test indexer for a canonical block
 func TestPGXIndexer(t *testing.T) {
     t.Run("Publish and index header IPLDs in a single tx", func(t *testing.T) {
         setupPGX(t)
@@ -337,7 +324,7 @@ func TestPGXIndexer(t *testing.T) {
         t.Fatal(err)
     }

-    // Decode the log leaf node.
+    // Decode the receipt leaf node.
     var nodeElements []interface{}
     err = rlp.DecodeBytes(result[0].Data, &nodeElements)
     require.NoError(t, err)
@@ -605,6 +592,57 @@ func TestPGXIndexer(t *testing.T) {
     })
 }

+// Test indexer for a canonical + a non-canonical block at London height + a non-canonical block at London height + 1
+func TestPGXIndexerNonCanonical(t *testing.T) {
+    t.Run("Publish and index header", func(t *testing.T) {
+        setupPGXNonCanonical(t)
+        defer tearDown(t)
+        defer checkTxClosure(t, 1, 0, 1)
+
+        testPublishAndIndexHeaderNonCanonical(t)
+    })
+
+    t.Run("Publish and index transactions", func(t *testing.T) {
+        setupPGXNonCanonical(t)
+        defer tearDown(t)
+        defer checkTxClosure(t, 1, 0, 1)
+
+        testPublishAndIndexTransactionsNonCanonical(t)
+    })
+
+    t.Run("Publish and index receipts", func(t *testing.T) {
+        setupPGXNonCanonical(t)
+        defer tearDown(t)
+        defer checkTxClosure(t, 1, 0, 1)
+
+        testPublishAndIndexReceiptsNonCanonical(t)
+    })
+
+    t.Run("Publish and index logs", func(t *testing.T) {
+        setupPGXNonCanonical(t)
+        defer tearDown(t)
+        defer checkTxClosure(t, 1, 0, 1)
+
+        testPublishAndIndexLogsNonCanonical(t)
+    })
+
+    t.Run("Publish and index state nodes", func(t *testing.T) {
+        setupPGXNonCanonical(t)
+        defer tearDown(t)
+        defer checkTxClosure(t, 1, 0, 1)
+
+        testPublishAndIndexStateNonCanonical(t)
+    })
+
+    t.Run("Publish and index storage nodes", func(t *testing.T) {
+        setupPGXNonCanonical(t)
+        defer tearDown(t)
+        defer checkTxClosure(t, 1, 0, 1)
+
+        testPublishAndIndexStorageNonCanonical(t)
+    })
+}
+
 func TestPGXWatchAddressMethods(t *testing.T) {
     setupPGXIndexer(t)
     defer tearDown(t)
@@ -32,7 +32,6 @@ import (
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
     "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
-    "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
     "github.com/ethereum/go-ethereum/statediff/indexer/mocks"
     "github.com/ethereum/go-ethereum/statediff/indexer/models"
     "github.com/ethereum/go-ethereum/statediff/indexer/shared"
@@ -51,27 +50,15 @@ func setupSQLXIndexer(t *testing.T) {

 func setupSQLX(t *testing.T) {
     setupSQLXIndexer(t)
-    var tx interfaces.Batch
-    tx, err = ind.PushBlock(
-        mockBlock,
-        mocks.MockReceipts,
-        mocks.MockBlock.Difficulty())
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer func() {
-        if err := tx.Submit(err); err != nil {
-            t.Fatal(err)
-        }
-    }()
-    for _, node := range mocks.StateDiffs {
-        err = ind.PushStateNode(tx, node, mockBlock.Hash().String())
-        require.NoError(t, err)
-    }
-
-    require.Equal(t, mocks.BlockNumber.String(), tx.(*sql.BatchTx).BlockNumber)
+    setupTestData(t)
+}
+
+func setupSQLXNonCanonical(t *testing.T) {
+    setupPGXIndexer(t)
+    setupTestDataNonCanonical(t)
 }

+// Test indexer for a canonical block
 func TestSQLXIndexer(t *testing.T) {
     t.Run("Publish and index header IPLDs in a single tx", func(t *testing.T) {
         setupSQLX(t)
@@ -331,7 +318,7 @@ func TestSQLXIndexer(t *testing.T) {
         t.Fatal(err)
     }

-    // Decode the log leaf node.
+    // Decode the receipt leaf node.
     var nodeElements []interface{}
     err = rlp.DecodeBytes(result[0].Data, &nodeElements)
     require.NoError(t, err)
@@ -598,6 +585,57 @@ func TestSQLXIndexer(t *testing.T) {
     })
 }

+// Test indexer for a canonical + a non-canonical block at London height + a non-canonical block at London height + 1
+func TestSQLXIndexerNonCanonical(t *testing.T) {
+    t.Run("Publish and index header", func(t *testing.T) {
+        setupSQLXNonCanonical(t)
+        defer tearDown(t)
+        defer checkTxClosure(t, 1, 0, 1)
+
+        testPublishAndIndexHeaderNonCanonical(t)
+    })
+
+    t.Run("Publish and index transactions", func(t *testing.T) {
+        setupSQLXNonCanonical(t)
+        defer tearDown(t)
+        defer checkTxClosure(t, 1, 0, 1)
+
+        testPublishAndIndexTransactionsNonCanonical(t)
+    })
+
+    t.Run("Publish and index receipts", func(t *testing.T) {
+        setupSQLXNonCanonical(t)
+        defer tearDown(t)
+        defer checkTxClosure(t, 1, 0, 1)
+
+        testPublishAndIndexReceiptsNonCanonical(t)
+    })
+
+    t.Run("Publish and index logs", func(t *testing.T) {
+        setupSQLXNonCanonical(t)
+        defer tearDown(t)
+        defer checkTxClosure(t, 1, 0, 1)
+
+        testPublishAndIndexLogsNonCanonical(t)
+    })
+
+    t.Run("Publish and index state nodes", func(t *testing.T) {
+        setupSQLXNonCanonical(t)
+        defer tearDown(t)
+        defer checkTxClosure(t, 1, 0, 1)
+
+        testPublishAndIndexStateNonCanonical(t)
+    })
+
+    t.Run("Publish and index storage nodes", func(t *testing.T) {
+        setupSQLXNonCanonical(t)
+        defer tearDown(t)
+        defer checkTxClosure(t, 1, 0, 1)
+
+        testPublishAndIndexStorageNonCanonical(t)
+    })
+}
+
 func TestSQLXWatchAddressMethods(t *testing.T) {
     setupSQLXIndexer(t)
     defer tearDown(t)
@@ -40,6 +40,9 @@ var (
     // TODO: Update this to `MainnetChainConfig` when `LondonBlock` is added
     TestConfig = params.RopstenChainConfig
     BlockNumber = TestConfig.LondonBlock
+
+    // canonical block at London height
+    // includes 5 transactions: 3 Legacy + 1 EIP-2930 + 1 EIP-1559
     MockHeader = types.Header{
         Time: 0,
         Number: new(big.Int).Set(BlockNumber),
@@ -54,6 +57,34 @@ var (
     MockTransactions, MockReceipts, SenderAddr = createTransactionsAndReceipts(TestConfig, BlockNumber)
     MockBlock = types.NewBlock(&MockHeader, MockTransactions, nil, MockReceipts, new(trie.Trie))
     MockHeaderRlp, _ = rlp.EncodeToBytes(MockBlock.Header())
+
+    // non-canonical block at London height
+    // includes 2nd and 5th transactions from the canonical block
+    MockNonCanonicalHeader = MockHeader
+    MockNonCanonicalBlockTransactions = types.Transactions{MockTransactions[1], MockTransactions[4]}
+    MockNonCanonicalBlockReceipts = createNonCanonicalBlockReceipts(TestConfig, BlockNumber, MockNonCanonicalBlockTransactions)
+    MockNonCanonicalBlock = types.NewBlock(&MockNonCanonicalHeader, MockNonCanonicalBlockTransactions, nil, MockNonCanonicalBlockReceipts, new(trie.Trie))
+    MockNonCanonicalHeaderRlp, _ = rlp.EncodeToBytes(MockNonCanonicalBlock.Header())
+
+    // non-canonical block at London height + 1
+    // includes 3rd and 5th transactions from the canonical block
+    Block2Number = big.NewInt(BlockNumber.Int64() + 1)
+    MockNonCanonicalHeader2 = types.Header{
+        Time: 0,
+        Number: new(big.Int).Set(Block2Number),
+        Root: common.HexToHash("0x0"),
+        TxHash: common.HexToHash("0x0"),
+        ReceiptHash: common.HexToHash("0x0"),
+        Difficulty: big.NewInt(6000000),
+        Extra: []byte{},
+        BaseFee: big.NewInt(params.InitialBaseFee),
+        Coinbase: common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476777"),
+    }
+    MockNonCanonicalBlock2Transactions = types.Transactions{MockTransactions[2], MockTransactions[4]}
+    MockNonCanonicalBlock2Receipts = createNonCanonicalBlockReceipts(TestConfig, Block2Number, MockNonCanonicalBlock2Transactions)
+    MockNonCanonicalBlock2 = types.NewBlock(&MockNonCanonicalHeader2, MockNonCanonicalBlock2Transactions, nil, MockNonCanonicalBlock2Receipts, new(trie.Trie))
+    MockNonCanonicalHeader2Rlp, _ = rlp.EncodeToBytes(MockNonCanonicalBlock2.Header())
+
     Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
     AnotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
     ContractAddress = crypto.CreateAddress(SenderAddr, MockTransactions[2].Nonce())
@@ -440,3 +471,38 @@ func createTransactionsAndReceipts(config *params.ChainConfig, blockNumber *big.

     return types.Transactions{signedTrx1, signedTrx2, signedTrx3, signedTrx4, signedTrx5}, types.Receipts{mockReceipt1, mockReceipt2, mockReceipt3, mockReceipt4, mockReceipt5}, senderAddr
 }
+
+// createNonCanonicalBlockReceipts is a helper function to generate mock receipts with mock logs for non-canonical blocks
+func createNonCanonicalBlockReceipts(config *params.ChainConfig, blockNumber *big.Int, transactions types.Transactions) types.Receipts {
+    transactionSigner := types.MakeSigner(config, blockNumber)
+    mockCurve := elliptic.P256()
+    mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
+    if err != nil {
+        log.Crit(err.Error())
+    }
+
+    signedTrx0, err := types.SignTx(transactions[0], transactionSigner, mockPrvKey)
+    if err != nil {
+        log.Crit(err.Error())
+    }
+
+    signedTrx1, err := types.SignTx(transactions[1], transactionSigner, mockPrvKey)
+    if err != nil {
+        log.Crit(err.Error())
+    }
+
+    mockReceipt0 := types.NewReceipt(common.HexToHash("0x3").Bytes(), false, 300)
+    mockReceipt0.Logs = []*types.Log{MockLog1, ShortLog1}
+    mockReceipt0.TxHash = signedTrx0.Hash()
+
+    mockReceipt1 := &types.Receipt{
+        Type: types.DynamicFeeTxType,
+        PostState: common.HexToHash("0x4").Bytes(),
+        Status: types.ReceiptStatusSuccessful,
+        CumulativeGasUsed: 300,
+        Logs: []*types.Log{},
+        TxHash: signedTrx1.Hash(),
+    }
+
+    return types.Receipts{mockReceipt0, mockReceipt1}
+}
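For context on the new createNonCanonicalBlockReceipts helper above: it produces one legacy-style receipt carrying a post-state root plus the package's mock logs, and one dynamic-fee receipt, each tied to a freshly signed copy of a reused transaction. The following minimal sketch shows just those two receipt shapes using go-ethereum's core/types; it is an illustration only, not part of this diff, and it omits the transaction signing and the MockLog1/ShortLog1 fixtures from the test package.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
)

func main() {
    // Legacy-style receipt with a post-state root, like mockReceipt0 above
    // (logs and the signed transaction hash are left out here).
    legacyRct := types.NewReceipt(common.HexToHash("0x3").Bytes(), false, 300)
    legacyRct.Logs = []*types.Log{}

    // Dynamic-fee receipt, like mockReceipt1 above.
    dynRct := &types.Receipt{
        Type:              types.DynamicFeeTxType,
        PostState:         common.HexToHash("0x4").Bytes(),
        Status:            types.ReceiptStatusSuccessful,
        CumulativeGasUsed: 300,
        Logs:              []*types.Log{},
    }

    fmt.Println(len(types.Receipts{legacyRct, dynRct})) // 2
}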