Merge pull request #92 from vulcanize/fix-get-logs

Fix `getLog` API to use `log_cids` table
commit cf4543961c
Arijit Das, 2021-09-16 17:43:24 +05:30 (committed by GitHub)
GPG Key ID: 4AEE18F83AFDEB23
37 changed files with 1274 additions and 1709 deletions


@ -28,9 +28,9 @@ import (
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/vulcanize/ipld-eth-server/pkg/eth"
"github.com/vulcanize/gap-filler/pkg/mux" "github.com/vulcanize/gap-filler/pkg/mux"
"github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-server/pkg/graphql" "github.com/vulcanize/ipld-eth-server/pkg/graphql"
srpc "github.com/vulcanize/ipld-eth-server/pkg/rpc" srpc "github.com/vulcanize/ipld-eth-server/pkg/rpc"
s "github.com/vulcanize/ipld-eth-server/pkg/serve" s "github.com/vulcanize/ipld-eth-server/pkg/serve"


@ -16,6 +16,7 @@ CREATE TABLE eth.header_cids (
bloom BYTEA NOT NULL, bloom BYTEA NOT NULL,
timestamp NUMERIC NOT NULL, timestamp NUMERIC NOT NULL,
times_validated INTEGER NOT NULL DEFAULT 1, times_validated INTEGER NOT NULL DEFAULT 1,
base_fee BIGINT,
UNIQUE (block_number, block_hash) UNIQUE (block_number, block_hash)
); );


@ -9,6 +9,7 @@ CREATE TABLE eth.transaction_cids (
dst VARCHAR(66) NOT NULL, dst VARCHAR(66) NOT NULL,
src VARCHAR(66) NOT NULL, src VARCHAR(66) NOT NULL,
tx_data BYTEA, tx_data BYTEA,
tx_type BYTEA,
UNIQUE (header_id, tx_hash) UNIQUE (header_id, tx_hash)
); );


@ -6,13 +6,9 @@ CREATE TABLE eth.receipt_cids (
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
contract VARCHAR(66), contract VARCHAR(66),
contract_hash VARCHAR(66), contract_hash VARCHAR(66),
topic0s VARCHAR(66)[],
topic1s VARCHAR(66)[],
topic2s VARCHAR(66)[],
topic3s VARCHAR(66)[],
log_contracts VARCHAR(66)[],
post_state VARCHAR(66), post_state VARCHAR(66),
post_status INTEGER, post_status INTEGER,
log_root VARCHAR(66),
UNIQUE (tx_id) UNIQUE (tx_id)
); );


@ -36,16 +36,6 @@ CREATE INDEX rct_contract_index ON eth.receipt_cids USING btree (contract);
CREATE INDEX rct_contract_hash_index ON eth.receipt_cids USING btree (contract_hash); CREATE INDEX rct_contract_hash_index ON eth.receipt_cids USING btree (contract_hash);
CREATE INDEX rct_topic0_index ON eth.receipt_cids USING gin (topic0s);
CREATE INDEX rct_topic1_index ON eth.receipt_cids USING gin (topic1s);
CREATE INDEX rct_topic2_index ON eth.receipt_cids USING gin (topic2s);
CREATE INDEX rct_topic3_index ON eth.receipt_cids USING gin (topic3s);
CREATE INDEX rct_log_contract_index ON eth.receipt_cids USING gin (log_contracts);
-- state node indexes -- state node indexes
CREATE INDEX state_header_id_index ON eth.state_cids USING btree (header_id); CREATE INDEX state_header_id_index ON eth.state_cids USING btree (header_id);
@ -57,6 +47,8 @@ CREATE INDEX state_mh_index ON eth.state_cids USING btree (mh_key);
CREATE INDEX state_path_index ON eth.state_cids USING btree (state_path); CREATE INDEX state_path_index ON eth.state_cids USING btree (state_path);
CREATE INDEX state_node_type_index ON eth.state_cids USING btree (node_type);
-- storage node indexes -- storage node indexes
CREATE INDEX storage_state_id_index ON eth.storage_cids USING btree (state_id); CREATE INDEX storage_state_id_index ON eth.storage_cids USING btree (state_id);
@ -68,6 +60,8 @@ CREATE INDEX storage_mh_index ON eth.storage_cids USING btree (mh_key);
CREATE INDEX storage_path_index ON eth.storage_cids USING btree (storage_path); CREATE INDEX storage_path_index ON eth.storage_cids USING btree (storage_path);
CREATE INDEX storage_node_type_index ON eth.storage_cids USING btree (node_type);
-- state accounts indexes -- state accounts indexes
CREATE INDEX account_state_id_index ON eth.state_accounts USING btree (state_id); CREATE INDEX account_state_id_index ON eth.state_accounts USING btree (state_id);
@ -79,6 +73,7 @@ DROP INDEX eth.storage_root_index;
DROP INDEX eth.account_state_id_index; DROP INDEX eth.account_state_id_index;
-- storage node indexes -- storage node indexes
DROP INDEX eth.storage_node_type_index;
DROP INDEX eth.storage_path_index; DROP INDEX eth.storage_path_index;
DROP INDEX eth.storage_mh_index; DROP INDEX eth.storage_mh_index;
DROP INDEX eth.storage_cid_index; DROP INDEX eth.storage_cid_index;
@ -86,6 +81,7 @@ DROP INDEX eth.storage_leaf_key_index;
DROP INDEX eth.storage_state_id_index; DROP INDEX eth.storage_state_id_index;
-- state node indexes -- state node indexes
DROP INDEX eth.state_node_type_index;
DROP INDEX eth.state_path_index; DROP INDEX eth.state_path_index;
DROP INDEX eth.state_mh_index; DROP INDEX eth.state_mh_index;
DROP INDEX eth.state_cid_index; DROP INDEX eth.state_cid_index;
@ -93,11 +89,6 @@ DROP INDEX eth.state_leaf_key_index;
DROP INDEX eth.state_header_id_index; DROP INDEX eth.state_header_id_index;
-- receipt indexes -- receipt indexes
DROP INDEX eth.rct_log_contract_index;
DROP INDEX eth.rct_topic3_index;
DROP INDEX eth.rct_topic2_index;
DROP INDEX eth.rct_topic1_index;
DROP INDEX eth.rct_topic0_index;
DROP INDEX eth.rct_contract_hash_index; DROP INDEX eth.rct_contract_hash_index;
DROP INDEX eth.rct_contract_index; DROP INDEX eth.rct_contract_index;
DROP INDEX eth.rct_mh_index; DROP INDEX eth.rct_mh_index;


@ -0,0 +1,61 @@
-- +goose Up
CREATE TABLE eth.log_cids (
id SERIAL PRIMARY KEY,
leaf_cid TEXT NOT NULL,
leaf_mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
receipt_id INTEGER NOT NULL REFERENCES eth.receipt_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
address VARCHAR(66) NOT NULL,
log_data BYTEA,
index INTEGER NOT NULL,
topic0 VARCHAR(66),
topic1 VARCHAR(66),
topic2 VARCHAR(66),
topic3 VARCHAR(66),
UNIQUE (receipt_id, index)
);
CREATE INDEX log_mh_index ON eth.log_cids USING btree (leaf_mh_key);
CREATE INDEX log_cid_index ON eth.log_cids USING btree (leaf_cid);
CREATE INDEX log_rct_id_index ON eth.log_cids USING btree (receipt_id);
--
-- Name: log_topic0_index; Type: INDEX; Schema: eth; Owner: -
--
CREATE INDEX log_topic0_index ON eth.log_cids USING btree (topic0);
--
-- Name: log_topic1_index; Type: INDEX; Schema: eth; Owner: -
--
CREATE INDEX log_topic1_index ON eth.log_cids USING btree (topic1);
--
-- Name: log_topic2_index; Type: INDEX; Schema: eth; Owner: -
--
CREATE INDEX log_topic2_index ON eth.log_cids USING btree (topic2);
--
-- Name: log_topic3_index; Type: INDEX; Schema: eth; Owner: -
--
CREATE INDEX log_topic3_index ON eth.log_cids USING btree (topic3);
-- +goose Down
-- log indexes
DROP INDEX eth.log_mh_index;
DROP INDEX eth.log_cid_index;
DROP INDEX eth.log_rct_id_index;
DROP INDEX eth.log_topic0_index;
DROP INDEX eth.log_topic1_index;
DROP INDEX eth.log_topic2_index;
DROP INDEX eth.log_topic3_index;
DROP TABLE eth.log_cids;
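For a sense of what the new table enables, here is a minimal, self-contained Go sketch (not part of this migration) that looks up logs for one block hash by topic0 through eth.log_cids. The connection settings mirror the test database used later in this diff (vdbm/password, localhost:8077, vulcanize_testing); the struct, query text, and placeholder hash/topic values are illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

// logRow holds just the columns this example selects from eth.log_cids.
type logRow struct {
	LeafCID string `db:"leaf_cid"`
	Address string `db:"address"`
	Topic0  string `db:"topic0"`
	Index   int    `db:"index"`
}

func main() {
	db, err := sqlx.Connect("postgres",
		"postgres://vdbm:password@localhost:8077/vulcanize_testing?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// topic0 is btree-indexed (log_topic0_index above), replacing the old GIN
	// scans over the receipt_cids.topic0s array.
	q := `SELECT log_cids.leaf_cid, log_cids.address, log_cids.topic0, log_cids.index
	      FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids
	      WHERE log_cids.receipt_id = receipt_cids.id
	        AND receipt_cids.tx_id = transaction_cids.id
	        AND transaction_cids.header_id = header_cids.id
	        AND header_cids.block_hash = $1
	        AND log_cids.topic0 = $2
	      ORDER BY log_cids.index`
	rows := make([]logRow, 0)
	if err := db.Select(&rows, q, "0xSOME_BLOCK_HASH", "0xSOME_TOPIC0"); err != nil {
		log.Fatal(err)
	}
	for _, r := range rows {
		fmt.Printf("log %d emitted by %s, leaf CID %s\n", r.Index, r.Address, r.LeafCID)
	}
}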


@ -5,7 +5,7 @@ services:
restart: unless-stopped restart: unless-stopped
depends_on: depends_on:
- statediff-migrations - statediff-migrations
image: vulcanize/dapptools:v0.29.0-v1.10.2-statediff-0.0.19 image: vulcanize/dapptools:v0.29.0-v1.10.8-statediff-0.0.26
environment: environment:
DB_USER: vdbm DB_USER: vdbm
DB_NAME: vulcanize_public DB_NAME: vulcanize_public
@ -20,7 +20,7 @@ services:
restart: on-failure restart: on-failure
depends_on: depends_on:
- db - db
image: vulcanize/statediff-migrations:v0.4.0 image: vulcanize/statediff-migrations:v0.6.0
environment: environment:
DATABASE_USER: vdbm DATABASE_USER: vdbm
DATABASE_NAME: vulcanize_public DATABASE_NAME: vulcanize_public

go.mod (8 changed lines)

@ -3,7 +3,7 @@ module github.com/vulcanize/ipld-eth-server
go 1.13 go 1.13
require ( require (
github.com/ethereum/go-ethereum v1.9.25 github.com/ethereum/go-ethereum v1.10.8
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29
github.com/ipfs/go-block-format v0.0.2 github.com/ipfs/go-block-format v0.0.2
github.com/ipfs/go-cid v0.0.7 github.com/ipfs/go-cid v0.0.7
@ -13,6 +13,7 @@ require (
github.com/jmoiron/sqlx v1.2.0 github.com/jmoiron/sqlx v1.2.0
github.com/lib/pq v1.10.2 github.com/lib/pq v1.10.2
github.com/machinebox/graphql v0.2.2 github.com/machinebox/graphql v0.2.2
github.com/matryer/is v1.4.0 // indirect
github.com/multiformats/go-multihash v0.0.14 github.com/multiformats/go-multihash v0.0.14
github.com/onsi/ginkgo v1.16.2 github.com/onsi/ginkgo v1.16.2
github.com/onsi/gomega v1.10.1 github.com/onsi/gomega v1.10.1
@ -23,10 +24,9 @@ require (
github.com/spf13/viper v1.7.0 github.com/spf13/viper v1.7.0
github.com/tklauser/go-sysconf v0.3.6 // indirect github.com/tklauser/go-sysconf v0.3.6 // indirect
github.com/vulcanize/gap-filler v0.3.1 github.com/vulcanize/gap-filler v0.3.1
github.com/vulcanize/ipfs-ethdb v0.0.2-alpha github.com/vulcanize/ipfs-ethdb v0.0.4-0.20210824131459-7bb49801fc12
github.com/vulcanize/ipld-eth-indexer v0.7.1-alpha.0.20210805022537-b4692fa49849
) )
replace github.com/ethereum/go-ethereum v1.9.25 => github.com/vulcanize/go-ethereum v1.10.4-statediff-0.0.25 replace github.com/ethereum/go-ethereum v1.10.8 => github.com/vulcanize/go-ethereum v1.10.8-statediff-0.0.26
replace github.com/vulcanize/ipfs-ethdb v0.0.2-alpha => github.com/vulcanize/pg-ipfs-ethdb v0.0.2-alpha replace github.com/vulcanize/ipfs-ethdb v0.0.2-alpha => github.com/vulcanize/pg-ipfs-ethdb v0.0.2-alpha

go.sum (974 changed lines; diff suppressed because it is too large)


@ -24,6 +24,7 @@ import (
"fmt" "fmt"
"io" "io"
"math/big" "math/big"
"strconv"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -93,6 +94,7 @@ func (pea *PublicEthAPI) GetHeaderByNumber(ctx context.Context, number rpc.Block
return pea.rpcMarshalHeader(header) return pea.rpcMarshalHeader(header)
} }
} }
return nil, err return nil, err
} }
@ -104,6 +106,7 @@ func (pea *PublicEthAPI) GetHeaderByHash(ctx context.Context, hash common.Hash)
return res return res
} }
} }
if pea.ethClient != nil { if pea.ethClient != nil {
if header, err := pea.ethClient.HeaderByHash(ctx, hash); header != nil && err == nil { if header, err := pea.ethClient.HeaderByHash(ctx, hash); header != nil && err == nil {
go pea.writeStateDiffFor(hash) go pea.writeStateDiffFor(hash)
@ -112,17 +115,20 @@ func (pea *PublicEthAPI) GetHeaderByHash(ctx context.Context, hash common.Hash)
} }
} }
} }
return nil return nil
} }
// rpcMarshalHeader uses the generalized output filler, then adds the total difficulty field // rpcMarshalHeader uses the generalized output filler, then adds the total difficulty field
func (pea *PublicEthAPI) rpcMarshalHeader(header *types.Header) (map[string]interface{}, error) { func (pea *PublicEthAPI) rpcMarshalHeader(header *types.Header) (map[string]interface{}, error) {
fields := RPCMarshalHeader(header) fields := RPCMarshalHeader(header, pea.B.Config.ChainConfig.Clique != nil)
td, err := pea.B.GetTd(header.Hash()) td, err := pea.B.GetTd(header.Hash())
if err != nil { if err != nil {
return nil, err return nil, err
} }
fields["totalDifficulty"] = (*hexutil.Big)(td) fields["totalDifficulty"] = (*hexutil.Big)(td)
return fields, nil return fields, nil
} }
@ -142,12 +148,14 @@ func (pea *PublicEthAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockN
if block != nil && err == nil { if block != nil && err == nil {
return pea.rpcMarshalBlock(block, true, fullTx) return pea.rpcMarshalBlock(block, true, fullTx)
} }
if pea.ethClient != nil { if pea.ethClient != nil {
if block, err := pea.ethClient.BlockByNumber(ctx, big.NewInt(number.Int64())); block != nil && err == nil { if block, err := pea.ethClient.BlockByNumber(ctx, big.NewInt(number.Int64())); block != nil && err == nil {
go pea.writeStateDiffAt(number.Int64()) go pea.writeStateDiffAt(number.Int64())
return pea.rpcMarshalBlock(block, true, fullTx) return pea.rpcMarshalBlock(block, true, fullTx)
} }
} }
return nil, err return nil, err
} }
@ -158,12 +166,14 @@ func (pea *PublicEthAPI) GetBlockByHash(ctx context.Context, hash common.Hash, f
if block != nil && err == nil { if block != nil && err == nil {
return pea.rpcMarshalBlock(block, true, fullTx) return pea.rpcMarshalBlock(block, true, fullTx)
} }
if pea.ethClient != nil { if pea.ethClient != nil {
if block, err := pea.ethClient.BlockByHash(ctx, hash); block != nil && err == nil { if block, err := pea.ethClient.BlockByHash(ctx, hash); block != nil && err == nil {
go pea.writeStateDiffFor(hash) go pea.writeStateDiffFor(hash)
return pea.rpcMarshalBlock(block, true, fullTx) return pea.rpcMarshalBlock(block, true, fullTx)
} }
} }
return nil, err return nil, err
} }
@ -176,9 +186,11 @@ func (pea *PublicEthAPI) ChainId() hexutil.Uint64 {
return 0 return 0
} }
if config := pea.B.Config.ChainConfig; config.IsEIP155(block.Number()) { if config := pea.B.Config.ChainConfig; config.IsEIP155(block.Number()) {
chainID = config.ChainID chainID = config.ChainID
} }
return (hexutil.Uint64)(chainID.Uint64()) return (hexutil.Uint64)(chainID.Uint64())
} }
@ -201,12 +213,14 @@ func (pea *PublicEthAPI) GetUncleByBlockNumberAndIndex(ctx context.Context, bloc
block = types.NewBlockWithHeader(uncles[index]) block = types.NewBlockWithHeader(uncles[index])
return pea.rpcMarshalBlock(block, false, false) return pea.rpcMarshalBlock(block, false, false)
} }
if pea.rpc != nil { if pea.rpc != nil {
if uncle, uncleHashes, err := getBlockAndUncleHashes(pea.rpc, ctx, "eth_getUncleByBlockNumberAndIndex", blockNr, index); uncle != nil && err == nil { if uncle, uncleHashes, err := getBlockAndUncleHashes(pea.rpc, ctx, "eth_getUncleByBlockNumberAndIndex", blockNr, index); uncle != nil && err == nil {
go pea.writeStateDiffAt(blockNr.Int64()) go pea.writeStateDiffAt(blockNr.Int64())
return pea.rpcMarshalBlockWithUncleHashes(uncle, uncleHashes, false, false) return pea.rpcMarshalBlockWithUncleHashes(uncle, uncleHashes, false, false)
} }
} }
return nil, err return nil, err
} }
@ -223,12 +237,14 @@ func (pea *PublicEthAPI) GetUncleByBlockHashAndIndex(ctx context.Context, blockH
block = types.NewBlockWithHeader(uncles[index]) block = types.NewBlockWithHeader(uncles[index])
return pea.rpcMarshalBlock(block, false, false) return pea.rpcMarshalBlock(block, false, false)
} }
if pea.rpc != nil { if pea.rpc != nil {
if uncle, uncleHashes, err := getBlockAndUncleHashes(pea.rpc, ctx, "eth_getUncleByBlockHashAndIndex", blockHash, index); uncle != nil && err == nil { if uncle, uncleHashes, err := getBlockAndUncleHashes(pea.rpc, ctx, "eth_getUncleByBlockHashAndIndex", blockHash, index); uncle != nil && err == nil {
go pea.writeStateDiffFor(blockHash) go pea.writeStateDiffFor(blockHash)
return pea.rpcMarshalBlockWithUncleHashes(uncle, uncleHashes, false, false) return pea.rpcMarshalBlockWithUncleHashes(uncle, uncleHashes, false, false)
} }
} }
return nil, err return nil, err
} }
@ -238,6 +254,7 @@ func (pea *PublicEthAPI) GetUncleCountByBlockNumber(ctx context.Context, blockNr
n := hexutil.Uint(len(block.Uncles())) n := hexutil.Uint(len(block.Uncles()))
return &n return &n
} }
if pea.rpc != nil { if pea.rpc != nil {
var num *hexutil.Uint var num *hexutil.Uint
if err := pea.rpc.CallContext(ctx, &num, "eth_getUncleCountByBlockNumber", blockNr); num != nil && err == nil { if err := pea.rpc.CallContext(ctx, &num, "eth_getUncleCountByBlockNumber", blockNr); num != nil && err == nil {
@ -245,6 +262,7 @@ func (pea *PublicEthAPI) GetUncleCountByBlockNumber(ctx context.Context, blockNr
return num return num
} }
} }
return nil return nil
} }
@ -254,6 +272,7 @@ func (pea *PublicEthAPI) GetUncleCountByBlockHash(ctx context.Context, blockHash
n := hexutil.Uint(len(block.Uncles())) n := hexutil.Uint(len(block.Uncles()))
return &n return &n
} }
if pea.rpc != nil { if pea.rpc != nil {
var num *hexutil.Uint var num *hexutil.Uint
if err := pea.rpc.CallContext(ctx, &num, "eth_getUncleCountByBlockHash", blockHash); num != nil && err == nil { if err := pea.rpc.CallContext(ctx, &num, "eth_getUncleCountByBlockHash", blockHash); num != nil && err == nil {
@ -261,6 +280,7 @@ func (pea *PublicEthAPI) GetUncleCountByBlockHash(ctx context.Context, blockHash
return num return num
} }
} }
return nil return nil
} }
@ -276,6 +296,7 @@ func (pea *PublicEthAPI) GetTransactionCount(ctx context.Context, address common
if count != nil && err == nil { if count != nil && err == nil {
return count, nil return count, nil
} }
if pea.rpc != nil { if pea.rpc != nil {
var num *hexutil.Uint64 var num *hexutil.Uint64
if err := pea.rpc.CallContext(ctx, &num, "eth_getTransactionCount", address, blockNrOrHash); num != nil && err == nil { if err := pea.rpc.CallContext(ctx, &num, "eth_getTransactionCount", address, blockNrOrHash); num != nil && err == nil {
@ -283,6 +304,7 @@ func (pea *PublicEthAPI) GetTransactionCount(ctx context.Context, address common
return num, nil return num, nil
} }
} }
return nil, err return nil, err
} }
@ -291,6 +313,7 @@ func (pea *PublicEthAPI) localGetTransactionCount(ctx context.Context, address c
if err != nil { if err != nil {
return nil, err return nil, err
} }
nonce := hexutil.Uint64(account.Nonce) nonce := hexutil.Uint64(account.Nonce)
return &nonce, nil return &nonce, nil
} }
@ -301,6 +324,7 @@ func (pea *PublicEthAPI) GetBlockTransactionCountByNumber(ctx context.Context, b
n := hexutil.Uint(len(block.Transactions())) n := hexutil.Uint(len(block.Transactions()))
return &n return &n
} }
if pea.rpc != nil { if pea.rpc != nil {
var num *hexutil.Uint var num *hexutil.Uint
if err := pea.rpc.CallContext(ctx, &num, "eth_getBlockTransactionCountByNumber", blockNr); num != nil && err == nil { if err := pea.rpc.CallContext(ctx, &num, "eth_getBlockTransactionCountByNumber", blockNr); num != nil && err == nil {
@ -308,6 +332,7 @@ func (pea *PublicEthAPI) GetBlockTransactionCountByNumber(ctx context.Context, b
return num return num
} }
} }
return nil return nil
} }
@ -317,6 +342,7 @@ func (pea *PublicEthAPI) GetBlockTransactionCountByHash(ctx context.Context, blo
n := hexutil.Uint(len(block.Transactions())) n := hexutil.Uint(len(block.Transactions()))
return &n return &n
} }
if pea.rpc != nil { if pea.rpc != nil {
var num *hexutil.Uint var num *hexutil.Uint
if err := pea.rpc.CallContext(ctx, &num, "eth_getBlockTransactionCountByHash", blockHash); num != nil && err == nil { if err := pea.rpc.CallContext(ctx, &num, "eth_getBlockTransactionCountByHash", blockHash); num != nil && err == nil {
@ -324,6 +350,7 @@ func (pea *PublicEthAPI) GetBlockTransactionCountByHash(ctx context.Context, blo
return num return num
} }
} }
return nil return nil
} }
@ -332,6 +359,7 @@ func (pea *PublicEthAPI) GetTransactionByBlockNumberAndIndex(ctx context.Context
if block, _ := pea.B.BlockByNumber(ctx, blockNr); block != nil { if block, _ := pea.B.BlockByNumber(ctx, blockNr); block != nil {
return newRPCTransactionFromBlockIndex(block, uint64(index)) return newRPCTransactionFromBlockIndex(block, uint64(index))
} }
if pea.rpc != nil { if pea.rpc != nil {
var tx *RPCTransaction var tx *RPCTransaction
if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByBlockNumberAndIndex", blockNr, index); tx != nil && err == nil { if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByBlockNumberAndIndex", blockNr, index); tx != nil && err == nil {
@ -339,6 +367,7 @@ func (pea *PublicEthAPI) GetTransactionByBlockNumberAndIndex(ctx context.Context
return tx return tx
} }
} }
return nil return nil
} }
@ -347,6 +376,7 @@ func (pea *PublicEthAPI) GetTransactionByBlockHashAndIndex(ctx context.Context,
if block, _ := pea.B.BlockByHash(ctx, blockHash); block != nil { if block, _ := pea.B.BlockByHash(ctx, blockHash); block != nil {
return newRPCTransactionFromBlockIndex(block, uint64(index)) return newRPCTransactionFromBlockIndex(block, uint64(index))
} }
if pea.rpc != nil { if pea.rpc != nil {
var tx *RPCTransaction var tx *RPCTransaction
if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByBlockHashAndIndex", blockHash, index); tx != nil && err == nil { if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByBlockHashAndIndex", blockHash, index); tx != nil && err == nil {
@ -354,6 +384,7 @@ func (pea *PublicEthAPI) GetTransactionByBlockHashAndIndex(ctx context.Context,
return tx return tx
} }
} }
return nil return nil
} }
@ -586,24 +617,14 @@ func (pea *PublicEthAPI) localGetLogs(crit filters.FilterCriteria) ([]*types.Log
} }
}() }()
// If we have a blockhash to filter on, fire off single retrieval query // If we have a blockHash to filter on, fire off single retrieval query
if crit.BlockHash != nil { if crit.BlockHash != nil {
rctCIDs, err := pea.B.Retriever.RetrieveRctCIDs(tx, filter, 0, crit.BlockHash, nil) filteredLogs, err := pea.B.Retriever.RetrieveFilteredLog(tx, filter, 0, crit.BlockHash)
if err != nil { if err != nil {
return nil, err return nil, err
} }
rctIPLDs, err := pea.B.Fetcher.FetchRcts(tx, rctCIDs)
if err != nil { return decomposeLogs(filteredLogs)
return nil, err
}
if err := tx.Commit(); err != nil {
return nil, err
}
block, err := pea.B.BlockByHash(context.Background(), *crit.BlockHash)
if err != nil {
return nil, err
}
return extractLogsOfInterest(pea.B.Config.ChainConfig, *crit.BlockHash, block.NumberU64(), block.Transactions(), rctIPLDs, filter)
} }
// Otherwise, create block range from criteria // Otherwise, create block range from criteria
@ -626,27 +647,17 @@ func (pea *PublicEthAPI) localGetLogs(crit filters.FilterCriteria) ([]*types.Log
end := endingBlock.Int64() end := endingBlock.Int64()
var logs []*types.Log var logs []*types.Log
for i := start; i <= end; i++ { for i := start; i <= end; i++ {
rctCIDs, err := pea.B.Retriever.RetrieveRctCIDs(tx, filter, i, nil, nil) filteredLogs, err := pea.B.Retriever.RetrieveFilteredLog(tx, filter, i, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
block, err := pea.B.BlockByNumber(context.Background(), rpc.BlockNumber(i)) logCIDs, err := decomposeLogs(filteredLogs)
if err != nil { if err != nil {
return nil, err return nil, err
} }
rctIPLDs, err := pea.B.Fetcher.FetchRcts(tx, rctCIDs) logs = append(logs, logCIDs...)
if err != nil {
return nil, err
}
log, err := extractLogsOfInterest(pea.B.Config.ChainConfig, block.Hash(), uint64(i), block.Transactions(), rctIPLDs, filter)
if err != nil {
return nil, err
}
logs = append(logs, log...)
} }
if err := tx.Commit(); err != nil { if err := tx.Commit(); err != nil {
@ -1069,7 +1080,7 @@ func (pea *PublicEthAPI) writeStateDiffFor(blockHash common.Hash) {
// rpcMarshalBlock uses the generalized output filler, then adds the total difficulty field // rpcMarshalBlock uses the generalized output filler, then adds the total difficulty field
func (pea *PublicEthAPI) rpcMarshalBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { func (pea *PublicEthAPI) rpcMarshalBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) {
fields, err := RPCMarshalBlock(b, inclTx, fullTx) fields, err := RPCMarshalBlock(b, inclTx, fullTx, pea.B.Config.ChainConfig.Clique != nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1085,7 +1096,7 @@ func (pea *PublicEthAPI) rpcMarshalBlock(b *types.Block, inclTx bool, fullTx boo
// rpcMarshalBlockWithUncleHashes uses the generalized output filler, then adds the total difficulty field // rpcMarshalBlockWithUncleHashes uses the generalized output filler, then adds the total difficulty field
func (pea *PublicEthAPI) rpcMarshalBlockWithUncleHashes(b *types.Block, uncleHashes []common.Hash, inclTx bool, fullTx bool) (map[string]interface{}, error) { func (pea *PublicEthAPI) rpcMarshalBlockWithUncleHashes(b *types.Block, uncleHashes []common.Hash, inclTx bool, fullTx bool) (map[string]interface{}, error) {
fields, err := RPCMarshalBlockWithUncleHashes(b, uncleHashes, inclTx, fullTx) fields, err := RPCMarshalBlockWithUncleHashes(b, uncleHashes, inclTx, fullTx, pea.B.Config.ChainConfig.Clique != nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1105,3 +1116,42 @@ func toHexSlice(b [][]byte) []string {
} }
return r return r
} }
// decomposeLogs return logs from LogResult.
func decomposeLogs(logCIDs []LogResult) ([]*types.Log, error) {
logs := make([]*types.Log, len(logCIDs))
for i, l := range logCIDs {
topics := make([]common.Hash, 0)
if l.Topic0 != "" {
topics = append(topics, common.HexToHash(l.Topic0))
}
if l.Topic1 != "" {
topics = append(topics, common.HexToHash(l.Topic1))
}
if l.Topic2 != "" {
topics = append(topics, common.HexToHash(l.Topic2))
}
if l.Topic3 != "" {
topics = append(topics, common.HexToHash(l.Topic3))
}
// TODO: should we convert string to uint ?
blockNum, err := strconv.ParseUint(l.BlockNumber, 10, 64)
if err != nil {
return nil, err
}
logs[i] = &types.Log{
Address: common.HexToAddress(l.Address),
Topics: topics,
Data: l.Data,
BlockNumber: blockNum,
TxHash: common.HexToHash(l.TxHash),
TxIndex: uint(l.TxnIndex),
BlockHash: common.HexToHash(l.BlockHash),
Index: uint(l.Index),
}
}
return logs, nil
}
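To make the new helper concrete, here is a hypothetical in-package fragment (not part of this PR, reusing api.go's existing imports) showing how one retriever row becomes a core types.Log; all literal values are made up, and only fields decomposeLogs actually reads are populated.

// exampleDecompose shows decomposeLogs on one made-up LogResult row.
func exampleDecompose() ([]*types.Log, error) {
	row := LogResult{
		Address:     "0x1111111111111111111111111111111111111111",
		Topic0:      "0x04", // Topic1-Topic3 left empty, so they are skipped
		Data:        []byte{},
		BlockNumber: "1", // decimal string, parsed with strconv.ParseUint
		TxHash:      "0x2222222222222222222222222222222222222222222222222222222222222222",
		TxnIndex:    0,
		BlockHash:   "0x3333333333333333333333333333333333333333333333333333333333333333",
		Index:       0,
	}
	// Returns a single *types.Log whose Topics slice contains only HexToHash("0x04").
	return decomposeLogs([]LogResult{row})
}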


@ -20,6 +20,7 @@ import (
"context" "context"
"math/big" "math/big"
"strconv" "strconv"
"time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
@ -30,13 +31,13 @@ import (
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff/indexer"
"github.com/ethereum/go-ethereum/statediff/indexer/node"
"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
pgipfsethdb "github.com/vulcanize/ipfs-ethdb/postgres"
eth2 "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
"github.com/vulcanize/ipld-eth-indexer/pkg/shared"
"github.com/vulcanize/ipld-eth-server/pkg/eth" "github.com/vulcanize/ipld-eth-server/pkg/eth"
"github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers" "github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers"
) )
@ -46,9 +47,8 @@ var (
randomHash = crypto.Keccak256Hash(randomAddr.Bytes()) randomHash = crypto.Keccak256Hash(randomAddr.Bytes())
number = rpc.BlockNumber(test_helpers.BlockNumber.Int64()) number = rpc.BlockNumber(test_helpers.BlockNumber.Int64())
londonBlockNum = rpc.BlockNumber(test_helpers.LondonBlockNum.Int64()) londonBlockNum = rpc.BlockNumber(test_helpers.LondonBlockNum.Int64())
wrongNumber = rpc.BlockNumber(number + 1) wrongNumber = number + 1
blockHash = test_helpers.MockBlock.Header().Hash() blockHash = test_helpers.MockBlock.Header().Hash()
londonBlockHash = test_helpers.MockLondonBlock.Header().Hash()
baseFee = test_helpers.MockLondonBlock.BaseFee() baseFee = test_helpers.MockLondonBlock.BaseFee()
ctx = context.Background() ctx = context.Background()
expectedBlock = map[string]interface{}{ expectedBlock = map[string]interface{}{
@ -150,7 +150,7 @@ var (
"contractAddress": nil, "contractAddress": nil,
"logs": test_helpers.MockReceipts[0].Logs, "logs": test_helpers.MockReceipts[0].Logs,
"logsBloom": test_helpers.MockReceipts[0].Bloom, "logsBloom": test_helpers.MockReceipts[0].Bloom,
"root": hexutil.Bytes(test_helpers.MockReceipts[0].PostState), "status": hexutil.Uint(test_helpers.MockReceipts[0].Status),
} }
expectedReceipt2 = map[string]interface{}{ expectedReceipt2 = map[string]interface{}{
"blockHash": blockHash, "blockHash": blockHash,
@ -182,6 +182,18 @@ var (
} }
) )
// SetupDB is use to setup a db for watcher tests
func SetupDB() (*postgres.DB, error) {
uri := postgres.DbConnectionString(postgres.ConnectionParams{
User: "vdbm",
Password: "password",
Hostname: "localhost",
Name: "vulcanize_testing",
Port: 8077,
})
return postgres.NewDB(uri, postgres.ConnectionConfig{}, node.Info{})
}
var _ = Describe("API", func() { var _ = Describe("API", func() {
var ( var (
db *postgres.DB db *postgres.DB
@ -191,21 +203,45 @@ var _ = Describe("API", func() {
// Test db setup, rather than using BeforeEach we only need to setup once since the tests do not mutate the database // Test db setup, rather than using BeforeEach we only need to setup once since the tests do not mutate the database
// Note: if you focus one of the tests be sure to focus this and the defered It() // Note: if you focus one of the tests be sure to focus this and the defered It()
It("test init", func() { It("test init", func() {
var err error var (
db, err = shared.SetupDB() err error
tx *indexer.BlockTx
)
db, err = SetupDB()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
indexAndPublisher := eth2.NewIPLDPublisher(db) indexAndPublisher := indexer.NewStateDiffIndexer(chainConfig, db)
backend, err := eth.NewEthBackend(db, &eth.Config{ backend, err := eth.NewEthBackend(db, &eth.Config{
ChainConfig: chainConfig, ChainConfig: chainConfig,
VmConfig: vm.Config{}, VmConfig: vm.Config{},
RPCGasCap: big.NewInt(10000000000), // Max gas capacity for a rpc call. RPCGasCap: big.NewInt(10000000000), // Max gas capacity for a rpc call.
CacheConfig: pgipfsethdb.CacheConfig{
Name: "api_test",
Size: 3000000, // 3MB
ExpiryDuration: time.Hour,
},
}) })
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
api = eth.NewPublicEthAPI(backend, nil, false) api = eth.NewPublicEthAPI(backend, nil, false)
err = indexAndPublisher.Publish(test_helpers.MockConvertedPayload) tx, err = indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = publishCode(db, test_helpers.ContractCodeHash, test_helpers.ContractCode)
for _, node := range test_helpers.MockStateNodes {
err = indexAndPublisher.PushStateNode(tx, node)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
}
ccHash := sdtypes.CodeAndCodeHash{
Hash: test_helpers.ContractCodeHash,
Code: test_helpers.ContractCode,
}
err = indexAndPublisher.PushCodeAndCodeHash(tx, ccHash)
Expect(err).ToNot(HaveOccurred())
err = tx.Close(err)
Expect(err).ToNot(HaveOccurred())
uncles := test_helpers.MockBlock.Uncles() uncles := test_helpers.MockBlock.Uncles()
uncleHashes := make([]common.Hash, len(uncles)) uncleHashes := make([]common.Hash, len(uncles))
for i, uncle := range uncles { for i, uncle := range uncles {
@ -213,7 +249,13 @@ var _ = Describe("API", func() {
} }
expectedBlock["uncles"] = uncleHashes expectedBlock["uncles"] = uncleHashes
err = indexAndPublisher.Publish(test_helpers.MockConvertedLondonPayload) // setting chain config to for london block
chainConfig.LondonBlock = big.NewInt(2)
indexAndPublisher = indexer.NewStateDiffIndexer(chainConfig, db)
tx, err = indexAndPublisher.PushBlock(test_helpers.MockLondonBlock, test_helpers.MockLondonReceipts, test_helpers.MockLondonBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Close(err)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
}) })
@ -339,8 +381,7 @@ var _ = Describe("API", func() {
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
_, ok := block["baseFee"] _, ok := block["baseFee"]
Expect(ok).To(Equal(false)) Expect(ok).To(Equal(false))
block, err = api.GetBlockByHash(ctx, test_helpers.MockLondonBlock.Hash(), false)
block, err = api.GetBlockByHash(ctx, londonBlockHash, false)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(block["baseFee"].(*big.Int)).To(Equal(baseFee)) Expect(block["baseFee"].(*big.Int)).To(Equal(baseFee))
}) })
@ -440,14 +481,14 @@ var _ = Describe("API", func() {
Describe("eth_getBlockTransactionCountByNumber", func() { Describe("eth_getBlockTransactionCountByNumber", func() {
It("Retrieves the number of transactions in the canonical block with the provided number", func() { It("Retrieves the number of transactions in the canonical block with the provided number", func() {
count := api.GetBlockTransactionCountByNumber(ctx, number) count := api.GetBlockTransactionCountByNumber(ctx, number)
Expect(uint64(*count)).To(Equal(uint64(3))) Expect(uint64(*count)).To(Equal(uint64(4)))
}) })
}) })
Describe("eth_getBlockTransactionCountByHash", func() { Describe("eth_getBlockTransactionCountByHash", func() {
It("Retrieves the number of transactions in the block with the provided hash ", func() { It("Retrieves the number of transactions in the block with the provided hash ", func() {
count := api.GetBlockTransactionCountByHash(ctx, blockHash) count := api.GetBlockTransactionCountByHash(ctx, blockHash)
Expect(uint64(*count)).To(Equal(uint64(3))) Expect(uint64(*count)).To(Equal(uint64(4)))
}) })
}) })
@ -490,7 +531,7 @@ var _ = Describe("API", func() {
}) })
It("Retrieves the GasFeeCap and GasTipCap for dynamic transaction from the london block hash", func() { It("Retrieves the GasFeeCap and GasTipCap for dynamic transaction from the london block hash", func() {
tx := api.GetTransactionByBlockHashAndIndex(ctx, londonBlockHash, 0) tx := api.GetTransactionByBlockHashAndIndex(ctx, test_helpers.MockLondonBlock.Hash(), 0)
Expect(tx).ToNot(BeNil()) Expect(tx).ToNot(BeNil())
Expect(tx.GasFeeCap).To(Equal((*hexutil.Big)(test_helpers.MockLondonTransactions[0].GasFeeCap()))) Expect(tx.GasFeeCap).To(Equal((*hexutil.Big)(test_helpers.MockLondonTransactions[0].GasFeeCap())))
Expect(tx.GasTipCap).To(Equal((*hexutil.Big)(test_helpers.MockLondonTransactions[0].GasTipCap()))) Expect(tx.GasTipCap).To(Equal((*hexutil.Big)(test_helpers.MockLondonTransactions[0].GasTipCap())))
@ -610,7 +651,13 @@ var _ = Describe("API", func() {
crit := filters.FilterCriteria{ crit := filters.FilterCriteria{
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{ {
common.HexToHash("0x04"), common.HexToHash("0x0c"),
},
{
common.HexToHash("0x0a"),
},
{
common.HexToHash("0x0b"),
}, },
}, },
FromBlock: test_helpers.MockBlock.Number(), FromBlock: test_helpers.MockBlock.Number(),
@ -618,6 +665,58 @@ var _ = Describe("API", func() {
} }
logs, err := api.GetLogs(ctx, crit) logs, err := api.GetLogs(ctx, crit)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(0))
crit = filters.FilterCriteria{
Topics: [][]common.Hash{
{
common.HexToHash("0x08"),
},
{
common.HexToHash("0x0a"),
},
{
common.HexToHash("0x0c"),
},
},
FromBlock: test_helpers.MockBlock.Number(),
ToBlock: test_helpers.MockBlock.Number(),
}
logs, err = api.GetLogs(ctx, crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(0))
crit = filters.FilterCriteria{
Topics: [][]common.Hash{
{
common.HexToHash("0x09"),
},
{
common.HexToHash("0x0a"),
},
{
common.HexToHash("0x0b"),
},
},
FromBlock: test_helpers.MockBlock.Number(),
ToBlock: test_helpers.MockBlock.Number(),
}
logs, err = api.GetLogs(ctx, crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog4}))
crit = filters.FilterCriteria{
Topics: [][]common.Hash{
{
common.HexToHash("0x04"),
},
},
FromBlock: test_helpers.MockBlock.Number(),
ToBlock: test_helpers.MockBlock.Number(),
}
logs, err = api.GetLogs(ctx, crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1)) Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
@ -775,8 +874,8 @@ var _ = Describe("API", func() {
} }
logs, err = api.GetLogs(ctx, crit) logs, err = api.GetLogs(ctx, crit)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(2)) Expect(len(logs)).To(Equal(6))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2, test_helpers.MockLog3, test_helpers.MockLog4, test_helpers.MockLog5, test_helpers.MockLog6}))
}) })
It("Uses the provided blockhash if one is provided", func() { It("Uses the provided blockhash if one is provided", func() {
@ -911,8 +1010,8 @@ var _ = Describe("API", func() {
} }
logs, err = api.GetLogs(ctx, crit) logs, err = api.GetLogs(ctx, crit)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(2)) Expect(len(logs)).To(Equal(6))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2, test_helpers.MockLog3, test_helpers.MockLog4, test_helpers.MockLog5, test_helpers.MockLog6}))
}) })
It("Filters on contract address if any are provided", func() { It("Filters on contract address if any are provided", func() {
@ -1034,20 +1133,3 @@ var _ = Describe("API", func() {
}) })
}) })
}) })
func publishCode(db *postgres.DB, codeHash common.Hash, code []byte) error {
tx, err := db.Beginx()
if err != nil {
return err
}
mhKey, err := shared.MultihashKeyFromKeccak256(codeHash)
if err != nil {
tx.Rollback()
return err
}
if err := shared.PublishDirect(tx, mhKey, code); err != nil {
tx.Rollback()
return err
}
return tx.Commit()
}


@ -39,14 +39,11 @@ import (
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff/indexer/ipfs"
"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
pgipfsethdb "github.com/vulcanize/ipfs-ethdb/postgres"
ipfsethdb "github.com/vulcanize/ipfs-ethdb"
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
shared2 "github.com/vulcanize/ipld-eth-indexer/pkg/shared"
"github.com/vulcanize/ipld-eth-server/pkg/shared"
) )
var ( var (
@ -54,6 +51,10 @@ var (
errNegativeBlockNumber = errors.New("negative block number not supported") errNegativeBlockNumber = errors.New("negative block number not supported")
errHeaderHashNotFound = errors.New("header for hash not found") errHeaderHashNotFound = errors.New("header for hash not found")
errHeaderNotFound = errors.New("header not found") errHeaderNotFound = errors.New("header not found")
// errMissingSignature is returned if a block's extra-data section doesn't seem
// to contain a 65 byte secp256k1 signature.
errMissingSignature = errors.New("extra-data 65 byte signature suffix missing")
) )
const ( const (
@ -103,11 +104,13 @@ type Config struct {
VmConfig vm.Config VmConfig vm.Config
DefaultSender *common.Address DefaultSender *common.Address
RPCGasCap *big.Int RPCGasCap *big.Int
CacheConfig pgipfsethdb.CacheConfig
} }
func NewEthBackend(db *postgres.DB, c *Config) (*Backend, error) { func NewEthBackend(db *postgres.DB, c *Config) (*Backend, error) {
r := NewCIDRetriever(db) r := NewCIDRetriever(db)
ethDB := ipfsethdb.NewDatabase(db.DB)
ethDB := pgipfsethdb.NewDatabase(db.DB, c.CacheConfig)
return &Backend{ return &Backend{
DB: db, DB: db,
Retriever: r, Retriever: r,
@ -714,7 +717,7 @@ func (b *Backend) GetCodeByHash(ctx context.Context, address common.Address, has
return nil, err return nil, err
} }
var mhKey string var mhKey string
mhKey, err = shared2.MultihashKeyFromKeccak256(common.BytesToHash(codeHash)) mhKey, err = shared.MultihashKeyFromKeccak256(common.BytesToHash(codeHash))
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -26,17 +26,22 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
) )
// RPCMarshalHeader converts the given header to the RPC output. // RPCMarshalHeader converts the given header to the RPC output.
// This function is eth/internal so we have to make our own version here... // This function is eth/internal so we have to make our own version here...
func RPCMarshalHeader(head *types.Header) map[string]interface{} { func RPCMarshalHeader(head *types.Header, extractMiner bool) map[string]interface{} {
if extractMiner {
if err := recoverMiner(head); err != nil {
return nil
}
}
headerMap := map[string]interface{}{ headerMap := map[string]interface{}{
"number": (*hexutil.Big)(head.Number), "number": (*hexutil.Big)(head.Number),
"hash": head.Hash(), "hash": head.Hash(),
@ -66,8 +71,8 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} {
// RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are // RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are
// returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain // returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain
// transaction hashes. // transaction hashes.
func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, extractMiner bool) (map[string]interface{}, error) {
fields := RPCMarshalHeader(block.Header()) fields := RPCMarshalHeader(block.Header(), extractMiner)
fields["size"] = hexutil.Uint64(block.Size()) fields["size"] = hexutil.Uint64(block.Size())
if inclTx { if inclTx {
@ -100,8 +105,8 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool) (map[string]i
} }
// RPCMarshalBlockWithUncleHashes marshals the block with the provided uncle hashes // RPCMarshalBlockWithUncleHashes marshals the block with the provided uncle hashes
func RPCMarshalBlockWithUncleHashes(block *types.Block, uncleHashes []common.Hash, inclTx bool, fullTx bool) (map[string]interface{}, error) { func RPCMarshalBlockWithUncleHashes(block *types.Block, uncleHashes []common.Hash, inclTx bool, fullTx bool, extractMiner bool) (map[string]interface{}, error) {
fields := RPCMarshalHeader(block.Header()) fields := RPCMarshalHeader(block.Header(), extractMiner)
fields["size"] = hexutil.Uint64(block.Size()) fields["size"] = hexutil.Uint64(block.Size())
if inclTx { if inclTx {
@ -123,8 +128,8 @@ func RPCMarshalBlockWithUncleHashes(block *types.Block, uncleHashes []common.Has
} }
fields["transactions"] = transactions fields["transactions"] = transactions
} }
fields["uncles"] = uncleHashes
fields["uncles"] = uncleHashes
return fields, nil return fields, nil
} }
@ -271,122 +276,6 @@ func newRPCTransactionFromBlockIndex(b *types.Block, index uint64) *RPCTransacti
return NewRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index, b.BaseFee()) return NewRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index, b.BaseFee())
} }
// extractLogsOfInterest returns logs from the receipt IPLD
func extractLogsOfInterest(config *params.ChainConfig, blockHash common.Hash, blockNumber uint64,
txs types.Transactions, rctIPLDs []ipfs.BlockModel, filter ReceiptFilter) ([]*types.Log, error) {
receipts := make(types.Receipts, len(rctIPLDs))
for i, rctBytes := range rctIPLDs {
rct := new(types.Receipt)
if err := rct.UnmarshalBinary(rctBytes.Data); err != nil {
return nil, err
}
receipts[i] = rct
}
err := receipts.DeriveFields(config, blockHash, blockNumber, txs)
if err != nil {
return nil, err
}
var unfilteredLogs []*types.Log
for _, receipt := range receipts {
unfilteredLogs = append(unfilteredLogs, receipt.Logs...)
}
adders := make([]common.Address, len(filter.LogAddresses))
for i, addr := range filter.LogAddresses {
adders[i] = common.HexToAddress(addr)
}
topics := make([][]common.Hash, len(filter.Topics))
for i, v := range filter.Topics {
topics[i] = make([]common.Hash, len(v))
for j, topic := range v {
topics[i][j] = common.HexToHash(topic)
}
}
logs := filterLogs(unfilteredLogs, nil, nil, adders, topics)
return logs, nil
}
func includes(addresses []common.Address, a common.Address) bool {
for _, addr := range addresses {
if addr == a {
return true
}
}
return false
}
// filterLogs creates a slice of logs matching the given criteria.
func filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []common.Address, topics [][]common.Hash) []*types.Log {
var ret []*types.Log
Logs:
for _, log := range logs {
if fromBlock != nil && fromBlock.Int64() >= 0 && fromBlock.Uint64() > log.BlockNumber {
continue
}
if toBlock != nil && toBlock.Int64() >= 0 && toBlock.Uint64() < log.BlockNumber {
continue
}
if len(addresses) > 0 && !includes(addresses, log.Address) {
continue
}
// If the to filtered topics is greater than the amount of topics in logs, skip.
if len(topics) > len(log.Topics) {
continue
}
for i, sub := range topics {
match := len(sub) == 0 // empty rule set == wildcard
for _, topic := range sub {
if log.Topics[i] == topic {
match = true
break
}
}
if !match {
continue Logs
}
}
ret = append(ret, log)
}
return ret
}
// returns true if the log matches on the filter
func wantedLog(wantedTopics [][]string, actualTopics []common.Hash) bool {
// actualTopics will always have length <= 4
// wantedTopics will always have length 4
matches := 0
for i, actualTopic := range actualTopics {
// If we have topics in this filter slot, count as a match if the actualTopic matches one of the ones in this filter slot
if len(wantedTopics[i]) > 0 {
matches += sliceContainsHash(wantedTopics[i], actualTopic)
} else {
// Filter slot is empty, not matching any topics at this slot => counts as a match
matches++
}
}
if matches == len(actualTopics) {
return true
}
return false
}
// returns 1 if the slice contains the hash, 0 if it does not
func sliceContainsHash(slice []string, hash common.Hash) int {
for _, str := range slice {
if str == hash.String() {
return 1
}
}
return 0
}
func toFilterArg(q ethereum.FilterQuery) (interface{}, error) { func toFilterArg(q ethereum.FilterQuery) (interface{}, error) {
arg := map[string]interface{}{ arg := map[string]interface{}{
"address": q.Addresses, "address": q.Addresses,
@ -414,3 +303,24 @@ func toBlockNumArg(number *big.Int) string {
} }
return hexutil.EncodeBig(number) return hexutil.EncodeBig(number)
} }
func recoverMiner(header *types.Header) error {
// Retrieve the signature from the header extra-data
if len(header.Extra) < crypto.SignatureLength {
return errMissingSignature
}
signature := header.Extra[len(header.Extra)-crypto.SignatureLength:]
// Recover the public key and the Ethereum address
pubkey, err := crypto.Ecrecover(clique.SealHash(header).Bytes(), signature)
if err != nil {
return err
}
var signer common.Address
copy(signer[:], crypto.Keccak256(pubkey[1:])[12:])
header.Coinbase = signer
return nil
}
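A brief, hypothetical in-package helper (not part of this PR) showing how the new extractMiner flag and recoverMiner fit together; the flag value would come from pea.B.Config.ChainConfig.Clique != nil, as in rpcMarshalHeader in api.go earlier in this diff.

// marshalHeaderExample marshals a header, recovering the miner only for Clique chains.
func marshalHeaderExample(header *types.Header, isClique bool) map[string]interface{} {
	// When isClique is true, RPCMarshalHeader calls recoverMiner, which rewrites
	// header.Coinbase with the signer address recovered from the 65-byte secp256k1
	// signature at the tail of header.Extra; on any failure it returns nil.
	return RPCMarshalHeader(header, isClique)
}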


@ -22,13 +22,12 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/lib/pq" "github.com/lib/pq"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
"github.com/vulcanize/ipld-eth-server/pkg/shared" "github.com/vulcanize/ipld-eth-server/pkg/shared"
) )
@ -86,10 +85,10 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
}() }()
// Retrieve cached header CIDs at this block height // Retrieve cached header CIDs at this block height
var headers []eth.HeaderModel var headers []models.HeaderModel
headers, err = ecr.RetrieveHeaderCIDs(tx, blockNumber) headers, err = ecr.RetrieveHeaderCIDs(tx, blockNumber)
if err != nil { if err != nil {
log.Error("header cid retrieval error") log.Error("header cid retrieval error", err)
return nil, true, err return nil, true, err
} }
cws := make([]CIDWrapper, len(headers)) cws := make([]CIDWrapper, len(headers))
@ -102,7 +101,7 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
empty = false empty = false
if filter.HeaderFilter.Uncles { if filter.HeaderFilter.Uncles {
// Retrieve uncle cids for this header id // Retrieve uncle cids for this header id
var uncleCIDs []eth.UncleModel var uncleCIDs []models.UncleModel
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, header.ID) uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, header.ID)
if err != nil { if err != nil {
log.Error("uncle cid retrieval error") log.Error("uncle cid retrieval error")
@ -166,18 +165,18 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
} }
// RetrieveHeaderCIDs retrieves and returns all of the header cids at the provided blockheight // RetrieveHeaderCIDs retrieves and returns all of the header cids at the provided blockheight
func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]eth.HeaderModel, error) { func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]models.HeaderModel, error) {
log.Debug("retrieving header cids for block ", blockNumber) log.Debug("retrieving header cids for block ", blockNumber)
headers := make([]eth.HeaderModel, 0) headers := make([]models.HeaderModel, 0)
pgStr := `SELECT * FROM eth.header_cids pgStr := `SELECT * FROM eth.header_cids
WHERE block_number = $1` WHERE block_number = $1`
return headers, tx.Select(&headers, pgStr, blockNumber) return headers, tx.Select(&headers, pgStr, blockNumber)
} }
// RetrieveUncleCIDsByHeaderID retrieves and returns all of the uncle cids for the provided header // RetrieveUncleCIDsByHeaderID retrieves and returns all of the uncle cids for the provided header
func (ecr *CIDRetriever) RetrieveUncleCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ([]eth.UncleModel, error) { func (ecr *CIDRetriever) RetrieveUncleCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ([]models.UncleModel, error) {
log.Debug("retrieving uncle cids for block id ", headerID) log.Debug("retrieving uncle cids for block id ", headerID)
headers := make([]eth.UncleModel, 0) headers := make([]models.UncleModel, 0)
pgStr := `SELECT * FROM eth.uncle_cids pgStr := `SELECT * FROM eth.uncle_cids
WHERE header_id = $1` WHERE header_id = $1`
return headers, tx.Select(&headers, pgStr, headerID) return headers, tx.Select(&headers, pgStr, headerID)
@ -185,10 +184,10 @@ func (ecr *CIDRetriever) RetrieveUncleCIDsByHeaderID(tx *sqlx.Tx, headerID int64
// RetrieveTxCIDs retrieves and returns all of the trx cids at the provided blockheight that conform to the provided filter parameters // RetrieveTxCIDs retrieves and returns all of the trx cids at the provided blockheight that conform to the provided filter parameters
// also returns the ids for the returned transaction cids // also returns the ids for the returned transaction cids
func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID int64) ([]eth.TxModel, error) { func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID int64) ([]models.TxModel, error) {
log.Debug("retrieving transaction cids for header id ", headerID) log.Debug("retrieving transaction cids for header id ", headerID)
args := make([]interface{}, 0, 3) args := make([]interface{}, 0, 3)
results := make([]eth.TxModel, 0) results := make([]models.TxModel, 0)
id := 1 id := 1
pgStr := fmt.Sprintf(`SELECT transaction_cids.id, transaction_cids.header_id, pgStr := fmt.Sprintf(`SELECT transaction_cids.id, transaction_cids.header_id,
transaction_cids.tx_hash, transaction_cids.cid, transaction_cids.mh_key, transaction_cids.tx_hash, transaction_cids.cid, transaction_cids.mh_key,
@ -210,94 +209,178 @@ func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID
return results, tx.Select(&results, pgStr, args...) return results, tx.Select(&results, pgStr, args...)
} }
// RetrieveRctCIDsByHeaderID retrieves and returns all of the rct cids at the provided header ID that conform to the provided func topicFilterCondition(id *int, topics [][]string, args []interface{}, pgStr string, first bool) (string, []interface{}) {
// filter parameters and correspond to the provided tx ids for i, topicSet := range topics {
func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter ReceiptFilter, headerID int64, trxIds []int64) ([]eth.ReceiptModel, error) { if len(topicSet) == 0 {
log.Debug("retrieving receipt cids for header id ", headerID) continue
args := make([]interface{}, 0, 4) }
-	pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.mh_key,
-	receipt_cids.contract, receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s,
-	receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts
-	FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
-	WHERE receipt_cids.tx_id = transaction_cids.id
-	AND transaction_cids.header_id = header_cids.id
-	AND header_cids.id = $1`
-	id := 2
-	args = append(args, headerID)
+		if !first {
+			pgStr += " AND"
+		} else {
+			first = false
+		}
+		pgStr += fmt.Sprintf(` eth.log_cids.topic%d = ANY ($%d)`, i, *id)
+		args = append(args, pq.Array(topicSet))
+		*id++
+	}
+	return pgStr, args
+}
+func logFilterCondition(id *int, pgStr string, args []interface{}, rctFilter ReceiptFilter) (string, []interface{}) {
	if len(rctFilter.LogAddresses) > 0 {
-		// Filter on log contract addresses if there are any
-		pgStr += fmt.Sprintf(` AND ((receipt_cids.log_contracts && $%d::VARCHAR(66)[]`, id)
+		pgStr += fmt.Sprintf(` AND eth.log_cids.address = ANY ($%d)`, *id)
		args = append(args, pq.Array(rctFilter.LogAddresses))
-		id++
+		*id++
+	}
	// Filter on topics if there are any
	if hasTopics(rctFilter.Topics) {
-		pgStr += " AND ("
-		first := true
-		for i, topicSet := range rctFilter.Topics {
-			if i < 4 && len(topicSet) > 0 {
-				if first {
-					pgStr += fmt.Sprintf(`receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
-					first = false
-				} else {
-					pgStr += fmt.Sprintf(` AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
-				}
-				args = append(args, pq.Array(topicSet))
-				id++
-			}
+		pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, false)
	}
+	return pgStr, args
+}
+func receiptFilterConditions(id *int, pgStr string, args []interface{}, rctFilter ReceiptFilter, trxIds []int64) (string, []interface{}) {
+	rctCond := " AND (receipt_cids.id = ANY ( "
+	logQuery := "SELECT receipt_id FROM eth.log_cids WHERE"
+	if len(rctFilter.LogAddresses) > 0 {
+		// Filter on log contract addresses if there are any
+		pgStr += fmt.Sprintf(`%s %s eth.log_cids.address = ANY ($%d)`, rctCond, logQuery, *id)
+		args = append(args, pq.Array(rctFilter.LogAddresses))
+		*id++
+		// Filter on topics if there are any
+		if hasTopics(rctFilter.Topics) {
+			pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, false)
		}
		pgStr += ")"
-		}
-		pgStr += ")"
		// Filter on txIDs if there are any and we are matching txs
		if rctFilter.MatchTxs && len(trxIds) > 0 {
-			pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
+			pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, *id)
			args = append(args, pq.Array(trxIds))
		}
		pgStr += ")"
	} else { // If there are no contract addresses to filter on
		// Filter on topics if there are any
		if hasTopics(rctFilter.Topics) {
-			pgStr += " AND (("
-			first := true
-			for i, topicSet := range rctFilter.Topics {
-				if i < 4 && len(topicSet) > 0 {
-					if first {
-						pgStr += fmt.Sprintf(`receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
-						first = false
-					} else {
-						pgStr += fmt.Sprintf(` AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
-					}
-					args = append(args, pq.Array(topicSet))
-					id++
-				}
-			}
+			pgStr += rctCond + logQuery
+			pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, true)
			pgStr += ")"
			// Filter on txIDs if there are any and we are matching txs
			if rctFilter.MatchTxs && len(trxIds) > 0 {
-				pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
+				pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, *id)
				args = append(args, pq.Array(trxIds))
			}
			pgStr += ")"
		} else if rctFilter.MatchTxs && len(trxIds) > 0 {
			// If there are no contract addresses or topics to filter on,
			// Filter on txIDs if there are any and we are matching txs
-			pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
+			pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d::INTEGER[])`, *id)
			args = append(args, pq.Array(trxIds))
		}
	}
+	return pgStr, args
+}
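For orientation, an illustrative sketch (not part of this change): with one contract address and one topic0 value, the helpers above extend a base receipt query with a sub-select against eth.log_cids roughly as shown below. The block predicate, placeholder hex values, and the standalone program wrapper are assumptions made only for the example.

	package main

	import (
		"fmt"

		"github.com/lib/pq"
	)

	func main() {
		// Base query; $1 is assumed to be bound to a block predicate.
		pgStr := `SELECT receipt_cids.id FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
			WHERE receipt_cids.tx_id = transaction_cids.id
			AND transaction_cids.header_id = header_cids.id
			AND header_cids.block_number = $1`
		args := []interface{}{int64(1000000)}
		id := 2

		// Same shape of clause the helpers build: restrict receipts to those whose
		// logs in eth.log_cids match the address and topic0 predicates.
		pgStr += fmt.Sprintf(` AND (receipt_cids.id = ANY ( SELECT receipt_id FROM eth.log_cids WHERE eth.log_cids.address = ANY ($%d)`, id)
		args = append(args, pq.Array([]string{"0x1ca7c995f8eF0A2989BbcE08D5B7Efe50A584aa1"}))
		id++
		pgStr += fmt.Sprintf(` AND eth.log_cids.topic0 = ANY ($%d)))`, id)
		args = append(args, pq.Array([]string{"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"}))

		fmt.Println(pgStr, args)
	}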
// RetrieveRctCIDsByHeaderID retrieves and returns all of the rct cids at the provided header ID that conform to the provided
// filter parameters and correspond to the provided tx ids
func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter ReceiptFilter, headerID int64, trxIds []int64) ([]models.ReceiptModel, error) {
log.Debug("retrieving receipt cids for header id ", headerID)
args := make([]interface{}, 0, 4)
pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.mh_key,
receipt_cids.contract, receipt_cids.contract_hash
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.id
AND transaction_cids.header_id = header_cids.id
AND header_cids.id = $1`
id := 2
args = append(args, headerID)
pgStr, args = receiptFilterConditions(&id, pgStr, args, rctFilter, trxIds)
	pgStr += ` ORDER BY transaction_cids.index`
-	receiptCids := make([]eth.ReceiptModel, 0)
+	receiptCids := make([]models.ReceiptModel, 0)
	return receiptCids, tx.Select(&receiptCids, pgStr, args...)
}
// RetrieveFilteredGQLLogs retrieves and returns all the log cIDs provided blockHash that conform to the provided
// filter parameters.
func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockHash *common.Hash) ([]LogResult, error) {
log.Debug("retrieving log cids for receipt ids")
args := make([]interface{}, 0, 4)
id := 1
pgStr := `SELECT eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.receipt_id,
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
eth.log_cids.log_data, eth.transaction_cids.tx_hash, data, eth.receipt_cids.cid, eth.receipt_cids.post_status
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids, public.blocks
WHERE eth.log_cids.receipt_id = receipt_cids.id
AND receipt_cids.tx_id = transaction_cids.id
AND transaction_cids.header_id = header_cids.id
AND log_cids.leaf_mh_key = blocks.key AND header_cids.block_hash = $1`
args = append(args, blockHash.String())
id++
pgStr, args = logFilterCondition(&id, pgStr, args, rctFilter)
pgStr += ` ORDER BY log_cids.index`
logCIDs := make([]LogResult, 0)
err := tx.Select(&logCIDs, pgStr, args...)
if err != nil {
return nil, err
}
return logCIDs, nil
}
// RetrieveFilteredLog retrieves and returns all the log cIDs provided blockHeight or blockHash that conform to the provided
// filter parameters.
func (ecr *CIDRetriever) RetrieveFilteredLog(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash) ([]LogResult, error) {
log.Debug("retrieving log cids for receipt ids")
args := make([]interface{}, 0, 4)
pgStr := `SELECT eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.receipt_id,
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
eth.log_cids.log_data, eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index,
header_cids.block_hash, header_cids.block_number
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE eth.log_cids.receipt_id = receipt_cids.id
AND receipt_cids.tx_id = transaction_cids.id
AND transaction_cids.header_id = header_cids.id`
id := 1
if blockNumber > 0 {
pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
args = append(args, blockNumber)
id++
}
if blockHash != nil {
pgStr += fmt.Sprintf(` AND header_cids.block_hash = $%d`, id)
args = append(args, blockHash.String())
id++
}
pgStr, args = logFilterCondition(&id, pgStr, args, rctFilter)
pgStr += ` ORDER BY log_cids.index`
logCIDs := make([]LogResult, 0)
err := tx.Select(&logCIDs, pgStr, args...)
if err != nil {
return nil, err
}
return logCIDs, nil
}
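A hypothetical caller sketch (assumed wiring and placeholder values, not taken from this diff) of how an eth_getLogs-style request might drive RetrieveFilteredLog inside a sqlx transaction; the filter field values are 0x-prefixed hex strings chosen for illustration only:

	func exampleGetLogs(db *sqlx.DB, retriever *CIDRetriever, blockNumber int64) ([]LogResult, error) {
		tx, err := db.Beginx()
		if err != nil {
			return nil, err
		}
		// Read-only usage: discard the transaction when finished.
		defer tx.Rollback()

		filter := ReceiptFilter{
			LogAddresses: []string{"0x1ca7c995f8eF0A2989BbcE08D5B7Efe50A584aa1"},
			Topics:       [][]string{{"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"}},
		}
		return retriever.RetrieveFilteredLog(tx, filter, blockNumber, nil)
	}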
// RetrieveRctCIDs retrieves and returns all of the rct cids at the provided blockheight or block hash that conform to the provided
// filter parameters and correspond to the provided tx ids
-func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, trxIds []int64) ([]eth.ReceiptModel, error) {
+func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, trxIds []int64) ([]models.ReceiptModel, error) {
	log.Debug("retrieving receipt cids for block ", blockNumber)
	args := make([]interface{}, 0, 5)
-	pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.mh_key,
-	receipt_cids.contract, receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s,
-	receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts
+	pgStr := `SELECT receipt_cids.id, receipt_cids.cid, receipt_cids.mh_key, receipt_cids.tx_id
	FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
	WHERE receipt_cids.tx_id = transaction_cids.id
	AND transaction_cids.header_id = header_cids.id`
@ -313,77 +396,10 @@ func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, b
		id++
	}
-	// TODO: Add the below filters when we have log index in DB.
+	pgStr, args = receiptFilterConditions(&id, pgStr, args, rctFilter, trxIds)
if true {
pgStr += ` ORDER BY transaction_cids.index`
receiptCids := make([]eth.ReceiptModel, 0)
return receiptCids, tx.Select(&receiptCids, pgStr, args...)
}
if len(rctFilter.LogAddresses) > 0 {
// Filter on log contract addresses if there are any
pgStr += fmt.Sprintf(` AND ((receipt_cids.log_contracts && $%d::VARCHAR(66)[]`, id)
args = append(args, pq.Array(rctFilter.LogAddresses))
id++
// Filter on topics if there are any
if hasTopics(rctFilter.Topics) {
pgStr += " AND ("
first := true
for i, topicSet := range rctFilter.Topics {
if i < 4 && len(topicSet) > 0 {
if first {
pgStr += fmt.Sprintf(`receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
first = false
} else {
pgStr += fmt.Sprintf(` AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
}
args = append(args, pq.Array(topicSet))
id++
}
}
pgStr += ")"
}
pgStr += ")"
// Filter on txIDs if there are any and we are matching txs
if rctFilter.MatchTxs && len(trxIds) > 0 {
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
args = append(args, pq.Array(trxIds))
}
pgStr += ")"
} else { // If there are no contract addresses to filter on
// Filter on topics if there are any
if hasTopics(rctFilter.Topics) {
pgStr += " AND (("
first := true
for i, topicSet := range rctFilter.Topics {
if i < 4 && len(topicSet) > 0 {
if first {
pgStr += fmt.Sprintf(`receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
first = false
} else {
pgStr += fmt.Sprintf(` AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
}
args = append(args, pq.Array(topicSet))
id++
}
}
pgStr += ")"
// Filter on txIDs if there are any and we are matching txs
if rctFilter.MatchTxs && len(trxIds) > 0 {
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
args = append(args, pq.Array(trxIds))
}
pgStr += ")"
} else if rctFilter.MatchTxs && len(trxIds) > 0 {
// If there are no contract addresses or topics to filter on,
// Filter on txIDs if there are any and we are matching txs
pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
args = append(args, pq.Array(trxIds))
}
}
	pgStr += ` ORDER BY transaction_cids.index`
-	receiptCids := make([]eth.ReceiptModel, 0)
+	receiptCids := make([]models.ReceiptModel, 0)
	return receiptCids, tx.Select(&receiptCids, pgStr, args...)
}
@ -397,7 +413,7 @@ func hasTopics(topics [][]string) bool {
}
// RetrieveStateCIDs retrieves and returns all of the state node cids at the provided header ID that conform to the provided filter parameters
-func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, headerID int64) ([]eth.StateNodeModel, error) {
+func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, headerID int64) ([]models.StateNodeModel, error) {
	log.Debug("retrieving state cids for header id ", headerID)
	args := make([]interface{}, 0, 2)
	pgStr := `SELECT state_cids.id, state_cids.header_id,
@ -417,12 +433,12 @@ func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter,
	if !stateFilter.IntermediateNodes {
		pgStr += ` AND state_cids.node_type = 2`
	}
-	stateNodeCIDs := make([]eth.StateNodeModel, 0)
+	stateNodeCIDs := make([]models.StateNodeModel, 0)
	return stateNodeCIDs, tx.Select(&stateNodeCIDs, pgStr, args...)
}
// RetrieveStorageCIDs retrieves and returns all of the storage node cids at the provided header id that conform to the provided filter parameters
-func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, headerID int64) ([]eth.StorageNodeWithStateKeyModel, error) {
+func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, headerID int64) ([]models.StorageNodeWithStateKeyModel, error) {
	log.Debug("retrieving storage cids for header id ", headerID)
	args := make([]interface{}, 0, 3)
	pgStr := `SELECT storage_cids.id, storage_cids.state_id, storage_cids.storage_leaf_key, storage_cids.node_type,
@ -450,18 +466,18 @@ func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageF
	if !storageFilter.IntermediateNodes {
		pgStr += ` AND storage_cids.node_type = 2`
	}
-	storageNodeCIDs := make([]eth.StorageNodeWithStateKeyModel, 0)
+	storageNodeCIDs := make([]models.StorageNodeWithStateKeyModel, 0)
	return storageNodeCIDs, tx.Select(&storageNodeCIDs, pgStr, args...)
}
// RetrieveBlockByHash returns all of the CIDs needed to compose an entire block, for a given block hash
-func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (eth.HeaderModel, []eth.UncleModel, []eth.TxModel, []eth.ReceiptModel, error) {
+func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (models.HeaderModel, []models.UncleModel, []models.TxModel, []models.ReceiptModel, error) {
	log.Debug("retrieving block cids for block hash ", blockHash.String())
	// Begin new db tx
	tx, err := ecr.db.Beginx()
	if err != nil {
-		return eth.HeaderModel{}, nil, nil, nil, err
+		return models.HeaderModel{}, nil, nil, nil, err
	}
	defer func() {
		if p := recover(); p != nil {
@ -474,29 +490,29 @@ func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (eth.HeaderM
	}
	}()
-	var headerCID eth.HeaderModel
+	var headerCID models.HeaderModel
	headerCID, err = ecr.RetrieveHeaderCIDByHash(tx, blockHash)
	if err != nil {
		log.Error("header cid retrieval error")
-		return eth.HeaderModel{}, nil, nil, nil, err
+		return models.HeaderModel{}, nil, nil, nil, err
	}
-	var uncleCIDs []eth.UncleModel
+	var uncleCIDs []models.UncleModel
	uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID.ID)
	if err != nil {
		log.Error("uncle cid retrieval error")
-		return eth.HeaderModel{}, nil, nil, nil, err
+		return models.HeaderModel{}, nil, nil, nil, err
	}
-	var txCIDs []eth.TxModel
+	var txCIDs []models.TxModel
	txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID.ID)
	if err != nil {
		log.Error("tx cid retrieval error")
-		return eth.HeaderModel{}, nil, nil, nil, err
+		return models.HeaderModel{}, nil, nil, nil, err
	}
	txIDs := make([]int64, len(txCIDs))
	for i, txCID := range txCIDs {
		txIDs[i] = txCID.ID
	}
-	var rctCIDs []eth.ReceiptModel
+	var rctCIDs []models.ReceiptModel
	rctCIDs, err = ecr.RetrieveReceiptCIDsByTxIDs(tx, txIDs)
	if err != nil {
		log.Error("rct cid retrieval error")
@ -505,13 +521,13 @@ func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (eth.HeaderM
}
// RetrieveBlockByNumber returns all of the CIDs needed to compose an entire block, for a given block number
-func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (eth.HeaderModel, []eth.UncleModel, []eth.TxModel, []eth.ReceiptModel, error) {
+func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (models.HeaderModel, []models.UncleModel, []models.TxModel, []models.ReceiptModel, error) {
	log.Debug("retrieving block cids for block number ", blockNumber)
	// Begin new db tx
	tx, err := ecr.db.Beginx()
	if err != nil {
-		return eth.HeaderModel{}, nil, nil, nil, err
+		return models.HeaderModel{}, nil, nil, nil, err
	}
	defer func() {
		if p := recover(); p != nil {
@ -524,32 +540,32 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (eth.HeaderMod
	}
	}()
-	var headerCID []eth.HeaderModel
+	var headerCID []models.HeaderModel
	headerCID, err = ecr.RetrieveHeaderCIDs(tx, blockNumber)
	if err != nil {
		log.Error("header cid retrieval error")
-		return eth.HeaderModel{}, nil, nil, nil, err
+		return models.HeaderModel{}, nil, nil, nil, err
	}
	if len(headerCID) < 1 {
-		return eth.HeaderModel{}, nil, nil, nil, fmt.Errorf("header cid retrieval error, no header CIDs found at block %d", blockNumber)
+		return models.HeaderModel{}, nil, nil, nil, fmt.Errorf("header cid retrieval error, no header CIDs found at block %d", blockNumber)
	}
-	var uncleCIDs []eth.UncleModel
+	var uncleCIDs []models.UncleModel
	uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID[0].ID)
	if err != nil {
		log.Error("uncle cid retrieval error")
-		return eth.HeaderModel{}, nil, nil, nil, err
+		return models.HeaderModel{}, nil, nil, nil, err
	}
-	var txCIDs []eth.TxModel
+	var txCIDs []models.TxModel
	txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].ID)
	if err != nil {
		log.Error("tx cid retrieval error")
-		return eth.HeaderModel{}, nil, nil, nil, err
+		return models.HeaderModel{}, nil, nil, nil, err
	}
	txIDs := make([]int64, len(txCIDs))
	for i, txCID := range txCIDs {
		txIDs[i] = txCID.ID
	}
-	var rctCIDs []eth.ReceiptModel
+	var rctCIDs []models.ReceiptModel
	rctCIDs, err = ecr.RetrieveReceiptCIDsByTxIDs(tx, txIDs)
	if err != nil {
		log.Error("rct cid retrieval error")
@ -558,34 +574,33 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (eth.HeaderMod
}
// RetrieveHeaderCIDByHash returns the header for the given block hash
-func (ecr *CIDRetriever) RetrieveHeaderCIDByHash(tx *sqlx.Tx, blockHash common.Hash) (eth.HeaderModel, error) {
+func (ecr *CIDRetriever) RetrieveHeaderCIDByHash(tx *sqlx.Tx, blockHash common.Hash) (models.HeaderModel, error) {
	log.Debug("retrieving header cids for block hash ", blockHash.String())
	pgStr := `SELECT * FROM eth.header_cids
	WHERE block_hash = $1`
-	var headerCID eth.HeaderModel
+	var headerCID models.HeaderModel
	return headerCID, tx.Get(&headerCID, pgStr, blockHash.String())
}
// RetrieveTxCIDsByHeaderID retrieves all tx CIDs for the given header id
-func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ([]eth.TxModel, error) {
+func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ([]models.TxModel, error) {
	log.Debug("retrieving tx cids for block id ", headerID)
	pgStr := `SELECT * FROM eth.transaction_cids
	WHERE header_id = $1
	ORDER BY index`
-	var txCIDs []eth.TxModel
+	var txCIDs []models.TxModel
	return txCIDs, tx.Select(&txCIDs, pgStr, headerID)
}
// RetrieveReceiptCIDsByTxIDs retrieves receipt CIDs by their associated tx IDs
-func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txIDs []int64) ([]eth.ReceiptModel, error) {
+func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txIDs []int64) ([]models.ReceiptModel, error) {
	log.Debugf("retrieving receipt cids for tx ids %v", txIDs)
	pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.mh_key,
-	receipt_cids.contract, receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s,
-	receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts
+	receipt_cids.contract, receipt_cids.contract_hash
	FROM eth.receipt_cids, eth.transaction_cids
	WHERE tx_id = ANY($1::INTEGER[])
	AND receipt_cids.tx_id = transaction_cids.id
	ORDER BY transaction_cids.index`
-	var rctCIDs []eth.ReceiptModel
+	var rctCIDs []models.ReceiptModel
	return rctCIDs, tx.Select(&rctCIDs, pgStr, pq.Array(txIDs))
}


@ -19,17 +19,17 @@ package eth_test
import (
	"math/big"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/statediff/indexer"
+	"github.com/ethereum/go-ethereum/statediff/indexer/models"
+	"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
	"github.com/ethereum/go-ethereum/trie"
-	"github.com/vulcanize/ipld-eth-indexer/pkg/shared"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
-	eth2 "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
-	"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
	"github.com/vulcanize/ipld-eth-server/pkg/eth"
	"github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers"
)
@ -212,14 +212,14 @@ var (
var _ = Describe("Retriever", func() { var _ = Describe("Retriever", func() {
var ( var (
db *postgres.DB db *postgres.DB
-		repo        *eth2.IPLDPublisher
+		diffIndexer *indexer.StateDiffIndexer
		retriever *eth.CIDRetriever
	)
	BeforeEach(func() {
		var err error
-		db, err = shared.SetupDB()
+		db, err = SetupDB()
		Expect(err).ToNot(HaveOccurred())
-		repo = eth2.NewIPLDPublisher(db)
+		diffIndexer = indexer.NewStateDiffIndexer(params.TestChainConfig, db)
retriever = eth.NewCIDRetriever(db) retriever = eth.NewCIDRetriever(db)
}) })
AfterEach(func() { AfterEach(func() {
@ -228,7 +228,14 @@ var _ = Describe("Retriever", func() {
Describe("Retrieve", func() { Describe("Retrieve", func() {
BeforeEach(func() { BeforeEach(func() {
-			err := repo.Publish(test_helpers.MockConvertedPayload)
+			tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
for _, node := range test_helpers.MockStateNodes {
err = diffIndexer.PushStateNode(tx, node)
Expect(err).ToNot(HaveOccurred())
}
err = tx.Close(err)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
}) })
It("Retrieves all CIDs for the given blocknumber when provided an open filter", func() { It("Retrieves all CIDs for the given blocknumber when provided an open filter", func() {
@ -242,11 +249,11 @@ var _ = Describe("Retriever", func() {
expectedHeaderCID.ID = cids[0].Header.ID expectedHeaderCID.ID = cids[0].Header.ID
expectedHeaderCID.NodeID = cids[0].Header.NodeID expectedHeaderCID.NodeID = cids[0].Header.NodeID
Expect(cids[0].Header).To(Equal(expectedHeaderCID)) Expect(cids[0].Header).To(Equal(expectedHeaderCID))
-			Expect(len(cids[0].Transactions)).To(Equal(3))
+			Expect(len(cids[0].Transactions)).To(Equal(4))
			Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[0].CID)).To(BeTrue())
			Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[1].CID)).To(BeTrue())
			Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[2].CID)).To(BeTrue())
-			Expect(len(cids[0].Receipts)).To(Equal(3))
+			Expect(len(cids[0].Receipts)).To(Equal(4))
Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, test_helpers.MockCIDWrapper.Receipts[0].CID)).To(BeTrue()) Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, test_helpers.MockCIDWrapper.Receipts[0].CID)).To(BeTrue())
Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, test_helpers.MockCIDWrapper.Receipts[1].CID)).To(BeTrue()) Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, test_helpers.MockCIDWrapper.Receipts[1].CID)).To(BeTrue())
Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, test_helpers.MockCIDWrapper.Receipts[2].CID)).To(BeTrue()) Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, test_helpers.MockCIDWrapper.Receipts[2].CID)).To(BeTrue())
@ -277,7 +284,7 @@ var _ = Describe("Retriever", func() {
Expect(empty).ToNot(BeTrue()) Expect(empty).ToNot(BeTrue())
Expect(len(cids1)).To(Equal(1)) Expect(len(cids1)).To(Equal(1))
Expect(cids1[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber)) Expect(cids1[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
-			Expect(cids1[0].Header).To(Equal(eth2.HeaderModel{}))
+			Expect(cids1[0].Header).To(Equal(models.HeaderModel{}))
Expect(len(cids1[0].Transactions)).To(Equal(0)) Expect(len(cids1[0].Transactions)).To(Equal(0))
Expect(len(cids1[0].StateNodes)).To(Equal(0)) Expect(len(cids1[0].StateNodes)).To(Equal(0))
Expect(len(cids1[0].StorageNodes)).To(Equal(0)) Expect(len(cids1[0].StorageNodes)).To(Equal(0))
@ -292,7 +299,7 @@ var _ = Describe("Retriever", func() {
Expect(empty).ToNot(BeTrue()) Expect(empty).ToNot(BeTrue())
Expect(len(cids2)).To(Equal(1)) Expect(len(cids2)).To(Equal(1))
Expect(cids2[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber)) Expect(cids2[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
-			Expect(cids2[0].Header).To(Equal(eth2.HeaderModel{}))
+			Expect(cids2[0].Header).To(Equal(models.HeaderModel{}))
Expect(len(cids2[0].Transactions)).To(Equal(0)) Expect(len(cids2[0].Transactions)).To(Equal(0))
Expect(len(cids2[0].StateNodes)).To(Equal(0)) Expect(len(cids2[0].StateNodes)).To(Equal(0))
Expect(len(cids2[0].StorageNodes)).To(Equal(0)) Expect(len(cids2[0].StorageNodes)).To(Equal(0))
@ -307,7 +314,7 @@ var _ = Describe("Retriever", func() {
Expect(empty).ToNot(BeTrue()) Expect(empty).ToNot(BeTrue())
Expect(len(cids3)).To(Equal(1)) Expect(len(cids3)).To(Equal(1))
Expect(cids3[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber)) Expect(cids3[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
-			Expect(cids3[0].Header).To(Equal(eth2.HeaderModel{}))
+			Expect(cids3[0].Header).To(Equal(models.HeaderModel{}))
Expect(len(cids3[0].Transactions)).To(Equal(0)) Expect(len(cids3[0].Transactions)).To(Equal(0))
Expect(len(cids3[0].StateNodes)).To(Equal(0)) Expect(len(cids3[0].StateNodes)).To(Equal(0))
Expect(len(cids3[0].StorageNodes)).To(Equal(0)) Expect(len(cids3[0].StorageNodes)).To(Equal(0))
@ -322,7 +329,7 @@ var _ = Describe("Retriever", func() {
Expect(empty).ToNot(BeTrue()) Expect(empty).ToNot(BeTrue())
Expect(len(cids4)).To(Equal(1)) Expect(len(cids4)).To(Equal(1))
Expect(cids4[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber)) Expect(cids4[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
-			Expect(cids4[0].Header).To(Equal(eth2.HeaderModel{}))
+			Expect(cids4[0].Header).To(Equal(models.HeaderModel{}))
Expect(len(cids4[0].Transactions)).To(Equal(0)) Expect(len(cids4[0].Transactions)).To(Equal(0))
Expect(len(cids4[0].StateNodes)).To(Equal(0)) Expect(len(cids4[0].StateNodes)).To(Equal(0))
Expect(len(cids4[0].StorageNodes)).To(Equal(0)) Expect(len(cids4[0].StorageNodes)).To(Equal(0))
@ -337,14 +344,14 @@ var _ = Describe("Retriever", func() {
Expect(empty).ToNot(BeTrue()) Expect(empty).ToNot(BeTrue())
Expect(len(cids5)).To(Equal(1)) Expect(len(cids5)).To(Equal(1))
Expect(cids5[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber)) Expect(cids5[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
-			Expect(cids5[0].Header).To(Equal(eth2.HeaderModel{}))
+			Expect(cids5[0].Header).To(Equal(models.HeaderModel{}))
-			Expect(len(cids5[0].Transactions)).To(Equal(3))
+			Expect(len(cids5[0].Transactions)).To(Equal(4))
			Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx1CID.String())).To(BeTrue())
			Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx2CID.String())).To(BeTrue())
			Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx3CID.String())).To(BeTrue())
			Expect(len(cids5[0].StateNodes)).To(Equal(0))
			Expect(len(cids5[0].StorageNodes)).To(Equal(0))
-			Expect(len(cids5[0].Receipts)).To(Equal(3))
+			Expect(len(cids5[0].Receipts)).To(Equal(4))
Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, test_helpers.Rct1CID.String())).To(BeTrue()) Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, test_helpers.Rct1CID.String())).To(BeTrue())
Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, test_helpers.Rct2CID.String())).To(BeTrue()) Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, test_helpers.Rct2CID.String())).To(BeTrue())
Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, test_helpers.Rct3CID.String())).To(BeTrue()) Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, test_helpers.Rct3CID.String())).To(BeTrue())
@ -354,7 +361,7 @@ var _ = Describe("Retriever", func() {
Expect(empty).ToNot(BeTrue()) Expect(empty).ToNot(BeTrue())
Expect(len(cids6)).To(Equal(1)) Expect(len(cids6)).To(Equal(1))
Expect(cids6[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber)) Expect(cids6[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
-			Expect(cids6[0].Header).To(Equal(eth2.HeaderModel{}))
+			Expect(cids6[0].Header).To(Equal(models.HeaderModel{}))
Expect(len(cids6[0].Transactions)).To(Equal(1)) Expect(len(cids6[0].Transactions)).To(Equal(1))
expectedTxCID := test_helpers.MockCIDWrapper.Transactions[1] expectedTxCID := test_helpers.MockCIDWrapper.Transactions[1]
expectedTxCID.ID = cids6[0].Transactions[0].ID expectedTxCID.ID = cids6[0].Transactions[0].ID
@ -373,12 +380,12 @@ var _ = Describe("Retriever", func() {
Expect(empty).ToNot(BeTrue()) Expect(empty).ToNot(BeTrue())
Expect(len(cids7)).To(Equal(1)) Expect(len(cids7)).To(Equal(1))
Expect(cids7[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber)) Expect(cids7[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
-			Expect(cids7[0].Header).To(Equal(eth2.HeaderModel{}))
+			Expect(cids7[0].Header).To(Equal(models.HeaderModel{}))
			Expect(len(cids7[0].Transactions)).To(Equal(0))
			Expect(len(cids7[0].Receipts)).To(Equal(0))
			Expect(len(cids7[0].StorageNodes)).To(Equal(0))
			Expect(len(cids7[0].StateNodes)).To(Equal(1))
-			Expect(cids7[0].StateNodes[0]).To(Equal(eth2.StateNodeModel{
+			Expect(cids7[0].StateNodes[0]).To(Equal(models.StateNodeModel{
ID: cids7[0].StateNodes[0].ID, ID: cids7[0].StateNodes[0].ID,
HeaderID: cids7[0].StateNodes[0].HeaderID, HeaderID: cids7[0].StateNodes[0].HeaderID,
NodeType: 2, NodeType: 2,
@ -400,8 +407,12 @@ var _ = Describe("Retriever", func() {
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
}) })
It("Gets the number of the first block that has data in the database", func() { It("Gets the number of the first block that has data in the database", func() {
-			err := repo.Publish(test_helpers.MockConvertedPayload)
+			tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = tx.Close(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber() num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1))) Expect(num).To(Equal(int64(1)))
@ -410,8 +421,12 @@ var _ = Describe("Retriever", func() {
It("Gets the number of the first block that has data in the database", func() { It("Gets the number of the first block that has data in the database", func() {
payload := test_helpers.MockConvertedPayload payload := test_helpers.MockConvertedPayload
payload.Block = newMockBlock(1010101) payload.Block = newMockBlock(1010101)
-			err := repo.Publish(payload)
+			tx, err := diffIndexer.PushBlock(payload.Block, payload.Receipts, payload.Block.Difficulty())
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = tx.Close(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber() num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101))) Expect(num).To(Equal(int64(1010101)))
@ -422,10 +437,16 @@ var _ = Describe("Retriever", func() {
payload1.Block = newMockBlock(1010101) payload1.Block = newMockBlock(1010101)
payload2 := payload1 payload2 := payload1
payload2.Block = newMockBlock(5) payload2.Block = newMockBlock(5)
-			err := repo.Publish(payload1)
+			tx, err := diffIndexer.PushBlock(payload1.Block, payload1.Receipts, payload1.Block.Difficulty())
			Expect(err).ToNot(HaveOccurred())
-			err = repo.Publish(payload2)
+			err = tx.Close(err)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
tx, err = diffIndexer.PushBlock(payload2.Block, payload2.Receipts, payload2.Block.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Close(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber() num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(5))) Expect(num).To(Equal(int64(5)))
@ -438,8 +459,11 @@ var _ = Describe("Retriever", func() {
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
}) })
It("Gets the number of the latest block that has data in the database", func() { It("Gets the number of the latest block that has data in the database", func() {
-			err := repo.Publish(test_helpers.MockConvertedPayload)
+			tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = tx.Close(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber() num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1))) Expect(num).To(Equal(int64(1)))
@ -448,8 +472,12 @@ var _ = Describe("Retriever", func() {
It("Gets the number of the latest block that has data in the database", func() { It("Gets the number of the latest block that has data in the database", func() {
payload := test_helpers.MockConvertedPayload payload := test_helpers.MockConvertedPayload
payload.Block = newMockBlock(1010101) payload.Block = newMockBlock(1010101)
-			err := repo.Publish(payload)
+			tx, err := diffIndexer.PushBlock(payload.Block, payload.Receipts, payload.Block.Difficulty())
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = tx.Close(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber() num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101))) Expect(num).To(Equal(int64(1010101)))
@ -460,10 +488,16 @@ var _ = Describe("Retriever", func() {
payload1.Block = newMockBlock(1010101) payload1.Block = newMockBlock(1010101)
payload2 := payload1 payload2 := payload1
payload2.Block = newMockBlock(5) payload2.Block = newMockBlock(5)
-			err := repo.Publish(payload1)
+			tx, err := diffIndexer.PushBlock(payload1.Block, payload1.Receipts, payload1.Block.Difficulty())
			Expect(err).ToNot(HaveOccurred())
-			err = repo.Publish(payload2)
+			err = tx.Close(err)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
tx, err = diffIndexer.PushBlock(payload2.Block, payload2.Receipts, payload2.Block.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Close(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber() num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101))) Expect(num).To(Equal(int64(1010101)))
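The specs above now drive the go-ethereum statediff indexer directly. As a reading aid, a hypothetical helper (the name and wrapper are assumptions; the indexer calls mirror the ones used in the specs) capturing the PushBlock / PushStateNode / Close lifecycle:

	func indexMockBlock(diffIndexer *indexer.StateDiffIndexer, block *types.Block, rcts types.Receipts, td *big.Int, nodes []sdtypes.StateNode) error {
		// Open a write transaction for the block and index its header, txs and receipts.
		tx, err := diffIndexer.PushBlock(block, rcts, td)
		if err != nil {
			return err
		}
		// Index each state node (with its storage nodes) against that block.
		for _, node := range nodes {
			if err := diffIndexer.PushStateNode(tx, node); err != nil {
				return tx.Close(err)
			}
		}
		// Close commits or rolls back depending on the error passed in.
		return tx.Close(err)
	}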


@ -21,6 +21,7 @@ import (
"context" "context"
"io/ioutil" "io/ioutil"
"math/big" "math/big"
"time"
"github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -29,15 +30,15 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff" "github.com/ethereum/go-ethereum/statediff"
"github.com/ethereum/go-ethereum/statediff/indexer"
"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
pgipfsethdb "github.com/vulcanize/ipfs-ethdb/postgres"
eth2 "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
"github.com/vulcanize/ipld-eth-indexer/pkg/shared"
"github.com/vulcanize/ipld-eth-server/pkg/eth" "github.com/vulcanize/ipld-eth-server/pkg/eth"
"github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers" "github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers"
) )
@ -74,13 +75,18 @@ var _ = Describe("eth state reading tests", func() {
It("test init", func() { It("test init", func() {
// db and type initializations // db and type initializations
var err error var err error
-		db, err = shared.SetupDB()
+		db, err = SetupDB()
		Expect(err).ToNot(HaveOccurred())
-		transformer := eth2.NewStateDiffTransformer(chainConfig, db)
+		transformer := indexer.NewStateDiffIndexer(chainConfig, db)
backend, err = eth.NewEthBackend(db, &eth.Config{ backend, err = eth.NewEthBackend(db, &eth.Config{
ChainConfig: chainConfig, ChainConfig: chainConfig,
VmConfig: vm.Config{}, VmConfig: vm.Config{},
RPCGasCap: big.NewInt(10000000000), // Max gas capacity for a rpc call. RPCGasCap: big.NewInt(10000000000), // Max gas capacity for a rpc call.
CacheConfig: pgipfsethdb.CacheConfig{
Name: "eth_state",
Size: 3000000, // 3MB
ExpiryDuration: time.Hour,
},
}) })
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
api = eth.NewPublicEthAPI(backend, nil, false) api = eth.NewPublicEthAPI(backend, nil, false)
@ -135,31 +141,39 @@ var _ = Describe("eth state reading tests", func() {
} }
diff, err := builder.BuildStateDiffObject(args, params) diff, err := builder.BuildStateDiffObject(args, params)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
-			diffRlp, err := rlp.EncodeToBytes(diff)
+			tx, err := transformer.PushBlock(block, rcts, mockTD)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
blockRlp, err := rlp.EncodeToBytes(block)
for _, node := range diff.Nodes {
err = transformer.PushStateNode(tx, node)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
receiptsRlp, err := rlp.EncodeToBytes(rcts)
Expect(err).ToNot(HaveOccurred())
payload := statediff.Payload{
StateObjectRlp: diffRlp,
BlockRlp: blockRlp,
ReceiptsRlp: receiptsRlp,
TotalDifficulty: mockTD,
} }
-			_, err = transformer.Transform(0, payload)
+			err = tx.Close(err)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
} }
// Insert some non-canonical data into the database so that we test our ability to discern canonicity // Insert some non-canonical data into the database so that we test our ability to discern canonicity
-		indexAndPublisher := eth2.NewIPLDPublisher(db)
+		indexAndPublisher := indexer.NewStateDiffIndexer(chainConfig, db)
		api = eth.NewPublicEthAPI(backend, nil, false)
-		err = indexAndPublisher.Publish(test_helpers.MockConvertedPayload)
+		tx, err := indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = tx.Close(err)
Expect(err).ToNot(HaveOccurred())
// The non-canonical header has a child // The non-canonical header has a child
-		err = indexAndPublisher.Publish(test_helpers.MockConvertedPayloadForChild)
+		tx, err = indexAndPublisher.PushBlock(test_helpers.MockChild, test_helpers.MockReceipts, test_helpers.MockChild.Difficulty())
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = publishCode(db, test_helpers.ContractCodeHash, test_helpers.ContractCode)
hash := sdtypes.CodeAndCodeHash{
Hash: test_helpers.CodeHash,
Code: test_helpers.ContractCode,
}
err = indexAndPublisher.PushCodeAndCodeHash(tx, hash)
Expect(err).ToNot(HaveOccurred())
err = tx.Close(err)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
}) })
defer It("test teardown", func() { defer It("test teardown", func() {


@ -19,22 +19,20 @@ package eth
import (
	"bytes"
-	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"
+	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
	"github.com/multiformats/go-multihash"
-	"github.com/vulcanize/ipld-eth-indexer/pkg/eth"
+	"github.com/ethereum/go-ethereum/statediff/indexer/ipfs"
-	"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
-	"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs/ipld"
)
// Filterer interface for substituing mocks in tests // Filterer interface for substituing mocks in tests
type Filterer interface { type Filterer interface {
-	Filter(filter SubscriptionSettings, payload eth.ConvertedPayload) (*IPLDs, error)
+	Filter(filter SubscriptionSettings, payload ConvertedPayload) (*IPLDs, error)
} }
// ResponseFilterer satisfies the ResponseFilterer interface for ethereum // ResponseFilterer satisfies the ResponseFilterer interface for ethereum
@ -46,7 +44,7 @@ func NewResponseFilterer() *ResponseFilterer {
} }
// Filter is used to filter through eth data to extract and package requested data into a Payload // Filter is used to filter through eth data to extract and package requested data into a Payload
-func (s *ResponseFilterer) Filter(filter SubscriptionSettings, payload eth.ConvertedPayload) (*IPLDs, error) {
+func (s *ResponseFilterer) Filter(filter SubscriptionSettings, payload ConvertedPayload) (*IPLDs, error) {
if checkRange(filter.Start.Int64(), filter.End.Int64(), payload.Block.Number().Int64()) { if checkRange(filter.Start.Int64(), filter.End.Int64(), payload.Block.Number().Int64()) {
response := new(IPLDs) response := new(IPLDs)
response.TotalDifficulty = payload.TotalDifficulty response.TotalDifficulty = payload.TotalDifficulty
@ -73,7 +71,7 @@ func (s *ResponseFilterer) Filter(filter SubscriptionSettings, payload eth.Conve
return nil, nil return nil, nil
} }
-func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IPLDs, payload eth.ConvertedPayload) error {
+func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IPLDs, payload ConvertedPayload) error {
if !headerFilter.Off { if !headerFilter.Off {
headerRLP, err := rlp.EncodeToBytes(payload.Block.Header()) headerRLP, err := rlp.EncodeToBytes(payload.Block.Header())
if err != nil { if err != nil {
@ -115,7 +113,7 @@ func checkRange(start, end, actual int64) bool {
return false return false
} }
-func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLDs, payload eth.ConvertedPayload) ([]common.Hash, error) {
+func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLDs, payload ConvertedPayload) ([]common.Hash, error) {
var trxHashes []common.Hash var trxHashes []common.Hash
if !trxFilter.Off { if !trxFilter.Off {
trxLen := len(payload.Block.Body().Transactions) trxLen := len(payload.Block.Body().Transactions)
@ -163,13 +161,22 @@ func checkTransactionAddrs(wantedSrc, wantedDst []string, actualSrc, actualDst s
	return false
}
-func (s *ResponseFilterer) filerReceipts(receiptFilter ReceiptFilter, response *IPLDs, payload eth.ConvertedPayload, trxHashes []common.Hash) error {
+func (s *ResponseFilterer) filerReceipts(receiptFilter ReceiptFilter, response *IPLDs, payload ConvertedPayload, trxHashes []common.Hash) error {
	if !receiptFilter.Off {
		response.Receipts = make([]ipfs.BlockModel, 0, len(payload.Receipts))
-		for i, receipt := range payload.Receipts {
+		for _, receipt := range payload.Receipts {
			// topics is always length 4
-			topics := [][]string{payload.ReceiptMetaData[i].Topic0s, payload.ReceiptMetaData[i].Topic1s, payload.ReceiptMetaData[i].Topic2s, payload.ReceiptMetaData[i].Topic3s}
-			if checkReceipts(receipt, receiptFilter.Topics, topics, receiptFilter.LogAddresses, payload.ReceiptMetaData[i].LogContracts, trxHashes) {
+			topics := make([][]string, 4)
+			contracts := make([]string, len(receipt.Logs))
+			for _, l := range receipt.Logs {
+				contracts = append(contracts, l.Address.String())
+				for idx, t := range l.Topics {
+					topics[idx] = append(topics[idx], t.String())
+				}
+			}
+			// TODO: Verify this filter logic.
+			if checkReceipts(receipt, receiptFilter.Topics, topics, receiptFilter.LogAddresses, contracts, trxHashes) {
				receiptBuffer := new(bytes.Buffer)
				if err := receipt.EncodeRLP(receiptBuffer); err != nil {
					return err
@ -253,7 +260,7 @@ func slicesShareString(slice1, slice2 []string) int {
} }
// filterStateAndStorage filters state and storage nodes into the response according to the provided filters // filterStateAndStorage filters state and storage nodes into the response according to the provided filters
-func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storageFilter StorageFilter, response *IPLDs, payload eth.ConvertedPayload) error {
+func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storageFilter StorageFilter, response *IPLDs, payload ConvertedPayload) error {
response.StateNodes = make([]StateNode, 0, len(payload.StateNodes)) response.StateNodes = make([]StateNode, 0, len(payload.StateNodes))
response.StorageNodes = make([]StorageNode, 0) response.StorageNodes = make([]StorageNode, 0)
stateAddressFilters := make([]common.Hash, len(stateFilter.Addresses)) stateAddressFilters := make([]common.Hash, len(stateFilter.Addresses))
@ -270,37 +277,37 @@ func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storag
	}
	for _, stateNode := range payload.StateNodes {
		if !stateFilter.Off && checkNodeKeys(stateAddressFilters, stateNode.LeafKey) {
-			if stateNode.Type == sdtypes.Leaf || stateFilter.IntermediateNodes {
+			if stateNode.NodeType == sdtypes.Leaf || stateFilter.IntermediateNodes {
-				cid, err := ipld.RawdataToCid(ipld.MEthStateTrie, stateNode.Value, multihash.KECCAK_256)
+				cid, err := ipld.RawdataToCid(ipld.MEthStateTrie, stateNode.NodeValue, multihash.KECCAK_256)
				if err != nil {
					return err
				}
				response.StateNodes = append(response.StateNodes, StateNode{
-					StateLeafKey: stateNode.LeafKey,
+					StateLeafKey: common.BytesToHash(stateNode.LeafKey),
					Path: stateNode.Path,
					IPLD: ipfs.BlockModel{
-						Data: stateNode.Value,
+						Data: stateNode.NodeValue,
						CID: cid.String(),
					},
-					Type: stateNode.Type,
+					Type: stateNode.NodeType,
				})
			}
		}
		if !storageFilter.Off && checkNodeKeys(storageAddressFilters, stateNode.LeafKey) {
			for _, storageNode := range payload.StorageNodes[common.Bytes2Hex(stateNode.Path)] {
				if checkNodeKeys(storageKeyFilters, storageNode.LeafKey) {
-					cid, err := ipld.RawdataToCid(ipld.MEthStorageTrie, storageNode.Value, multihash.KECCAK_256)
+					cid, err := ipld.RawdataToCid(ipld.MEthStorageTrie, storageNode.NodeValue, multihash.KECCAK_256)
					if err != nil {
						return err
					}
					response.StorageNodes = append(response.StorageNodes, StorageNode{
-						StateLeafKey: stateNode.LeafKey,
+						StateLeafKey: common.BytesToHash(stateNode.LeafKey),
-						StorageLeafKey: storageNode.LeafKey,
+						StorageLeafKey: common.BytesToHash(storageNode.LeafKey),
						IPLD: ipfs.BlockModel{
-							Data: storageNode.Value,
+							Data: storageNode.NodeValue,
							CID: cid.String(),
						},
-						Type: storageNode.Type,
+						Type: storageNode.NodeType,
						Path: storageNode.Path,
					})
				}
@ -310,13 +317,13 @@ func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storag
	return nil
}
-func checkNodeKeys(wantedKeys []common.Hash, actualKey common.Hash) bool {
+func checkNodeKeys(wantedKeys []common.Hash, actualKey []byte) bool {
	// If we aren't filtering for any specific keys, all nodes are a go
	if len(wantedKeys) == 0 {
		return true
	}
	for _, key := range wantedKeys {
-		if bytes.Equal(key.Bytes(), actualKey.Bytes()) {
+		if bytes.Equal(key.Bytes(), actualKey) {
			return true
		}
	}
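A minimal sketch (not part of the diff; the function name is an assumption) of the per-receipt topic and contract collection that filerReceipts now performs inline, using go-ethereum's types.Receipt:

	func collectTopicsAndContracts(receipt *types.Receipt) (topics [][]string, contracts []string) {
		topics = make([][]string, 4) // one bucket per topic position 0..3
		contracts = make([]string, 0, len(receipt.Logs))
		for _, l := range receipt.Logs {
			contracts = append(contracts, l.Address.String())
			for idx, t := range l.Topics {
				topics[idx] = append(topics[idx], t.String())
			}
		}
		return topics, contracts
	}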


@ -19,13 +19,12 @@ package eth_test
import (
	"bytes"
+	"github.com/ethereum/go-ethereum/statediff/indexer/ipfs"
	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
-	"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
	"github.com/vulcanize/ipld-eth-server/pkg/eth"
	"github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers"
	"github.com/vulcanize/ipld-eth-server/pkg/shared"
@ -49,11 +48,11 @@ var _ = Describe("Filterer", func() {
Expect(iplds.Header).To(Equal(test_helpers.MockIPLDs.Header)) Expect(iplds.Header).To(Equal(test_helpers.MockIPLDs.Header))
var expectedEmptyUncles []ipfs.BlockModel var expectedEmptyUncles []ipfs.BlockModel
Expect(iplds.Uncles).To(Equal(expectedEmptyUncles)) Expect(iplds.Uncles).To(Equal(expectedEmptyUncles))
-			Expect(len(iplds.Transactions)).To(Equal(3))
+			Expect(len(iplds.Transactions)).To(Equal(4))
			Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.Tx1)).To(BeTrue())
			Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.Tx2)).To(BeTrue())
			Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.Tx3)).To(BeTrue())
-			Expect(len(iplds.Receipts)).To(Equal(3))
+			Expect(len(iplds.Receipts)).To(Equal(4))
Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct1)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct1)).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct2)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct2)).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct3)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct3)).To(BeTrue())
@ -143,13 +142,13 @@ var _ = Describe("Filterer", func() {
Expect(iplds5.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64())) Expect(iplds5.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds5.Header).To(Equal(ipfs.BlockModel{})) Expect(iplds5.Header).To(Equal(ipfs.BlockModel{}))
Expect(len(iplds5.Uncles)).To(Equal(0)) Expect(len(iplds5.Uncles)).To(Equal(0))
-			Expect(len(iplds5.Transactions)).To(Equal(3))
+			Expect(len(iplds5.Transactions)).To(Equal(4))
			Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx1)).To(BeTrue())
			Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx2)).To(BeTrue())
			Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx3)).To(BeTrue())
			Expect(len(iplds5.StorageNodes)).To(Equal(0))
			Expect(len(iplds5.StateNodes)).To(Equal(0))
-			Expect(len(iplds5.Receipts)).To(Equal(3))
+			Expect(len(iplds5.Receipts)).To(Equal(4))
Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct1)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct1)).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct2)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct2)).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct3)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct3)).To(BeTrue())


@ -22,13 +22,11 @@ import (
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/statediff/indexer/ipfs"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
"github.com/vulcanize/ipld-eth-server/pkg/shared" "github.com/vulcanize/ipld-eth-server/pkg/shared"
) )
@ -104,7 +102,7 @@ func (f *IPLDFetcher) Fetch(cids CIDWrapper) (*IPLDs, error) {
} }
// FetchHeaders fetches headers // FetchHeaders fetches headers
-func (f *IPLDFetcher) FetchHeader(tx *sqlx.Tx, c eth.HeaderModel) (ipfs.BlockModel, error) {
+func (f *IPLDFetcher) FetchHeader(tx *sqlx.Tx, c models.HeaderModel) (ipfs.BlockModel, error) {
log.Debug("fetching header ipld") log.Debug("fetching header ipld")
headerBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey) headerBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil { if err != nil {
@ -117,7 +115,7 @@ func (f *IPLDFetcher) FetchHeader(tx *sqlx.Tx, c eth.HeaderModel) (ipfs.BlockMod
} }
// FetchUncles fetches uncles // FetchUncles fetches uncles
func (f *IPLDFetcher) FetchUncles(tx *sqlx.Tx, cids []eth.UncleModel) ([]ipfs.BlockModel, error) { func (f *IPLDFetcher) FetchUncles(tx *sqlx.Tx, cids []models.UncleModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching uncle iplds") log.Debug("fetching uncle iplds")
uncleIPLDs := make([]ipfs.BlockModel, len(cids)) uncleIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids { for i, c := range cids {
@ -134,7 +132,7 @@ func (f *IPLDFetcher) FetchUncles(tx *sqlx.Tx, cids []eth.UncleModel) ([]ipfs.Bl
} }
// FetchTrxs fetches transactions // FetchTrxs fetches transactions
func (f *IPLDFetcher) FetchTrxs(tx *sqlx.Tx, cids []eth.TxModel) ([]ipfs.BlockModel, error) { func (f *IPLDFetcher) FetchTrxs(tx *sqlx.Tx, cids []models.TxModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching transaction iplds") log.Debug("fetching transaction iplds")
trxIPLDs := make([]ipfs.BlockModel, len(cids)) trxIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids { for i, c := range cids {
@ -151,7 +149,7 @@ func (f *IPLDFetcher) FetchTrxs(tx *sqlx.Tx, cids []eth.TxModel) ([]ipfs.BlockMo
} }
// FetchRcts fetches receipts // FetchRcts fetches receipts
func (f *IPLDFetcher) FetchRcts(tx *sqlx.Tx, cids []eth.ReceiptModel) ([]ipfs.BlockModel, error) { func (f *IPLDFetcher) FetchRcts(tx *sqlx.Tx, cids []models.ReceiptModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching receipt iplds") log.Debug("fetching receipt iplds")
rctIPLDs := make([]ipfs.BlockModel, len(cids)) rctIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids { for i, c := range cids {
@ -168,7 +166,7 @@ func (f *IPLDFetcher) FetchRcts(tx *sqlx.Tx, cids []eth.ReceiptModel) ([]ipfs.Bl
} }
// FetchState fetches state nodes // FetchState fetches state nodes
func (f *IPLDFetcher) FetchState(tx *sqlx.Tx, cids []eth.StateNodeModel) ([]StateNode, error) { func (f *IPLDFetcher) FetchState(tx *sqlx.Tx, cids []models.StateNodeModel) ([]StateNode, error) {
log.Debug("fetching state iplds") log.Debug("fetching state iplds")
stateNodes := make([]StateNode, 0, len(cids)) stateNodes := make([]StateNode, 0, len(cids))
for _, stateNode := range cids { for _, stateNode := range cids {
@ -193,7 +191,7 @@ func (f *IPLDFetcher) FetchState(tx *sqlx.Tx, cids []eth.StateNodeModel) ([]Stat
} }
// FetchStorage fetches storage nodes // FetchStorage fetches storage nodes
func (f *IPLDFetcher) FetchStorage(tx *sqlx.Tx, cids []eth.StorageNodeWithStateKeyModel) ([]StorageNode, error) { func (f *IPLDFetcher) FetchStorage(tx *sqlx.Tx, cids []models.StorageNodeWithStateKeyModel) ([]StorageNode, error) {
log.Debug("fetching storage iplds") log.Debug("fetching storage iplds")
storageNodes := make([]StorageNode, 0, len(cids)) storageNodes := make([]StorageNode, 0, len(cids))
for _, storageNode := range cids { for _, storageNode := range cids {


@ -17,12 +17,11 @@
package eth_test package eth_test
import ( import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/statediff/indexer"
"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/ipld-eth-indexer/pkg/shared"
eth2 "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
"github.com/vulcanize/ipld-eth-server/pkg/eth" "github.com/vulcanize/ipld-eth-server/pkg/eth"
"github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers" "github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers"
@ -31,18 +30,28 @@ import (
var _ = Describe("IPLDFetcher", func() { var _ = Describe("IPLDFetcher", func() {
var ( var (
db *postgres.DB db *postgres.DB
pubAndIndexer *eth2.IPLDPublisher pubAndIndexer *indexer.StateDiffIndexer
fetcher *eth.IPLDFetcher fetcher *eth.IPLDFetcher
) )
Describe("Fetch", func() { Describe("Fetch", func() {
BeforeEach(func() { BeforeEach(func() {
var err error var (
db, err = shared.SetupDB() err error
tx *indexer.BlockTx
)
db, err = SetupDB()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
pubAndIndexer = eth2.NewIPLDPublisher(db) pubAndIndexer = indexer.NewStateDiffIndexer(params.TestChainConfig, db)
err = pubAndIndexer.Publish(test_helpers.MockConvertedPayload) tx, err = pubAndIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
for _, node := range test_helpers.MockStateNodes {
err = pubAndIndexer.PushStateNode(tx, node)
Expect(err).ToNot(HaveOccurred())
}
err = tx.Close(err)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
fetcher = eth.NewIPLDFetcher(db) fetcher = eth.NewIPLDFetcher(db)
}) })
AfterEach(func() { AfterEach(func() {
eth.TearDownDB(db) eth.TearDownDB(db)


@ -22,9 +22,8 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
"github.com/lib/pq" "github.com/lib/pq"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
) )
const ( const (


@ -17,10 +17,9 @@
package eth package eth
import ( import (
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
) )
// TearDownDB is used to tear down the watcher dbs after tests // TearDownDB is used to tear down the watcher dbs after tests
@ -40,13 +39,15 @@ func TearDownDB(db *postgres.DB) {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM blocks`) _, err = tx.Exec(`DELETE FROM blocks`)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.log_cids`)
Expect(err).NotTo(HaveOccurred())
err = tx.Commit() err = tx.Commit()
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
// TxModelsContainsCID used to check if a list of TxModels contains a specific cid string // TxModelsContainsCID used to check if a list of TxModels contains a specific cid string
func TxModelsContainsCID(txs []eth.TxModel, cid string) bool { func TxModelsContainsCID(txs []models.TxModel, cid string) bool {
for _, tx := range txs { for _, tx := range txs {
if tx.CID == cid { if tx.CID == cid {
return true return true
@ -56,7 +57,7 @@ func TxModelsContainsCID(txs []eth.TxModel, cid string) bool {
} }
// ListContainsBytes used to check if a list of byte arrays contains a particular byte array // ListContainsBytes used to check if a list of byte arrays contains a particular byte array
func ReceiptModelsContainsCID(rcts []eth.ReceiptModel, cid string) bool { func ReceiptModelsContainsCID(rcts []models.ReceiptModel, cid string) bool {
for _, rct := range rcts { for _, rct := range rcts {
if rct.CID == cid { if rct.CID == cid {
return true return true


@ -23,27 +23,25 @@ import (
"crypto/rand" "crypto/rand"
"math/big" "math/big"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ethereum/go-ethereum/trie"
"github.com/vulcanize/ipld-eth-indexer/pkg/shared"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer"
"github.com/ethereum/go-ethereum/statediff/indexer/ipfs"
"github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
"github.com/ethereum/go-ethereum/statediff/testhelpers" "github.com/ethereum/go-ethereum/statediff/testhelpers"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ethereum/go-ethereum/trie"
blocks "github.com/ipfs/go-block-format" blocks "github.com/ipfs/go-block-format"
"github.com/multiformats/go-multihash" "github.com/multiformats/go-multihash"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-eth-indexer/pkg/eth" "github.com/vulcanize/ipld-eth-server/pkg/eth"
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs/ipld"
eth2 "github.com/vulcanize/ipld-eth-server/pkg/eth"
) )
// Test variables // Test variables
@ -97,6 +95,8 @@ var (
MockChildRlp, _ = rlp.EncodeToBytes(MockChild.Header()) MockChildRlp, _ = rlp.EncodeToBytes(MockChild.Header())
Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592") Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
AnotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593") AnotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
AnotherAddress1 = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476594")
AnotherAddress2 = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476596")
ContractAddress = crypto.CreateAddress(SenderAddr, MockTransactions[2].Nonce()) ContractAddress = crypto.CreateAddress(SenderAddr, MockTransactions[2].Nonce())
ContractHash = crypto.Keccak256Hash(ContractAddress.Bytes()).String() ContractHash = crypto.Keccak256Hash(ContractAddress.Bytes()).String()
MockContractByteCode = []byte{0, 1, 2, 3, 4, 5} MockContractByteCode = []byte{0, 1, 2, 3, 4, 5}
@ -104,6 +104,12 @@ var (
mockTopic12 = common.HexToHash("0x06") mockTopic12 = common.HexToHash("0x06")
mockTopic21 = common.HexToHash("0x05") mockTopic21 = common.HexToHash("0x05")
mockTopic22 = common.HexToHash("0x07") mockTopic22 = common.HexToHash("0x07")
mockTopic31 = common.HexToHash("0x08")
mockTopic41 = common.HexToHash("0x09")
mockTopic42 = common.HexToHash("0x0a")
mockTopic43 = common.HexToHash("0x0b")
mockTopic51 = common.HexToHash("0x0c")
mockTopic61 = common.HexToHash("0x0d")
MockLog1 = &types.Log{ MockLog1 = &types.Log{
Address: Address, Address: Address,
Topics: []common.Hash{mockTopic11, mockTopic12}, Topics: []common.Hash{mockTopic11, mockTopic12},
@ -120,14 +126,49 @@ var (
TxIndex: 1, TxIndex: 1,
Index: 1, Index: 1,
} }
MockLog3 = &types.Log{
Address: AnotherAddress1,
Topics: []common.Hash{mockTopic31},
Data: []byte{},
BlockNumber: BlockNumber.Uint64(),
TxIndex: 2,
Index: 2,
}
MockLog4 = &types.Log{
Address: AnotherAddress1,
Topics: []common.Hash{mockTopic41, mockTopic42, mockTopic43},
Data: []byte{},
BlockNumber: BlockNumber.Uint64(),
TxIndex: 2,
Index: 3,
}
MockLog5 = &types.Log{
Address: AnotherAddress1,
Topics: []common.Hash{mockTopic51},
Data: []byte{},
BlockNumber: BlockNumber.Uint64(),
TxIndex: 2,
Index: 4,
}
MockLog6 = &types.Log{
Address: AnotherAddress2,
Topics: []common.Hash{mockTopic61},
Data: []byte{},
BlockNumber: BlockNumber.Uint64(),
TxIndex: 3,
Index: 5,
}
Tx1 = GetTxnRlp(0, MockTransactions) Tx1 = GetTxnRlp(0, MockTransactions)
Tx2 = GetTxnRlp(1, MockTransactions) Tx2 = GetTxnRlp(1, MockTransactions)
Tx3 = GetTxnRlp(2, MockTransactions) Tx3 = GetTxnRlp(2, MockTransactions)
Tx4 = GetTxnRlp(3, MockTransactions)
Rct1 = GetRctRlp(0, MockReceipts) Rct1 = GetRctRlp(0, MockReceipts)
Rct2 = GetRctRlp(1, MockReceipts) Rct2 = GetRctRlp(1, MockReceipts)
Rct3 = GetRctRlp(2, MockReceipts) Rct3 = GetRctRlp(2, MockReceipts)
Rct4 = GetRctRlp(3, MockReceipts)
HeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, MockHeaderRlp, multihash.KECCAK_256) HeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, MockHeaderRlp, multihash.KECCAK_256)
HeaderMhKey = shared.MultihashKeyFromCID(HeaderCID) HeaderMhKey = shared.MultihashKeyFromCID(HeaderCID)
@ -137,19 +178,23 @@ var (
Trx2MhKey = shared.MultihashKeyFromCID(Trx2CID) Trx2MhKey = shared.MultihashKeyFromCID(Trx2CID)
Trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, Tx3, multihash.KECCAK_256) Trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, Tx3, multihash.KECCAK_256)
Trx3MhKey = shared.MultihashKeyFromCID(Trx3CID) Trx3MhKey = shared.MultihashKeyFromCID(Trx3CID)
Trx4CID, _ = ipld.RawdataToCid(ipld.MEthTx, Tx4, multihash.KECCAK_256)
Trx4MhKey = shared.MultihashKeyFromCID(Trx4CID)
Rct1CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, Rct1, multihash.KECCAK_256) Rct1CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, Rct1, multihash.KECCAK_256)
Rct1MhKey = shared.MultihashKeyFromCID(Rct1CID) Rct1MhKey = shared.MultihashKeyFromCID(Rct1CID)
Rct2CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, Rct2, multihash.KECCAK_256) Rct2CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, Rct2, multihash.KECCAK_256)
Rct2MhKey = shared.MultihashKeyFromCID(Rct2CID) Rct2MhKey = shared.MultihashKeyFromCID(Rct2CID)
Rct3CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, Rct3, multihash.KECCAK_256) Rct3CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, Rct3, multihash.KECCAK_256)
Rct3MhKey = shared.MultihashKeyFromCID(Rct3CID) Rct3MhKey = shared.MultihashKeyFromCID(Rct3CID)
Rct4CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, Rct4, multihash.KECCAK_256)
Rct4MhKey = shared.MultihashKeyFromCID(Rct4CID)
State1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, ContractLeafNode, multihash.KECCAK_256) State1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, ContractLeafNode, multihash.KECCAK_256)
State1MhKey = shared.MultihashKeyFromCID(State1CID) State1MhKey = shared.MultihashKeyFromCID(State1CID)
State2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, AccountLeafNode, multihash.KECCAK_256) State2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, AccountLeafNode, multihash.KECCAK_256)
State2MhKey = shared.MultihashKeyFromCID(State2CID) State2MhKey = shared.MultihashKeyFromCID(State2CID)
StorageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, StorageLeafNode, multihash.KECCAK_256) StorageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, StorageLeafNode, multihash.KECCAK_256)
StorageMhKey = shared.MultihashKeyFromCID(StorageCID) StorageMhKey = shared.MultihashKeyFromCID(StorageCID)
MockTrxMeta = []eth.TxModel{ MockTrxMeta = []models.TxModel{
{ {
CID: "", // This is empty until we go to publish to ipfs CID: "", // This is empty until we go to publish to ipfs
MhKey: "", MhKey: "",
@ -177,8 +222,17 @@ var (
TxHash: MockTransactions[2].Hash().String(), TxHash: MockTransactions[2].Hash().String(),
Data: MockContractByteCode, Data: MockContractByteCode,
}, },
{
CID: "",
MhKey: "",
Src: SenderAddr.Hex(),
Dst: "",
Index: 3,
TxHash: MockTransactions[3].Hash().String(),
Data: []byte{},
},
} }
MockTrxMetaPostPublsh = []eth.TxModel{ MockTrxMetaPostPublsh = []models.TxModel{
{ {
CID: Trx1CID.String(), // This is empty until we go to publish to ipfs CID: Trx1CID.String(), // This is empty until we go to publish to ipfs
MhKey: Trx1MhKey, MhKey: Trx1MhKey,
@ -206,83 +260,66 @@ var (
TxHash: MockTransactions[2].Hash().String(), TxHash: MockTransactions[2].Hash().String(),
Data: MockContractByteCode, Data: MockContractByteCode,
}, },
{
CID: Trx4CID.String(),
MhKey: Trx4MhKey,
Src: SenderAddr.Hex(),
Dst: "",
Index: 2,
TxHash: MockTransactions[2].Hash().String(),
Data: MockContractByteCode,
},
} }
MockRctMeta = []eth.ReceiptModel{ MockRctMeta = []models.ReceiptModel{
{ {
CID: "", CID: "",
MhKey: "", MhKey: "",
Topic0s: []string{
mockTopic11.String(),
},
Topic1s: []string{
mockTopic12.String(),
},
Contract: "", Contract: "",
ContractHash: "", ContractHash: "",
LogContracts: []string{
Address.String(),
},
}, },
{ {
CID: "", CID: "",
MhKey: "", MhKey: "",
Topic0s: []string{
mockTopic21.String(),
},
Topic1s: []string{
mockTopic22.String(),
},
Contract: "", Contract: "",
ContractHash: "", ContractHash: "",
LogContracts: []string{
AnotherAddress.String(),
},
}, },
{ {
CID: "", CID: "",
MhKey: "", MhKey: "",
Contract: ContractAddress.String(), Contract: ContractAddress.String(),
ContractHash: ContractHash, ContractHash: ContractHash,
LogContracts: []string{}, },
{
CID: "",
MhKey: "",
Contract: "",
ContractHash: "",
}, },
} }
MockRctMetaPostPublish = []eth.ReceiptModel{ MockRctMetaPostPublish = []models.ReceiptModel{
{ {
CID: Rct1CID.String(), CID: Rct1CID.String(),
MhKey: Rct1MhKey, MhKey: Rct1MhKey,
Topic0s: []string{
mockTopic11.String(),
},
Topic1s: []string{
mockTopic12.String(),
},
Contract: "", Contract: "",
ContractHash: "", ContractHash: "",
LogContracts: []string{
Address.String(),
},
}, },
{ {
CID: Rct2CID.String(), CID: Rct2CID.String(),
MhKey: Rct2MhKey, MhKey: Rct2MhKey,
Topic0s: []string{
mockTopic21.String(),
},
Topic1s: []string{
mockTopic22.String(),
},
Contract: "", Contract: "",
ContractHash: "", ContractHash: "",
LogContracts: []string{
AnotherAddress.String(),
},
}, },
{ {
CID: Rct3CID.String(), CID: Rct3CID.String(),
MhKey: Rct3MhKey, MhKey: Rct3MhKey,
Contract: ContractAddress.String(), Contract: ContractAddress.String(),
ContractHash: ContractHash, ContractHash: ContractHash,
LogContracts: []string{}, },
{
CID: Rct4CID.String(),
MhKey: Rct4MhKey,
Contract: "",
ContractHash: "",
}, },
} }
@ -331,21 +368,30 @@ var (
Account, Account,
}) })
MockStateNodes = []eth.TrieNode{ MockStateNodes = []sdtypes.StateNode{
{ {
LeafKey: common.BytesToHash(ContractLeafKey), LeafKey: ContractLeafKey,
Path: []byte{'\x06'}, Path: []byte{'\x06'},
Value: ContractLeafNode, NodeValue: ContractLeafNode,
Type: sdtypes.Leaf, NodeType: sdtypes.Leaf,
StorageNodes: []sdtypes.StorageNode{
{
Path: []byte{},
NodeType: sdtypes.Leaf,
LeafKey: StorageLeafKey,
NodeValue: StorageLeafNode,
},
},
}, },
{ {
LeafKey: common.BytesToHash(AccountLeafKey), LeafKey: AccountLeafKey,
Path: []byte{'\x0c'}, Path: []byte{'\x0c'},
Value: AccountLeafNode, NodeValue: AccountLeafNode,
Type: sdtypes.Leaf, NodeType: sdtypes.Leaf,
StorageNodes: []sdtypes.StorageNode{},
}, },
} }
MockStateMetaPostPublish = []eth.StateNodeModel{ MockStateMetaPostPublish = []models.StateNodeModel{
{ {
CID: State1CID.String(), CID: State1CID.String(),
MhKey: State1MhKey, MhKey: State1MhKey,
@ -361,12 +407,12 @@ var (
StateKey: common.BytesToHash(AccountLeafKey).Hex(), StateKey: common.BytesToHash(AccountLeafKey).Hex(),
}, },
} }
MockStorageNodes = map[string][]eth.TrieNode{ MockStorageNodes = map[string][]sdtypes.StorageNode{
contractPath: { contractPath: {
{ {
LeafKey: common.BytesToHash(StorageLeafKey), LeafKey: StorageLeafKey,
Value: StorageLeafNode, NodeValue: StorageLeafNode,
Type: sdtypes.Leaf, NodeType: sdtypes.Leaf,
Path: []byte{}, Path: []byte{},
}, },
}, },
@ -391,11 +437,11 @@ var (
StateNodes: MockStateNodes, StateNodes: MockStateNodes,
} }
Reward = eth.CalcEthBlockReward(MockBlock.Header(), MockBlock.Uncles(), MockBlock.Transactions(), MockReceipts) Reward = indexer.CalcEthBlockReward(MockBlock.Header(), MockBlock.Uncles(), MockBlock.Transactions(), MockReceipts)
MockCIDWrapper = &eth2.CIDWrapper{ MockCIDWrapper = &eth.CIDWrapper{
BlockNumber: new(big.Int).Set(BlockNumber), BlockNumber: new(big.Int).Set(BlockNumber),
Header: eth.HeaderModel{ Header: models.HeaderModel{
BlockNumber: "1", BlockNumber: "1",
BlockHash: MockBlock.Hash().String(), BlockHash: MockBlock.Hash().String(),
ParentHash: "0x0000000000000000000000000000000000000000000000000000000000000000", ParentHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
@ -413,9 +459,9 @@ var (
}, },
Transactions: MockTrxMetaPostPublsh, Transactions: MockTrxMetaPostPublsh,
Receipts: MockRctMetaPostPublish, Receipts: MockRctMetaPostPublish,
Uncles: []eth.UncleModel{}, Uncles: []models.UncleModel{},
StateNodes: MockStateMetaPostPublish, StateNodes: MockStateMetaPostPublish,
StorageNodes: []eth.StorageNodeWithStateKeyModel{ StorageNodes: []models.StorageNodeWithStateKeyModel{
{ {
Path: []byte{}, Path: []byte{},
CID: StorageCID.String(), CID: StorageCID.String(),
@ -431,14 +477,16 @@ var (
Trx1IPLD, _ = blocks.NewBlockWithCid(Tx1, Trx1CID) Trx1IPLD, _ = blocks.NewBlockWithCid(Tx1, Trx1CID)
Trx2IPLD, _ = blocks.NewBlockWithCid(Tx2, Trx2CID) Trx2IPLD, _ = blocks.NewBlockWithCid(Tx2, Trx2CID)
Trx3IPLD, _ = blocks.NewBlockWithCid(Tx3, Trx3CID) Trx3IPLD, _ = blocks.NewBlockWithCid(Tx3, Trx3CID)
Trx4IPLD, _ = blocks.NewBlockWithCid(Tx4, Trx4CID)
Rct1IPLD, _ = blocks.NewBlockWithCid(Rct1, Rct1CID) Rct1IPLD, _ = blocks.NewBlockWithCid(Rct1, Rct1CID)
Rct2IPLD, _ = blocks.NewBlockWithCid(Rct2, Rct2CID) Rct2IPLD, _ = blocks.NewBlockWithCid(Rct2, Rct2CID)
Rct3IPLD, _ = blocks.NewBlockWithCid(Rct3, Rct3CID) Rct3IPLD, _ = blocks.NewBlockWithCid(Rct3, Rct3CID)
Rct4IPLD, _ = blocks.NewBlockWithCid(Rct4, Rct4CID)
State1IPLD, _ = blocks.NewBlockWithCid(ContractLeafNode, State1CID) State1IPLD, _ = blocks.NewBlockWithCid(ContractLeafNode, State1CID)
State2IPLD, _ = blocks.NewBlockWithCid(AccountLeafNode, State2CID) State2IPLD, _ = blocks.NewBlockWithCid(AccountLeafNode, State2CID)
StorageIPLD, _ = blocks.NewBlockWithCid(StorageLeafNode, StorageCID) StorageIPLD, _ = blocks.NewBlockWithCid(StorageLeafNode, StorageCID)
MockIPLDs = eth2.IPLDs{ MockIPLDs = eth.IPLDs{
BlockNumber: new(big.Int).Set(BlockNumber), BlockNumber: new(big.Int).Set(BlockNumber),
Header: ipfs.BlockModel{ Header: ipfs.BlockModel{
Data: HeaderIPLD.RawData(), Data: HeaderIPLD.RawData(),
@ -457,6 +505,10 @@ var (
Data: Trx3IPLD.RawData(), Data: Trx3IPLD.RawData(),
CID: Trx3IPLD.Cid().String(), CID: Trx3IPLD.Cid().String(),
}, },
{
Data: Trx4IPLD.RawData(),
CID: Trx4IPLD.Cid().String(),
},
}, },
Receipts: []ipfs.BlockModel{ Receipts: []ipfs.BlockModel{
{ {
@ -471,8 +523,12 @@ var (
Data: Rct3IPLD.RawData(), Data: Rct3IPLD.RawData(),
CID: Rct3IPLD.Cid().String(), CID: Rct3IPLD.Cid().String(),
}, },
{
Data: Rct4IPLD.RawData(),
CID: Rct4IPLD.Cid().String(),
}, },
StateNodes: []eth2.StateNode{ },
StateNodes: []eth.StateNode{
{ {
StateLeafKey: common.BytesToHash(ContractLeafKey), StateLeafKey: common.BytesToHash(ContractLeafKey),
Type: sdtypes.Leaf, Type: sdtypes.Leaf,
@ -492,7 +548,7 @@ var (
Path: []byte{'\x0c'}, Path: []byte{'\x0c'},
}, },
}, },
StorageNodes: []eth2.StorageNode{ StorageNodes: []eth.StorageNode{
{ {
StateLeafKey: common.BytesToHash(ContractLeafKey), StateLeafKey: common.BytesToHash(ContractLeafKey),
StorageLeafKey: common.BytesToHash(StorageLeafKey), StorageLeafKey: common.BytesToHash(StorageLeafKey),
@ -516,46 +572,8 @@ var (
BaseFee: big.NewInt(params.InitialBaseFee), BaseFee: big.NewInt(params.InitialBaseFee),
} }
MockLondonTransactions, MockLondonReceipts, SenderAdd = createDynamicTransactionsAndReceipts(LondonBlockNum) MockLondonTransactions, MockLondonReceipts, _ = createDynamicTransactionsAndReceipts(LondonBlockNum)
MockLondonBlock = createNewBlock(&MockLondonHeader, MockLondonTransactions, nil, MockLondonReceipts, new(trie.Trie)) MockLondonBlock = createNewBlock(&MockLondonHeader, MockLondonTransactions, nil, MockLondonReceipts, new(trie.Trie))
MockLondonTrxMeta = []eth.TxModel{
{
CID: "", // This is empty until we go to publish to ipfs
MhKey: "",
Src: SenderAdd.Hex(),
Dst: Address.String(),
Index: 0,
TxHash: MockLondonTransactions[0].Hash().String(),
Data: []byte{},
},
}
MockLondonRctMeta = []eth.ReceiptModel{
{
CID: "",
MhKey: "",
Topic0s: []string{
mockTopic11.String(),
},
Topic1s: []string{
mockTopic12.String(),
},
Contract: "",
ContractHash: "",
LogContracts: []string{
Address.String(),
},
},
}
MockConvertedLondonPayload = eth.ConvertedPayload{
TotalDifficulty: MockLondonBlock.Difficulty(),
Block: MockLondonBlock,
Receipts: MockLondonReceipts,
TxMetaData: MockLondonTrxMeta,
ReceiptMetaData: MockLondonRctMeta,
StorageNodes: MockStorageNodes,
StateNodes: MockStateNodes,
}
) )
func createNewBlock(header *types.Header, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt, hasher types.TrieHasher) *types.Block { func createNewBlock(header *types.Header, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt, hasher types.TrieHasher) *types.Block {
@ -605,8 +623,8 @@ func createDynamicTransactionsAndReceipts(blockNumber *big.Int) (types.Transacti
// TODO: Change the receipt type to DynamicFeeTxType once this PR is merged. // TODO: Change the receipt type to DynamicFeeTxType once this PR is merged.
// https://github.com/ethereum/go-ethereum/pull/22806 // https://github.com/ethereum/go-ethereum/pull/22806
mockReceipt1 := &types.Receipt{ mockReceipt1 := &types.Receipt{
Type: types.AccessListTxType, Type: types.DynamicFeeTxType,
PostState: common.HexToHash("0x1").Bytes(), PostState: common.HexToHash("0x0").Bytes(),
Status: types.ReceiptStatusSuccessful, Status: types.ReceiptStatusSuccessful,
CumulativeGasUsed: 50, CumulativeGasUsed: 50,
Logs: []*types.Log{}, Logs: []*types.Log{},
@ -622,6 +640,7 @@ func createLegacyTransactionsAndReceipts() (types.Transactions, types.Receipts,
trx1 := types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), []byte{}) trx1 := types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), []byte{})
trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), []byte{}) trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), []byte{})
trx3 := types.NewContractCreation(2, big.NewInt(1500), 75, big.NewInt(150), MockContractByteCode) trx3 := types.NewContractCreation(2, big.NewInt(1500), 75, big.NewInt(150), MockContractByteCode)
trx4 := types.NewTransaction(3, AnotherAddress1, big.NewInt(2000), 100, big.NewInt(200), []byte{})
transactionSigner := types.MakeSigner(params.MainnetChainConfig, new(big.Int).Set(BlockNumber)) transactionSigner := types.MakeSigner(params.MainnetChainConfig, new(big.Int).Set(BlockNumber))
mockCurve := elliptic.P256() mockCurve := elliptic.P256()
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader) mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
@ -640,12 +659,16 @@ func createLegacyTransactionsAndReceipts() (types.Transactions, types.Receipts,
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
signedTrx4, err := types.SignTx(trx4, transactionSigner, mockPrvKey)
if err != nil {
log.Fatal(err)
}
SenderAddr, err := types.Sender(transactionSigner, signedTrx1) // same for both trx SenderAddr, err := types.Sender(transactionSigner, signedTrx1) // same for both trx
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
// make receipts // make receipts
mockReceipt1 := types.NewReceipt(common.HexToHash("0x0").Bytes(), false, 50) mockReceipt1 := types.NewReceipt(nil, false, 50)
hash1 := signedTrx1.Hash() hash1 := signedTrx1.Hash()
MockLog1.TxHash = hash1 MockLog1.TxHash = hash1
@ -663,11 +686,17 @@ func createLegacyTransactionsAndReceipts() (types.Transactions, types.Receipts,
mockReceipt2.GasUsed = mockReceipt2.CumulativeGasUsed - mockReceipt1.CumulativeGasUsed mockReceipt2.GasUsed = mockReceipt2.CumulativeGasUsed - mockReceipt1.CumulativeGasUsed
mockReceipt3 := types.NewReceipt(common.HexToHash("0x2").Bytes(), false, 175) mockReceipt3 := types.NewReceipt(common.HexToHash("0x2").Bytes(), false, 175)
mockReceipt3.Logs = []*types.Log{} mockReceipt3.Logs = []*types.Log{MockLog3, MockLog4, MockLog5}
mockReceipt3.TxHash = signedTrx3.Hash() mockReceipt3.TxHash = signedTrx3.Hash()
mockReceipt3.GasUsed = mockReceipt3.CumulativeGasUsed - mockReceipt2.CumulativeGasUsed mockReceipt3.GasUsed = mockReceipt3.CumulativeGasUsed - mockReceipt2.CumulativeGasUsed
return types.Transactions{signedTrx1, signedTrx2, signedTrx3}, types.Receipts{mockReceipt1, mockReceipt2, mockReceipt3}, SenderAddr // Receipt with failed status.
mockReceipt4 := types.NewReceipt(nil, true, 250)
mockReceipt4.Logs = []*types.Log{MockLog6}
mockReceipt4.TxHash = signedTrx4.Hash()
mockReceipt4.GasUsed = mockReceipt4.CumulativeGasUsed - mockReceipt3.CumulativeGasUsed
return types.Transactions{signedTrx1, signedTrx2, signedTrx3, signedTrx4}, types.Receipts{mockReceipt1, mockReceipt2, mockReceipt3, mockReceipt4}, SenderAddr
} }
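Illustrative aside (not from this changeset): the failed receipt added above relies on types.NewReceipt deriving Status from its failed flag when no post-state root is supplied, and that Status value is what the new GraphQL status field ultimately reports. A minimal standalone sketch of that behaviour:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// With a nil post-state root, NewReceipt encodes the outcome in Status:
	// failed=true gives ReceiptStatusFailed (0), failed=false gives ReceiptStatusSuccessful (1).
	failedRct := types.NewReceipt(nil, true, 250)
	okRct := types.NewReceipt(nil, false, 50)
	fmt.Println(failedRct.Status == types.ReceiptStatusFailed) // true
	fmt.Println(okRct.Status == types.ReceiptStatusSuccessful) // true
}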
func GetTxnRlp(num int, txs types.Transactions) []byte { func GetTxnRlp(num int, txs types.Transactions) []byte {


@ -22,9 +22,9 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/statediff/indexer/ipfs"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
sdtypes "github.com/ethereum/go-ethereum/statediff/types" sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
) )
// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction // RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction
@ -146,10 +146,43 @@ type StorageNode struct {
// Passed to IPLDFetcher // Passed to IPLDFetcher
type CIDWrapper struct { type CIDWrapper struct {
BlockNumber *big.Int BlockNumber *big.Int
Header eth.HeaderModel Header models.HeaderModel
Uncles []eth.UncleModel Uncles []models.UncleModel
Transactions []eth.TxModel Transactions []models.TxModel
Receipts []eth.ReceiptModel Receipts []models.ReceiptModel
StateNodes []eth.StateNodeModel StateNodes []models.StateNodeModel
StorageNodes []eth.StorageNodeWithStateKeyModel StorageNodes []models.StorageNodeWithStateKeyModel
}
// ConvertedPayload is a custom type which packages raw ETH data for publishing to IPFS and filtering to subscribers
// Returned by PayloadConverter
// Passed to IPLDPublisher and ResponseFilterer
type ConvertedPayload struct {
TotalDifficulty *big.Int
Block *types.Block
TxMetaData []models.TxModel
Receipts types.Receipts
ReceiptMetaData []models.ReceiptModel
StateNodes []sdtypes.StateNode
StorageNodes map[string][]sdtypes.StorageNode
}
// LogResult represents a log.
type LogResult struct {
LeafCID string `db:"leaf_cid"`
ReceiptID int64 `db:"receipt_id"`
Address string `db:"address"`
Index int64 `db:"index"`
Data []byte `db:"log_data"`
Topic0 string `db:"topic0"`
Topic1 string `db:"topic1"`
Topic2 string `db:"topic2"`
Topic3 string `db:"topic3"`
LogLeafData []byte `db:"data"`
RctCID string `db:"cid"`
RctStatus uint64 `db:"post_status"`
BlockNumber string `db:"block_number"`
BlockHash string `db:"block_hash"`
TxnIndex int64 `db:"txn_index"`
TxHash string `db:"tx_hash"`
} }
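Illustrative aside: one plausible way a LogResult row could be assembled from the new eth.log_cids table, joining the receipt, transaction, header and IPLD block tables. The SQL text, the leaf_mh_key column and the helper name are assumptions made for this sketch, not necessarily the exact query RetrieveFilteredGQLLogs runs.

package eth

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/jmoiron/sqlx"
)

// getLogsByBlockHashPgStr is a hypothetical query; the column and join names
// are assumptions for illustration only.
const getLogsByBlockHashPgStr = `SELECT eth.log_cids.leaf_cid, eth.log_cids.receipt_id, eth.log_cids.address,
		eth.log_cids.index, eth.log_cids.log_data,
		eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
		data, eth.receipt_cids.cid, eth.receipt_cids.post_status,
		eth.header_cids.block_number, eth.header_cids.block_hash,
		eth.transaction_cids.index AS txn_index, eth.transaction_cids.tx_hash
	FROM eth.log_cids
		INNER JOIN eth.receipt_cids ON (eth.log_cids.receipt_id = eth.receipt_cids.id)
		INNER JOIN eth.transaction_cids ON (eth.receipt_cids.tx_id = eth.transaction_cids.id)
		INNER JOIN eth.header_cids ON (eth.transaction_cids.header_id = eth.header_cids.id)
		INNER JOIN public.blocks ON (eth.log_cids.leaf_mh_key = public.blocks.key)
	WHERE eth.header_cids.block_hash = $1
	ORDER BY eth.transaction_cids.index, eth.log_cids.index`

// retrieveLogsSketch shows how such rows scan straight into LogResult via its db tags.
func retrieveLogsSketch(tx *sqlx.Tx, blockHash common.Hash) ([]LogResult, error) {
	results := make([]LogResult, 0)
	err := tx.Select(&results, getLogsByBlockHashPgStr, blockHash.String())
	return results, err
}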


@ -24,6 +24,8 @@ type LogResponse struct {
Topics []common.Hash `json:"topics"` Topics []common.Hash `json:"topics"`
Data hexutil.Bytes `json:"data"` Data hexutil.Bytes `json:"data"`
Transaction TransactionResp `json:"transaction"` Transaction TransactionResp `json:"transaction"`
ReceiptCID string `json:"receiptCID"`
Status int32 `json:"status"`
} }
type TransactionResp struct { type TransactionResp struct {
@ -43,18 +45,23 @@ func NewClient(endpoint string) *Client {
return &Client{client: client} return &Client{client: client}
} }
func (c *Client) GetLogs(ctx context.Context, hash common.Hash, address common.Address) ([]LogResponse, error) { func (c *Client) GetLogs(ctx context.Context, hash common.Hash, address *common.Address) ([]LogResponse, error) {
getLogsQuery := fmt.Sprintf(` params := fmt.Sprintf(`blockHash: "%s"`, hash.String())
query{ if address != nil {
getLogs(blockHash: "%s", contract: "%s") { params += fmt.Sprintf(`, contract: "%s"`, address.String())
}
getLogsQuery := fmt.Sprintf(`query{
getLogs(%s) {
data data
topics topics
transaction { transaction {
hash hash
} }
status
receiptCID
} }
} }`, params)
`, hash.String(), address.String())
req := gqlclient.NewRequest(getLogsQuery) req := gqlclient.NewRequest(getLogsQuery)
req.Header.Set("Cache-Control", "no-cache") req.Header.Set("Cache-Control", "no-cache")
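Illustrative usage of the updated client API; the endpoint URL and block hash are placeholders, while NewClient and GetLogs are the functions shown above.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"

	"github.com/vulcanize/ipld-eth-server/pkg/graphql"
)

func main() {
	// The endpoint is a placeholder; point it at a running ipld-eth-server GraphQL service.
	client := graphql.NewClient("http://127.0.0.1:8083/graphql")
	ctx := context.Background()

	// Placeholder block hash; substitute a hash known to the indexed database.
	blockHash := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")

	// A nil contract address now returns every log in the block.
	allLogs, err := client.GetLogs(ctx, blockHash, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Supplying an address restricts the result to that contract's logs.
	contract := common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
	contractLogs, err := client.GetLogs(ctx, blockHash, &contract)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d logs total, %d from the contract\n", len(allLogs), len(contractLogs))
}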


@ -31,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/vulcanize/ipld-eth-server/pkg/eth" "github.com/vulcanize/ipld-eth-server/pkg/eth"
) )
@ -94,14 +93,18 @@ type Log struct {
transaction *Transaction transaction *Transaction
log *types.Log log *types.Log
cid string cid string
ipldBlock []byte receiptCID string
ipldBlock []byte // log leaf node IPLD block data
status uint64
} }
func (l *Log) Transaction(ctx context.Context) *Transaction { // Transaction returns transaction that generated this log entry.
func (l *Log) Transaction(_ context.Context) *Transaction {
return l.transaction return l.transaction
} }
func (l *Log) Account(ctx context.Context, args BlockNumberArgs) *Account { // Account returns the contract account which generated this log.
func (l *Log) Account(_ context.Context, args BlockNumberArgs) *Account {
return &Account{ return &Account{
backend: l.backend, backend: l.backend,
address: l.log.Address, address: l.log.Address,
@ -109,24 +112,39 @@ func (l *Log) Account(ctx context.Context, args BlockNumberArgs) *Account {
} }
} }
func (l *Log) Index(ctx context.Context) int32 { // Index returns the index of this log in the block
func (l *Log) Index(_ context.Context) int32 {
return int32(l.log.Index) return int32(l.log.Index)
} }
func (l *Log) Topics(ctx context.Context) []common.Hash { // Topics returns the list of 0-4 indexed topics for the log.
func (l *Log) Topics(_ context.Context) []common.Hash {
return l.log.Topics return l.log.Topics
} }
func (l *Log) Data(ctx context.Context) hexutil.Bytes { // Data returns data of this log.
return hexutil.Bytes(l.log.Data) func (l *Log) Data(_ context.Context) hexutil.Bytes {
return l.log.Data
} }
func (l *Log) Cid(ctx context.Context) string { // Cid returns cid of the leaf node of this log.
func (l *Log) Cid(_ context.Context) string {
return l.cid return l.cid
} }
func (l *Log) IpldBlock(ctx context.Context) hexutil.Bytes { // IpldBlock returns IPLD block of the leaf node of this log.
return hexutil.Bytes(l.ipldBlock) func (l *Log) IpldBlock(_ context.Context) hexutil.Bytes {
return l.ipldBlock
}
// Status returns the status of the receipt IPLD block this Log exists in.
func (l *Log) Status(_ context.Context) int32 {
return int32(l.status)
}
// ReceiptCID returns the receipt CID of the receipt IPLD block this Log exists in.
func (l *Log) ReceiptCID(_ context.Context) string {
return l.receiptCID
} }
// Transaction represents an Ethereum transaction. // Transaction represents an Ethereum transaction.
@ -1007,40 +1025,90 @@ func (r *Resolver) GetLogs(ctx context.Context, args struct {
BlockHash common.Hash BlockHash common.Hash
Contract *common.Address Contract *common.Address
}) (*[]*Log, error) { }) (*[]*Log, error) {
ret := make([]*Log, 0, 10)
receiptCIDs, receiptsBytes, txs, err := r.backend.IPLDRetriever.RetrieveReceiptsByBlockHash(args.BlockHash) var filter eth.ReceiptFilter
if args.Contract != nil {
filter.LogAddresses = []string{args.Contract.String()}
}
// Begin tx
tx, err := r.backend.DB.Beginx()
if err != nil { if err != nil {
return nil, err return nil, err
} }
var logIndexInBlock uint = 0 filteredLogs, err := r.backend.Retriever.RetrieveFilteredGQLLogs(tx, filter, &args.BlockHash)
receipts := make(types.Receipts, len(receiptsBytes)) if err != nil {
for index, receiptBytes := range receiptsBytes {
receiptCID := receiptCIDs[index]
receipt := new(types.Receipt)
if err := rlp.DecodeBytes(receiptBytes, receipt); err != nil {
return nil, err return nil, err
} }
receipts[index] = receipt if err = tx.Commit(); err != nil {
for _, log := range receipt.Logs { return nil, err
log.Index = logIndexInBlock }
logIndexInBlock++
if args.Contract == nil || *args.Contract == log.Address { rctLog := decomposeGQLLogs(filteredLogs)
if err != nil {
return nil, err
}
ret := make([]*Log, 0, 10)
for _, l := range rctLog {
ret = append(ret, &Log{ ret = append(ret, &Log{
backend: r.backend, backend: r.backend,
log: log, log: l.Log,
cid: receiptCID, cid: l.CID,
ipldBlock: receiptBytes, receiptCID: l.RctCID,
ipldBlock: l.LogLeafData,
transaction: &Transaction{ transaction: &Transaction{
hash: txs[index], hash: l.Log.TxHash,
}, },
status: l.RctStatus,
}) })
} }
}
}
return &ret, nil return &ret, nil
} }
type logsCID struct {
Log *types.Log
CID string
RctCID string
LogLeafData []byte
RctStatus uint64
}
// decomposeGQLLogs returns logs for graphql.
func decomposeGQLLogs(logCIDs []eth.LogResult) []logsCID {
logs := make([]logsCID, len(logCIDs))
for i, l := range logCIDs {
topics := make([]common.Hash, 0)
if l.Topic0 != "" {
topics = append(topics, common.HexToHash(l.Topic0))
}
if l.Topic1 != "" {
topics = append(topics, common.HexToHash(l.Topic1))
}
if l.Topic2 != "" {
topics = append(topics, common.HexToHash(l.Topic2))
}
if l.Topic3 != "" {
topics = append(topics, common.HexToHash(l.Topic3))
}
logs[i] = logsCID{
Log: &types.Log{
Address: common.HexToAddress(l.Address),
Topics: topics,
Data: l.Data,
Index: uint(l.Index),
TxHash: common.HexToHash(l.TxHash),
},
CID: l.LeafCID,
RctCID: l.RctCID,
LogLeafData: l.LogLeafData,
RctStatus: l.RctStatus,
}
}
return logs
}
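Illustrative, in-package sketch (decomposeGQLLogs is unexported): a row with only topic0 populated yields a types.Log carrying a single topic, since empty topic strings are skipped rather than turned into zero hashes. The function and values below are not part of this changeset.

package graphql

import (
	"github.com/ethereum/go-ethereum/common"

	"github.com/vulcanize/ipld-eth-server/pkg/eth"
)

// exampleDecompose is a sketch only; it is not used by the resolver.
func exampleDecompose() bool {
	in := []eth.LogResult{{
		Address: "0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476596",
		Topic0:  "0x0d",
		Data:    []byte{},
		Index:   5,
	}}
	out := decomposeGQLLogs(in)
	return len(out[0].Log.Topics) == 1 && out[0].Log.Topics[0] == common.HexToHash("0x0d")
}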


@ -20,6 +20,7 @@ import (
"context" "context"
"fmt" "fmt"
"math/big" "math/big"
"time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
@ -28,20 +29,33 @@ import (
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff" "github.com/ethereum/go-ethereum/statediff"
"github.com/ethereum/go-ethereum/statediff/indexer"
"github.com/ethereum/go-ethereum/statediff/indexer/node"
"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
pgipfsethdb "github.com/vulcanize/ipfs-ethdb/postgres"
eth2 "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
"github.com/vulcanize/ipld-eth-indexer/pkg/shared"
"github.com/vulcanize/ipld-eth-server/pkg/eth" "github.com/vulcanize/ipld-eth-server/pkg/eth"
"github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers" "github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers"
"github.com/vulcanize/ipld-eth-server/pkg/graphql" "github.com/vulcanize/ipld-eth-server/pkg/graphql"
) )
// SetupDB is used to set up a db for watcher tests
func SetupDB() (*postgres.DB, error) {
uri := postgres.DbConnectionString(postgres.ConnectionParams{
User: "vdbm",
Password: "password",
Hostname: "localhost",
Name: "vulcanize_testing",
Port: 8077,
})
return postgres.NewDB(uri, postgres.ConnectionConfig{}, node.Info{})
}
var _ = Describe("GraphQL", func() { var _ = Describe("GraphQL", func() {
const ( const (
gqlEndPoint = "127.0.0.1:8083" gqlEndPoint = "127.0.0.1:8083"
@ -66,13 +80,18 @@ var _ = Describe("GraphQL", func() {
It("test init", func() { It("test init", func() {
var err error var err error
db, err = shared.SetupDB() db, err = SetupDB()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
transformer := eth2.NewStateDiffTransformer(chainConfig, db) transformer := indexer.NewStateDiffIndexer(chainConfig, db)
backend, err = eth.NewEthBackend(db, &eth.Config{ backend, err = eth.NewEthBackend(db, &eth.Config{
ChainConfig: chainConfig, ChainConfig: chainConfig,
VmConfig: vm.Config{}, VmConfig: vm.Config{},
RPCGasCap: big.NewInt(10000000000), RPCGasCap: big.NewInt(10000000000),
CacheConfig: pgipfsethdb.CacheConfig{
Name: "graphql_test",
Size: 3000000, // 3MB
ExpiryDuration: time.Hour,
},
}) })
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -109,35 +128,43 @@ var _ = Describe("GraphQL", func() {
var diff statediff.StateObject var diff statediff.StateObject
diff, err = builder.BuildStateDiffObject(args, params) diff, err = builder.BuildStateDiffObject(args, params)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
diffRlp, err := rlp.EncodeToBytes(diff)
tx, err := transformer.PushBlock(block, rcts, mockTD)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
blockRlp, err := rlp.EncodeToBytes(block)
for _, node := range diff.Nodes {
err = transformer.PushStateNode(tx, node)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
receiptsRlp, err := rlp.EncodeToBytes(rcts)
Expect(err).ToNot(HaveOccurred())
payload := statediff.Payload{
StateObjectRlp: diffRlp,
BlockRlp: blockRlp,
ReceiptsRlp: receiptsRlp,
TotalDifficulty: mockTD,
} }
_, err = transformer.Transform(0, payload) err = tx.Close(err)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
} }
// Insert some non-canonical data into the database so that we test our ability to discern canonicity // Insert some non-canonical data into the database so that we test our ability to discern canonicity
indexAndPublisher := eth2.NewIPLDPublisher(db) indexAndPublisher := indexer.NewStateDiffIndexer(chainConfig, db)
blockHash = test_helpers.MockBlock.Hash() blockHash = test_helpers.MockBlock.Hash()
contractAddress = test_helpers.ContractAddr contractAddress = test_helpers.ContractAddr
err = indexAndPublisher.Publish(test_helpers.MockConvertedPayload) tx, err := indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Close(err)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
// The non-canonical header has a child // The non-canonical header has a child
err = indexAndPublisher.Publish(test_helpers.MockConvertedPayloadForChild) tx, err = indexAndPublisher.PushBlock(test_helpers.MockChild, test_helpers.MockReceipts, test_helpers.MockChild.Difficulty())
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = publishCode(db, test_helpers.ContractCodeHash, test_helpers.ContractCode)
ccHash := sdtypes.CodeAndCodeHash{
Hash: test_helpers.CodeHash,
Code: test_helpers.ContractCode,
}
err = indexAndPublisher.PushCodeAndCodeHash(tx, ccHash)
Expect(err).ToNot(HaveOccurred())
err = tx.Close(err)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
graphQLServer, err = graphql.New(backend, gqlEndPoint, nil, []string{"*"}, rpc.HTTPTimeouts{}) graphQLServer, err = graphql.New(backend, gqlEndPoint, nil, []string{"*"}, rpc.HTTPTimeouts{})
@ -156,7 +183,7 @@ var _ = Describe("GraphQL", func() {
Describe("eth_getLogs", func() { Describe("eth_getLogs", func() {
It("Retrieves logs that matches the provided blockHash and contract address", func() { It("Retrieves logs that matches the provided blockHash and contract address", func() {
logs, err := client.GetLogs(ctx, blockHash, contractAddress) logs, err := client.GetLogs(ctx, blockHash, &contractAddress)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
expectedLogs := []graphql.LogResponse{ expectedLogs := []graphql.LogResponse{
@ -164,13 +191,39 @@ var _ = Describe("GraphQL", func() {
Topics: test_helpers.MockLog1.Topics, Topics: test_helpers.MockLog1.Topics,
Data: hexutil.Bytes(test_helpers.MockLog1.Data), Data: hexutil.Bytes(test_helpers.MockLog1.Data),
Transaction: graphql.TransactionResp{Hash: test_helpers.MockTransactions[0].Hash()}, Transaction: graphql.TransactionResp{Hash: test_helpers.MockTransactions[0].Hash()},
ReceiptCID: test_helpers.Rct1CID.String(),
Status: int32(test_helpers.MockReceipts[0].Status),
}, },
} }
Expect(logs).To(Equal(expectedLogs)) Expect(logs).To(Equal(expectedLogs))
}) })
It("Retrieves logs for the failed receipt status that matches the provided blockHash and another contract address", func() {
logs, err := client.GetLogs(ctx, blockHash, &test_helpers.AnotherAddress2)
Expect(err).ToNot(HaveOccurred())
expectedLogs := []graphql.LogResponse{
{
Topics: test_helpers.MockLog6.Topics,
Data: hexutil.Bytes(test_helpers.MockLog6.Data),
Transaction: graphql.TransactionResp{Hash: test_helpers.MockTransactions[3].Hash()},
ReceiptCID: test_helpers.Rct4CID.String(),
Status: int32(test_helpers.MockReceipts[3].Status),
},
}
Expect(logs).To(Equal(expectedLogs))
})
It("Retrieves all the logs for the receipt that matches the provided blockHash and nil contract address", func() {
logs, err := client.GetLogs(ctx, blockHash, nil)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(6))
})
It("Retrieves logs with random hash", func() { It("Retrieves logs with random hash", func() {
logs, err := client.GetLogs(ctx, randomHash, contractAddress) logs, err := client.GetLogs(ctx, randomHash, &contractAddress)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(0)) Expect(len(logs)).To(Equal(0))
}) })
@ -208,23 +261,3 @@ var _ = Describe("GraphQL", func() {
}) })
}) })
}) })
func publishCode(db *postgres.DB, codeHash common.Hash, code []byte) error {
tx, err := db.Beginx()
if err != nil {
return err
}
mhKey, err := shared.MultihashKeyFromKeccak256(codeHash)
if err != nil {
_ = tx.Rollback()
return err
}
if err := shared.PublishDirect(tx, mhKey, code); err != nil {
_ = tx.Rollback()
return err
}
return tx.Commit()
}


@ -67,11 +67,17 @@ const schema string = `
# Transaction is the transaction that generated this log entry. # Transaction is the transaction that generated this log entry.
transaction: Transaction transaction: Transaction
# CID for the Receipt IPLD block this Log exists in. # CID for the leaf node IPLD block of the log.
cid: String! cid: String!
# IPLD block data for the Receipt this Log exists in. # ReceiptCID for the Receipt IPLD block this Log exists in.
receiptCID: String!
# IPLD block data for the Log Leaf node.
ipldBlock: Bytes! ipldBlock: Bytes!
# Status of the Receipt IPLD block this Log exists in.
status: Int!
} }
# Transaction is an Ethereum transaction. # Transaction is an Ethereum transaction.
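An illustrative query against the updated Log type; the blockHash and contract values below are placeholders.

// exampleGetLogsQuery is a sketch exercising the new receiptCID and status fields.
const exampleGetLogsQuery = `query {
	getLogs(blockHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
	        contract: "0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592") {
		data
		topics
		transaction { hash }
		cid
		receiptCID
		ipldBlock
		status
	}
}`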


@ -29,7 +29,7 @@ import (
func StartHTTPEndpoint(endpoint string, apis []rpc.API, modules []string, cors []string, vhosts []string, timeouts rpc.HTTPTimeouts) (*rpc.Server, error) { func StartHTTPEndpoint(endpoint string, apis []rpc.API, modules []string, cors []string, vhosts []string, timeouts rpc.HTTPTimeouts) (*rpc.Server, error) {
srv := rpc.NewServer() srv := rpc.NewServer()
err := node.RegisterApisFromWhitelist(apis, modules, srv, false) err := node.RegisterApis(apis, modules, srv, false)
if err != nil { if err != nil {
utils.Fatalf("Could not register HTTP API: %w", err) utils.Fatalf("Could not register HTTP API: %w", err)
} }


@ -37,7 +37,7 @@ func StartWSEndpoint(endpoint string, apis []rpc.API, modules []string, wsOrigin
// Register all the APIs exposed by the services // Register all the APIs exposed by the services
handler := rpc.NewServer() handler := rpc.NewServer()
err = node.RegisterApisFromWhitelist(apis, modules, handler, exposeAll) err = node.RegisterApis(apis, modules, handler, exposeAll)
if err != nil { if err != nil {
utils.Fatalf("Could not register WS API: %w", err) utils.Fatalf("Could not register WS API: %w", err)
} }


@ -20,10 +20,9 @@ import (
"context" "context"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-eth-indexer/pkg/shared"
"github.com/vulcanize/ipld-eth-server/pkg/eth" "github.com/vulcanize/ipld-eth-server/pkg/eth"
) )


@ -23,15 +23,11 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"github.com/ethereum/go-ethereum/rpc"
"github.com/vulcanize/ipld-eth-indexer/pkg/shared"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
"github.com/vulcanize/ipld-eth-indexer/utils"
"github.com/vulcanize/ipld-eth-server/pkg/prom" "github.com/vulcanize/ipld-eth-server/pkg/prom"
"github.com/vulcanize/ipld-eth-server/pkg/eth" "github.com/vulcanize/ipld-eth-server/pkg/eth"
@ -39,24 +35,25 @@ import (
// Env variables // Env variables
const ( const (
SERVER_WS_PATH = "SERVER_WS_PATH" serverWsPath = "SERVER_WS_PATH"
SERVER_IPC_PATH = "SERVER_IPC_PATH" serverIpcPath = "SERVER_IPC_PATH"
SERVER_HTTP_PATH = "SERVER_HTTP_PATH" serverHTTPPath = "SERVER_HTTP_PATH"
SERVER_MAX_IDLE_CONNECTIONS = "SERVER_MAX_IDLE_CONNECTIONS" serverMaxIdleConnections = "SERVER_MAX_IDLE_CONNECTIONS"
SERVER_MAX_OPEN_CONNECTIONS = "SERVER_MAX_OPEN_CONNECTIONS" serverMaxOpenConnections = "SERVER_MAX_OPEN_CONNECTIONS"
SERVER_MAX_CONN_LIFETIME = "SERVER_MAX_CONN_LIFETIME" serverMaxConnLifetime = "SERVER_MAX_CONN_LIFETIME"
ETH_DEFAULT_SENDER_ADDR = "ETH_DEFAULT_SENDER_ADDR" ethDefaultSenderAddr = "ETH_DEFAULT_SENDER_ADDR"
ETH_RPC_GAS_CAP = "ETH_RPC_GAS_CAP" ethRPCGasCap = "ETH_RPC_GAS_CAP"
ETH_CHAIN_CONFIG = "ETH_CHAIN_CONFIG" ethChainConfig = "ETH_CHAIN_CONFIG"
ETH_SUPPORTS_STATEDIFF = "ETH_SUPPORTS_STATEDIFF" ethSupportsStatediff = "ETH_SUPPORTS_STATEDIFF"
) )
// Config struct // Config struct
type Config struct { type Config struct {
DB *postgres.DB DB *postgres.DB
DBConfig postgres.Config DBConfig postgres.ConnectionConfig
DBParams postgres.ConnectionParams
WSEnabled bool WSEnabled bool
WSEndpoint string WSEndpoint string
@ -89,17 +86,16 @@ type Config struct {
func NewConfig() (*Config, error) { func NewConfig() (*Config, error) {
c := new(Config) c := new(Config)
viper.BindEnv("ethereum.httpPath", shared.ETH_HTTP_PATH) viper.BindEnv("ethereum.httpPath", ethHTTPPath)
viper.BindEnv("ethereum.defaultSender", ETH_DEFAULT_SENDER_ADDR) viper.BindEnv("ethereum.defaultSender", ethDefaultSenderAddr)
viper.BindEnv("ethereum.rpcGasCap", ETH_RPC_GAS_CAP) viper.BindEnv("ethereum.rpcGasCap", ethRPCGasCap)
viper.BindEnv("ethereum.chainConfig", ETH_CHAIN_CONFIG) viper.BindEnv("ethereum.chainConfig", ethChainConfig)
viper.BindEnv("ethereum.supportsStateDiff", ETH_SUPPORTS_STATEDIFF) viper.BindEnv("ethereum.supportsStateDiff", ethSupportsStatediff)
c.DBConfig.Init()
c.dbInit()
ethHTTP := viper.GetString("ethereum.httpPath") ethHTTP := viper.GetString("ethereum.httpPath")
ethHTTPEndpoint := fmt.Sprintf("http://%s", ethHTTP) ethHTTPEndpoint := fmt.Sprintf("http://%s", ethHTTP)
nodeInfo, cli, err := shared.GetEthNodeAndClient(ethHTTPEndpoint) nodeInfo, cli, err := getEthNodeAndClient(ethHTTPEndpoint)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -184,9 +180,13 @@ func NewConfig() (*Config, error) {
c.IpldGraphqlEnabled = ipldGraphqlEnabled c.IpldGraphqlEnabled = ipldGraphqlEnabled
overrideDBConnConfig(&c.DBConfig) overrideDBConnConfig(&c.DBConfig)
serveDB := utils.LoadPostgres(c.DBConfig, nodeInfo, false) serveDB, err := postgres.NewDB(postgres.DbConnectionString(c.DBParams), c.DBConfig, nodeInfo)
prom.RegisterDBCollector(c.DBConfig.Name, serveDB.DB) if err != nil {
c.DB = &serveDB return nil, err
}
prom.RegisterDBCollector(c.DBParams.Name, serveDB.DB)
c.DB = serveDB
defaultSenderStr := viper.GetString("ethereum.defaultSender") defaultSenderStr := viper.GetString("ethereum.defaultSender")
if defaultSenderStr != "" { if defaultSenderStr != "" {
@ -208,11 +208,31 @@ func NewConfig() (*Config, error) {
return c, err return c, err
} }
func overrideDBConnConfig(con *postgres.Config) { func overrideDBConnConfig(con *postgres.ConnectionConfig) {
viper.BindEnv("database.server.maxIdle", SERVER_MAX_IDLE_CONNECTIONS) viper.BindEnv("database.server.maxIdle", serverMaxIdleConnections)
viper.BindEnv("database.server.maxOpen", SERVER_MAX_OPEN_CONNECTIONS) viper.BindEnv("database.server.maxOpen", serverMaxOpenConnections)
viper.BindEnv("database.server.maxLifetime", SERVER_MAX_CONN_LIFETIME) viper.BindEnv("database.server.maxLifetime", serverMaxConnLifetime)
con.MaxIdle = viper.GetInt("database.server.maxIdle") con.MaxIdle = viper.GetInt("database.server.maxIdle")
con.MaxOpen = viper.GetInt("database.server.maxOpen") con.MaxOpen = viper.GetInt("database.server.maxOpen")
con.MaxLifetime = viper.GetInt("database.server.maxLifetime") con.MaxLifetime = viper.GetInt("database.server.maxLifetime")
} }
func (d *Config) dbInit() {
viper.BindEnv("database.name", databaseName)
viper.BindEnv("database.hostname", databaseHostname)
viper.BindEnv("database.port", databasePort)
viper.BindEnv("database.user", databaseUser)
viper.BindEnv("database.password", databasePassword)
viper.BindEnv("database.maxIdle", databaseMaxIdleConnections)
viper.BindEnv("database.maxOpen", databaseMaxOpenConnections)
viper.BindEnv("database.maxLifetime", databaseMaxOpenConnLifetime)
d.DBParams.Name = viper.GetString("database.name")
d.DBParams.Hostname = viper.GetString("database.hostname")
d.DBParams.Port = viper.GetInt("database.port")
d.DBParams.User = viper.GetString("database.user")
d.DBParams.Password = viper.GetString("database.password")
d.DBConfig.MaxIdle = viper.GetInt("database.maxIdle")
d.DBConfig.MaxOpen = viper.GetInt("database.maxOpen")
d.DBConfig.MaxLifetime = viper.GetInt("database.maxLifetime")
}

pkg/serve/env.go (new file, 50 lines)

@ -0,0 +1,50 @@
package serve
import (
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff/indexer/node"
"github.com/spf13/viper"
)
// Env variables
const (
HTTPTimeout = "HTTP_TIMEOUT"
EthWsPath = "ETH_WS_PATH"
ethHTTPPath = "ETH_HTTP_PATH"
ethNodeID = "ETH_NODE_ID"
ethClientName = "ETH_CLIENT_NAME"
ethGenesisBlock = "ETH_GENESIS_BLOCK"
ethNetworkID = "ETH_NETWORK_ID"
ethChainID = "ETH_CHAIN_ID"
databaseName = "DATABASE_NAME"
databaseHostname = "DATABASE_HOSTNAME"
databasePort = "DATABASE_PORT"
databaseUser = "DATABASE_USER"
databasePassword = "DATABASE_PASSWORD"
databaseMaxIdleConnections = "DATABASE_MAX_IDLE_CONNECTIONS"
databaseMaxOpenConnections = "DATABASE_MAX_OPEN_CONNECTIONS"
databaseMaxOpenConnLifetime = "DATABASE_MAX_CONN_LIFETIME"
)
// getEthNodeAndClient returns eth node info and client from path url
func getEthNodeAndClient(path string) (node.Info, *rpc.Client, error) {
viper.BindEnv("ethereum.nodeID", ethNodeID)
viper.BindEnv("ethereum.clientName", ethClientName)
viper.BindEnv("ethereum.genesisBlock", ethGenesisBlock)
viper.BindEnv("ethereum.networkID", ethNetworkID)
viper.BindEnv("ethereum.chainID", ethChainID)
rpcClient, err := rpc.Dial(path)
if err != nil {
return node.Info{}, nil, err
}
return node.Info{
ID: viper.GetString("ethereum.nodeID"),
ClientName: viper.GetString("ethereum.clientName"),
GenesisBlock: viper.GetString("ethereum.genesisBlock"),
NetworkID: viper.GetString("ethereum.networkID"),
ChainID: viper.GetUint64("ethereum.chainID"),
}, rpcClient, nil
}
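Illustrative, in-package sketch of the new helper (getEthNodeAndClient is unexported); the node values and endpoint are placeholders, and rpc.Dial needs a reachable node for the call to succeed.

package serve

import (
	"fmt"
	"os"
)

// exampleNodeInfoFromEnv is a sketch only; it seeds the environment variables
// bound above and then dials a placeholder endpoint.
func exampleNodeInfoFromEnv() error {
	os.Setenv(ethNodeID, "test-node")
	os.Setenv(ethClientName, "Geth")
	os.Setenv(ethGenesisBlock, "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") // mainnet genesis, as an example
	os.Setenv(ethNetworkID, "1")
	os.Setenv(ethChainID, "1")

	info, rpcClient, err := getEthNodeAndClient("http://localhost:8545") // placeholder endpoint
	if err != nil {
		return err
	}
	defer rpcClient.Close()

	fmt.Printf("connected to %s (network %s, chain %d)\n", info.ClientName, info.NetworkID, info.ChainID)
	return nil
}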


@ -20,23 +20,20 @@ import (
"fmt" "fmt"
"strconv" "strconv"
"sync" "sync"
"time"
"github.com/vulcanize/ipld-eth-server/pkg/net"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
ethnode "github.com/ethereum/go-ethereum/node" ethnode "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
pgipfsethdb "github.com/vulcanize/ipfs-ethdb/postgres"
eth2 "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
"github.com/vulcanize/ipld-eth-server/pkg/eth" "github.com/vulcanize/ipld-eth-server/pkg/eth"
"github.com/vulcanize/ipld-eth-server/pkg/net"
) )
const ( const (
@ -52,7 +49,7 @@ type Server interface {
APIs() []rpc.API APIs() []rpc.API
Protocols() []p2p.Protocol Protocols() []p2p.Protocol
// Pub-Sub handling event loop // Pub-Sub handling event loop
Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth2.ConvertedPayload) Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth.ConvertedPayload)
// Method to subscribe to the service // Method to subscribe to the service
Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitChan chan<- bool, params eth.SubscriptionSettings) Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitChan chan<- bool, params eth.SubscriptionSettings)
// Method to unsubscribe from the service // Method to unsubscribe from the service
@ -107,6 +104,11 @@ func NewServer(settings *Config) (Server, error) {
VmConfig: vm.Config{}, VmConfig: vm.Config{},
DefaultSender: settings.DefaultSender, DefaultSender: settings.DefaultSender,
RPCGasCap: settings.RPCGasCap, RPCGasCap: settings.RPCGasCap,
CacheConfig: pgipfsethdb.CacheConfig{
Name: "ipld-eth-server",
Size: 3000000, // 3MB
ExpiryDuration: time.Hour,
},
}) })
return sap, err return sap, err
} }
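The CacheConfig literal added above wires an ipfs-ethdb read cache into the backend. As a standalone sketch, with the same values pulled into a helper purely for illustration (behavior unchanged, not part of the diff):

package serve

import (
    "time"

    pgipfsethdb "github.com/vulcanize/ipfs-ethdb/postgres"
)

// newCacheConfig mirrors the literal added in NewServer above.
func newCacheConfig() pgipfsethdb.CacheConfig {
    return pgipfsethdb.CacheConfig{
        Name:           "ipld-eth-server",
        Size:           3000000,   // ~3 MB, per the inline comment in the diff
        ExpiryDuration: time.Hour, // cached entries expire after an hour
    }
}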
@ -145,7 +147,7 @@ func (sap *Service) APIs() []rpc.API {
// It filters and sends this data to any subscribers to the service // It filters and sends this data to any subscribers to the service
// This process can also be stood up alone, without an screenAndServePayload attached to a Sync process // This process can also be stood up alone, without an screenAndServePayload attached to a Sync process
// and it will hang on the WaitGroup indefinitely, allowing the Service to serve historical data requests only // and it will hang on the WaitGroup indefinitely, allowing the Service to serve historical data requests only
func (sap *Service) Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth2.ConvertedPayload) { func (sap *Service) Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth.ConvertedPayload) {
sap.serveWg = wg sap.serveWg = wg
go func() { go func() {
wg.Add(1) wg.Add(1)
@ -164,7 +166,7 @@ func (sap *Service) Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth2.
} }
// filterAndServe filters the payload according to each subscription type and sends to the subscriptions // filterAndServe filters the payload according to each subscription type and sends to the subscriptions
func (sap *Service) filterAndServe(payload eth2.ConvertedPayload) { func (sap *Service) filterAndServe(payload eth.ConvertedPayload) {
log.Debug("sending eth ipld payload to subscriptions") log.Debug("sending eth ipld payload to subscriptions")
sap.Lock() sap.Lock()
sap.serveWg.Add(1) sap.serveWg.Add(1)
@ -337,7 +339,7 @@ func (sap *Service) Unsubscribe(id rpc.ID) {
func (sap *Service) Start() error { func (sap *Service) Start() error {
log.Info("starting eth ipld server") log.Info("starting eth ipld server")
wg := new(sync.WaitGroup) wg := new(sync.WaitGroup)
payloadChan := make(chan eth2.ConvertedPayload, PayloadChanBufferSize) payloadChan := make(chan eth.ConvertedPayload, PayloadChanBufferSize)
sap.Serve(wg, payloadChan) sap.Serve(wg, payloadChan)
return nil return nil
} }
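A sketch of the wiring Start performs above: Serve receives a WaitGroup and a buffered payload channel, and if nothing ever feeds the channel it simply hangs there, serving historical data requests only. The exampleStandUpServer wrapper is hypothetical and not part of the change:

package serve

import (
    "sync"

    "github.com/vulcanize/ipld-eth-server/pkg/eth"
)

func exampleStandUpServer(sap Server) {
    wg := new(sync.WaitGroup)
    payloadChan := make(chan eth.ConvertedPayload, PayloadChanBufferSize)
    sap.Serve(wg, payloadChan)
    // Optionally feed payloadChan from a sync process here.
    wg.Wait() // block until the serve loop exits
}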

View File

@ -18,13 +18,13 @@ package shared
import ( import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/ipfs/go-ipfs-blockstore" "github.com/ipfs/go-ipfs-blockstore"
"github.com/ipfs/go-ipfs-ds-help" "github.com/ipfs/go-ipfs-ds-help"
node "github.com/ipfs/go-ipld-format" node "github.com/ipfs/go-ipld-format"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs/ipld"
) )
// HandleZeroAddrPointer will return an empty string for a nil address pointer
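The helper's body and signature are not shown in this hunk; a hypothetical reconstruction based only on the doc comment, assuming the argument is a *common.Address:

package shared

import "github.com/ethereum/go-ethereum/common"

// handleZeroAddrPointerSketch illustrates the documented behavior; the real
// function's signature is an assumption here.
func handleZeroAddrPointerSketch(to *common.Address) string {
    if to == nil {
        return ""
    }
    return to.Hex()
}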

View File

@ -19,7 +19,7 @@ package shared
import ( import (
"bytes" "bytes"
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs" "github.com/ethereum/go-ethereum/statediff/indexer/ipfs"
) )
// IPLDsContainBytes is used to check if a list of IPLDs contains a particular byte slice
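The function itself is not shown in this hunk; as a hypothetical illustration of the described behavior, operating on plain byte slices rather than the real IPLD model type (whose shape is not visible here):

package shared

import "bytes"

// containsBytesSketch reports whether any element of the list equals b.
func containsBytesSketch(iplds [][]byte, b []byte) bool {
    for _, data := range iplds {
        if bytes.Equal(data, b) {
            return true
        }
    }
    return false
}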

View File

@ -19,13 +19,12 @@ package test_config
import ( import (
"errors" "errors"
"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
) )
var DBConfig postgres.Config var DBConfig postgres.ConnectionParams
func init() { func init() {
setTestConfig() setTestConfig()
@ -53,7 +52,7 @@ func setTestConfig() {
port := vip.GetInt("database.port") port := vip.GetInt("database.port")
name := vip.GetString("database.name") name := vip.GetString("database.name")
DBConfig = postgres.Config{ DBConfig = postgres.ConnectionParams{
Hostname: hn, Hostname: hn,
Name: name, Name: name,
Port: port, Port: port,