forked from cerc-io/ipld-eth-server

commit 8df8b50cb1: "Update to use new schema"
parent 072ba1edcc
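A summary of the change, inferred from the diff below rather than taken from the commit message: the server moves onto the v3 ipld-eth-db schema (docker image bumped from vulcanize/ipld-eth-db:v0.2.0 to v3.0.6). The recurring pattern is a switch from integer surrogate keys to natural keys: headers are referenced by block_hash, transactions and receipts by tx_hash, and state and storage nodes by (header_id, state_path). Alongside that, ipfs.BlockModel is replaced by models.IPLDModel, numeric columns such as td, reward, and block_number are read with CAST(... as Text), receipt filters take transaction hashes ([]string) instead of row IDs ([]int64), and transaction rollback goes through the local shared package instead of ethServerShared.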
@@ -19,7 +19,7 @@ services:
ipld-eth-db:
restart: always
image: vulcanize/ipld-eth-db:v0.2.0
image: vulcanize/ipld-eth-db:v3.0.6
environment:
POSTGRES_USER: "vdbm"
POSTGRES_DB: "vulcanize_testing"
@@ -39,14 +39,15 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff/indexer/ipfs"
ethServerShared "github.com/ethereum/go-ethereum/statediff/indexer/shared"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/trie"
"github.com/jmoiron/sqlx"
log "github.com/sirupsen/logrus"
validator "github.com/vulcanize/eth-ipfs-state-validator/pkg"
ipfsethdb "github.com/vulcanize/ipfs-ethdb/postgres"

ethServerShared "github.com/ethereum/go-ethereum/statediff/indexer/shared"

"github.com/vulcanize/ipld-eth-server/pkg/shared"
)
@@ -64,24 +65,24 @@ var (
const (
RetrieveCanonicalBlockHashByNumber = `SELECT block_hash FROM eth.header_cids
INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
WHERE id = (SELECT canonical_header_id($1))`
WHERE block_hash = (SELECT canonical_header_id($1))`
RetrieveCanonicalHeaderByNumber = `SELECT cid, data FROM eth.header_cids
INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
WHERE id = (SELECT canonical_header_id($1))`
RetrieveTD = `SELECT td FROM eth.header_cids
WHERE block_hash = (SELECT canonical_header_id($1))`
RetrieveTD = `SELECT CAST(td as Text) FROM eth.header_cids
WHERE header_cids.block_hash = $1`
RetrieveRPCTransaction = `SELECT blocks.data, block_hash, block_number, index FROM public.blocks, eth.transaction_cids, eth.header_cids
WHERE blocks.key = transaction_cids.mh_key
AND transaction_cids.header_id = header_cids.id
AND transaction_cids.header_id = header_cids.block_hash
AND transaction_cids.tx_hash = $1`
RetrieveCodeHashByLeafKeyAndBlockHash = `SELECT code_hash FROM eth.state_accounts, eth.state_cids, eth.header_cids
WHERE state_accounts.state_id = state_cids.id
AND state_cids.header_id = header_cids.id
WHERE state_accounts.header_id = state_cids.header_id AND state_accounts.state_path = state_cids.state_path
AND state_cids.header_id = header_cids.block_hash
AND state_leaf_key = $1
AND block_number <= (SELECT block_number
FROM eth.header_cids
WHERE block_hash = $2)
AND header_cids.id = (SELECT canonical_header_id(block_number))
AND header_cids.block_hash = (SELECT canonical_header_id(block_number))
ORDER BY block_number DESC
LIMIT 1`
RetrieveCodeByMhKey = `SELECT data FROM public.blocks WHERE key = $1`
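The before and after pair above is the pattern that repeats throughout the query constants; both lines are quoted verbatim from this hunk, and together they suggest the canonical_header_id($1) helper now resolves to a block hash rather than a surrogate row id:

    old schema:  WHERE id = (SELECT canonical_header_id($1))
    new schema:  WHERE block_hash = (SELECT canonical_header_id($1))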
@@ -313,17 +314,17 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
}
defer func() {
if p := recover(); p != nil {
ethServerShared.Rollback(tx)
shared.Rollback(tx)
panic(p)
} else if err != nil {
ethServerShared.Rollback(tx)
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()

// Fetch and decode the header IPLD
var headerIPLD ipfs.BlockModel
var headerIPLD models.IPLDModel
headerIPLD, err = b.Fetcher.FetchHeader(tx, headerCID)
if err != nil {
if err == sql.ErrNoRows {

@@ -337,7 +338,7 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
return nil, err
}
// Fetch and decode the uncle IPLDs
var uncleIPLDs []ipfs.BlockModel
var uncleIPLDs []models.IPLDModel
uncleIPLDs, err = b.Fetcher.FetchUncles(tx, uncleCIDs)
if err != nil {
return nil, err

@@ -352,7 +353,7 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
uncles = append(uncles, &uncle)
}
// Fetch and decode the transaction IPLDs
var txIPLDs []ipfs.BlockModel
var txIPLDs []models.IPLDModel
txIPLDs, err = b.Fetcher.FetchTrxs(tx, txCIDs)
if err != nil {
return nil, err

@@ -367,7 +368,7 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
transactions = append(transactions, &transaction)
}
// Fetch and decode the receipt IPLDs
var rctIPLDs []ipfs.BlockModel
var rctIPLDs []models.IPLDModel
rctIPLDs, err = b.Fetcher.FetchRcts(tx, rctCIDs)
if err != nil {
return nil, err

@@ -409,17 +410,17 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
}
defer func() {
if p := recover(); p != nil {
ethServerShared.Rollback(tx)
shared.Rollback(tx)
panic(p)
} else if err != nil {
ethServerShared.Rollback(tx)
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()

// Fetch and decode the header IPLD
var headerIPLD ipfs.BlockModel
var headerIPLD models.IPLDModel
headerIPLD, err = b.Fetcher.FetchHeader(tx, headerCID)
if err != nil {
if err == sql.ErrNoRows {

@@ -433,7 +434,7 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
return nil, err
}
// Fetch and decode the uncle IPLDs
var uncleIPLDs []ipfs.BlockModel
var uncleIPLDs []models.IPLDModel
uncleIPLDs, err = b.Fetcher.FetchUncles(tx, uncleCIDs)
if err != nil {
if err == sql.ErrNoRows {

@@ -451,7 +452,7 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
uncles = append(uncles, &uncle)
}
// Fetch and decode the transaction IPLDs
var txIPLDs []ipfs.BlockModel
var txIPLDs []models.IPLDModel
txIPLDs, err = b.Fetcher.FetchTrxs(tx, txCIDs)
if err != nil {
if err == sql.ErrNoRows {

@@ -469,7 +470,7 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
transactions = append(transactions, &transaction)
}
// Fetch and decode the receipt IPLDs
var rctIPLDs []ipfs.BlockModel
var rctIPLDs []models.IPLDModel
rctIPLDs, err = b.Fetcher.FetchRcts(tx, rctCIDs)
if err != nil {
if err == sql.ErrNoRows {

@@ -735,10 +736,10 @@ func (b *Backend) GetCodeByHash(ctx context.Context, address common.Address, has
}
defer func() {
if p := recover(); p != nil {
ethServerShared.Rollback(tx)
shared.Rollback(tx)
panic(p)
} else if err != nil {
ethServerShared.Rollback(tx)
shared.Rollback(tx)
} else {
err = tx.Commit()
}
@@ -101,7 +101,7 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
if filter.HeaderFilter.Uncles {
// Retrieve uncle cids for this header id
var uncleCIDs []models.UncleModel
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, header.ID)
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, header.BlockHash)
if err != nil {
log.Error("uncle cid retrieval error")
return nil, true, err

@@ -111,7 +111,7 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
}
// Retrieve cached trx CIDs
if !filter.TxFilter.Off {
cw.Transactions, err = ecr.RetrieveTxCIDs(tx, filter.TxFilter, header.ID)
cw.Transactions, err = ecr.RetrieveTxCIDs(tx, filter.TxFilter, header.BlockHash)
if err != nil {
log.Error("transaction cid retrieval error")
return nil, true, err

@@ -120,13 +120,13 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
empty = false
}
}
trxIds := make([]int64, len(cw.Transactions))
for j, tx := range cw.Transactions {
trxIds[j] = tx.ID
trxHashes := make([]string, len(cw.Transactions))
for j, t := range cw.Transactions {
trxHashes[j] = t.TxHash
}
// Retrieve cached receipt CIDs
if !filter.ReceiptFilter.Off {
cw.Receipts, err = ecr.RetrieveRctCIDsByHeaderID(tx, filter.ReceiptFilter, header.ID, trxIds)
cw.Receipts, err = ecr.RetrieveRctCIDsByHeaderID(tx, filter.ReceiptFilter, header.BlockHash, trxHashes)
if err != nil {
log.Error("receipt cid retrieval error")
return nil, true, err

@@ -137,7 +137,7 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
}
// Retrieve cached state CIDs
if !filter.StateFilter.Off {
cw.StateNodes, err = ecr.RetrieveStateCIDs(tx, filter.StateFilter, header.ID)
cw.StateNodes, err = ecr.RetrieveStateCIDs(tx, filter.StateFilter, header.BlockHash)
if err != nil {
log.Error("state cid retrieval error")
return nil, true, err

@@ -148,7 +148,7 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
}
// Retrieve cached storage CIDs
if !filter.StorageFilter.Off {
cw.StorageNodes, err = ecr.RetrieveStorageCIDs(tx, filter.StorageFilter, header.ID)
cw.StorageNodes, err = ecr.RetrieveStorageCIDs(tx, filter.StorageFilter, header.BlockHash)
if err != nil {
log.Error("storage cid retrieval error")
return nil, true, err

@@ -167,32 +167,33 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]models.HeaderModel, error) {
log.Debug("retrieving header cids for block ", blockNumber)
headers := make([]models.HeaderModel, 0)
pgStr := `SELECT * FROM eth.header_cids
pgStr := `SELECT CAST(block_number as Text), block_hash,parent_hash,cid,mh_key,CAST(td as Text),node_id,
CAST(reward as Text), state_root,uncle_root,tx_root,receipt_root,bloom,timestamp,times_validated,
coinbase FROM eth.header_cids
WHERE block_number = $1`
return headers, tx.Select(&headers, pgStr, blockNumber)
}

// RetrieveUncleCIDsByHeaderID retrieves and returns all of the uncle cids for the provided header
func (ecr *CIDRetriever) RetrieveUncleCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ([]models.UncleModel, error) {
func (ecr *CIDRetriever) RetrieveUncleCIDsByHeaderID(tx *sqlx.Tx, headerID string) ([]models.UncleModel, error) {
log.Debug("retrieving uncle cids for block id ", headerID)
headers := make([]models.UncleModel, 0)
pgStr := `SELECT * FROM eth.uncle_cids
pgStr := `SELECT header_id,block_hash,parent_hash,cid,mh_key, CAST(reward as text) FROM eth.uncle_cids
WHERE header_id = $1`
return headers, tx.Select(&headers, pgStr, headerID)
}

// RetrieveTxCIDs retrieves and returns all of the trx cids at the provided blockheight that conform to the provided filter parameters
// also returns the ids for the returned transaction cids
func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID int64) ([]models.TxModel, error) {
func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID string) ([]models.TxModel, error) {
log.Debug("retrieving transaction cids for header id ", headerID)
args := make([]interface{}, 0, 3)
results := make([]models.TxModel, 0)
id := 1
pgStr := fmt.Sprintf(`SELECT transaction_cids.id, transaction_cids.header_id,
transaction_cids.tx_hash, transaction_cids.cid, transaction_cids.mh_key,
transaction_cids.dst, transaction_cids.src, transaction_cids.index, transaction_cids.tx_data
FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
WHERE header_cids.id = $%d`, id)
pgStr := fmt.Sprintf(`SELECT transaction_cids.tx_hash, transaction_cids.header_id,transaction_cids.cid, transaction_cids.mh_key,
transaction_cids.dst, transaction_cids.src, transaction_cids.index, transaction_cids.tx_data
FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
WHERE header_cids.block_hash = $%d`, id)
args = append(args, headerID)
id++
if len(txFilter.Dst) > 0 {
@@ -241,9 +242,9 @@ func logFilterCondition(id *int, pgStr string, args []interface{}, rctFilter Rec
return pgStr, args
}

func receiptFilterConditions(id *int, pgStr string, args []interface{}, rctFilter ReceiptFilter, trxIds []int64) (string, []interface{}) {
rctCond := " AND (receipt_cids.id = ANY ( "
logQuery := "SELECT receipt_id FROM eth.log_cids WHERE"
func receiptFilterConditions(id *int, pgStr string, args []interface{}, rctFilter ReceiptFilter, txHashes []string) (string, []interface{}) {
rctCond := " AND (receipt_cids.tx_id = ANY ( "
logQuery := "SELECT rct_id FROM eth.log_cids WHERE"
if len(rctFilter.LogAddresses) > 0 {
// Filter on log contract addresses if there are any
pgStr += fmt.Sprintf(`%s %s eth.log_cids.address = ANY ($%d)`, rctCond, logQuery, *id)

@@ -257,10 +258,10 @@ func receiptFilterConditions(id *int, pgStr string, args []interface{}, rctFilte
pgStr += ")"

// Filter on txIDs if there are any and we are matching txs
if rctFilter.MatchTxs && len(trxIds) > 0 {
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, *id)
args = append(args, pq.Array(trxIds))
// Filter on txHashes if there are any, and we are matching txs
if rctFilter.MatchTxs && len(txHashes) > 0 {
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d)`, *id)
args = append(args, pq.Array(txHashes))
}
pgStr += ")"
} else { // If there are no contract addresses to filter on

@@ -269,17 +270,17 @@ func receiptFilterConditions(id *int, pgStr string, args []interface{}, rctFilte
pgStr += rctCond + logQuery
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, true)
pgStr += ")"
// Filter on txIDs if there are any and we are matching txs
if rctFilter.MatchTxs && len(trxIds) > 0 {
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, *id)
args = append(args, pq.Array(trxIds))
// Filter on txHashes if there are any, and we are matching txs
if rctFilter.MatchTxs && len(txHashes) > 0 {
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d)`, *id)
args = append(args, pq.Array(txHashes))
}
pgStr += ")"
} else if rctFilter.MatchTxs && len(trxIds) > 0 {
} else if rctFilter.MatchTxs && len(txHashes) > 0 {
// If there are no contract addresses or topics to filter on,
// Filter on txIDs if there are any and we are matching txs
pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d::INTEGER[])`, *id)
args = append(args, pq.Array(trxIds))
// Filter on txHashes if there are any, and we are matching txs
pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d)`, *id)
args = append(args, pq.Array(txHashes))
}
}

@@ -288,19 +289,19 @@ func receiptFilterConditions(id *int, pgStr string, args []interface{}, rctFilte

// RetrieveRctCIDsByHeaderID retrieves and returns all of the rct cids at the provided header ID that conform to the provided
// filter parameters and correspond to the provided tx ids
func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter ReceiptFilter, headerID int64, trxIds []int64) ([]models.ReceiptModel, error) {
func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter ReceiptFilter, headerID string, trxHashes []string) ([]models.ReceiptModel, error) {
log.Debug("retrieving receipt cids for header id ", headerID)
args := make([]interface{}, 0, 4)
pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key,
pgStr := `SELECT receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key,
receipt_cids.contract, receipt_cids.contract_hash
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.id
AND transaction_cids.header_id = header_cids.id
AND header_cids.id = $1`
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
AND transaction_cids.header_id = header_cids.block_hash
AND header_cids.block_hash = $1`
id := 2
args = append(args, headerID)

pgStr, args = receiptFilterConditions(&id, pgStr, args, rctFilter, trxIds)
pgStr, args = receiptFilterConditions(&id, pgStr, args, rctFilter, trxHashes)

pgStr += ` ORDER BY transaction_cids.index`
receiptCids := make([]models.ReceiptModel, 0)
@@ -313,13 +314,13 @@ func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptF
log.Debug("retrieving log cids for receipt ids")
args := make([]interface{}, 0, 4)
id := 1
pgStr := `SELECT eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.receipt_id,
pgStr := `SELECT eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
eth.log_cids.log_data, eth.transaction_cids.tx_hash, data, eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids, public.blocks
WHERE eth.log_cids.receipt_id = receipt_cids.id
AND receipt_cids.tx_id = transaction_cids.id
AND transaction_cids.header_id = header_cids.id
WHERE eth.log_cids.rct_id = receipt_cids.tx_id
AND receipt_cids.tx_id = transaction_cids.tx_hash
AND transaction_cids.header_id = header_cids.block_hash
AND log_cids.leaf_mh_key = blocks.key AND header_cids.block_hash = $1`

args = append(args, blockHash.String())

@@ -342,14 +343,14 @@ func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptF
func (ecr *CIDRetriever) RetrieveFilteredLog(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash) ([]LogResult, error) {
log.Debug("retrieving log cids for receipt ids")
args := make([]interface{}, 0, 4)
pgStr := `SELECT eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.receipt_id,
pgStr := `SELECT eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
eth.log_cids.log_data, eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index,
header_cids.block_hash, header_cids.block_number
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE eth.log_cids.receipt_id = receipt_cids.id
AND receipt_cids.tx_id = transaction_cids.id
AND transaction_cids.header_id = header_cids.id`
header_cids.block_hash, CAST(header_cids.block_number as Text)
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE eth.log_cids.rct_id = receipt_cids.tx_id
AND receipt_cids.tx_id = transaction_cids.tx_hash
AND transaction_cids.header_id = header_cids.block_hash`
id := 1
if blockNumber > 0 {
pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)

@@ -376,13 +377,13 @@ func (ecr *CIDRetriever) RetrieveFilteredLog(tx *sqlx.Tx, rctFilter ReceiptFilte

// RetrieveRctCIDs retrieves and returns all of the rct cids at the provided blockheight or block hash that conform to the provided
// filter parameters and correspond to the provided tx ids
func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, trxIds []int64) ([]models.ReceiptModel, error) {
func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, txHashes []string) ([]models.ReceiptModel, error) {
log.Debug("retrieving receipt cids for block ", blockNumber)
args := make([]interface{}, 0, 5)
pgStr := `SELECT receipt_cids.id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key, receipt_cids.tx_id
pgStr := `SELECT receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key, receipt_cids.tx_id
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.id
AND transaction_cids.header_id = header_cids.id`
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
AND transaction_cids.header_id = header_cids.block_hash`
id := 1
if blockNumber > 0 {
pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)

@@ -395,7 +396,7 @@ func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, b
id++
}

pgStr, args = receiptFilterConditions(&id, pgStr, args, rctFilter, trxIds)
pgStr, args = receiptFilterConditions(&id, pgStr, args, rctFilter, txHashes)

pgStr += ` ORDER BY transaction_cids.index`
receiptCids := make([]models.ReceiptModel, 0)
@@ -412,13 +413,13 @@ func hasTopics(topics [][]string) bool {
}

// RetrieveStateCIDs retrieves and returns all of the state node cids at the provided header ID that conform to the provided filter parameters
func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, headerID int64) ([]models.StateNodeModel, error) {
func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, headerID string) ([]models.StateNodeModel, error) {
log.Debug("retrieving state cids for header id ", headerID)
args := make([]interface{}, 0, 2)
pgStr := `SELECT state_cids.id, state_cids.header_id,
pgStr := `SELECT state_cids.header_id,
state_cids.state_leaf_key, state_cids.node_type, state_cids.cid, state_cids.mh_key, state_cids.state_path
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
WHERE header_cids.id = $1`
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
WHERE header_cids.block_hash = $1`
args = append(args, headerID)
addrLen := len(stateFilter.Addresses)
if addrLen > 0 {

@@ -437,15 +438,15 @@ func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter,
}

// RetrieveStorageCIDs retrieves and returns all of the storage node cids at the provided header id that conform to the provided filter parameters
func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, headerID int64) ([]models.StorageNodeWithStateKeyModel, error) {
func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, headerID string) ([]models.StorageNodeWithStateKeyModel, error) {
log.Debug("retrieving storage cids for header id ", headerID)
args := make([]interface{}, 0, 3)
pgStr := `SELECT storage_cids.id, storage_cids.state_id, storage_cids.storage_leaf_key, storage_cids.node_type,
storage_cids.cid, storage_cids.mh_key, storage_cids.storage_path, state_cids.state_leaf_key
pgStr := `SELECT storage_cids.header_id, storage_cids.storage_leaf_key, storage_cids.node_type,
storage_cids.cid, storage_cids.mh_key, storage_cids.storage_path, storage_cids.state_path, state_cids.state_leaf_key
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE storage_cids.state_id = state_cids.id
AND state_cids.header_id = header_cids.id
AND header_cids.id = $1`
WHERE storage_cids.header_id = state_cids.header_id AND storage_cids.state_path = state_cids.state_path
AND state_cids.header_id = header_cids.block_hash
AND header_cids.block_hash = $1`
args = append(args, headerID)
id := 2
addrLen := len(storageFilter.Addresses)
@@ -496,23 +497,23 @@ func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (models.Head
return models.HeaderModel{}, nil, nil, nil, err
}
var uncleCIDs []models.UncleModel
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID.ID)
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID.BlockHash)
if err != nil {
log.Error("uncle cid retrieval error")
return models.HeaderModel{}, nil, nil, nil, err
}
var txCIDs []models.TxModel
txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID.ID)
txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID.BlockHash)
if err != nil {
log.Error("tx cid retrieval error")
return models.HeaderModel{}, nil, nil, nil, err
}
txIDs := make([]int64, len(txCIDs))
txHashes := make([]string, len(txCIDs))
for i, txCID := range txCIDs {
txIDs[i] = txCID.ID
txHashes[i] = txCID.TxHash
}
var rctCIDs []models.ReceiptModel
rctCIDs, err = ecr.RetrieveReceiptCIDsByTxIDs(tx, txIDs)
rctCIDs, err = ecr.RetrieveReceiptCIDsByTxIDs(tx, txHashes)
if err != nil {
log.Error("rct cid retrieval error")
}

@@ -549,23 +550,23 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (models.Header
return models.HeaderModel{}, nil, nil, nil, fmt.Errorf("header cid retrieval error, no header CIDs found at block %d", blockNumber)
}
var uncleCIDs []models.UncleModel
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID[0].ID)
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID[0].BlockHash)
if err != nil {
log.Error("uncle cid retrieval error")
return models.HeaderModel{}, nil, nil, nil, err
}
var txCIDs []models.TxModel
txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].ID)
txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].BlockHash)
if err != nil {
log.Error("tx cid retrieval error")
return models.HeaderModel{}, nil, nil, nil, err
}
txIDs := make([]int64, len(txCIDs))
txHashes := make([]string, len(txCIDs))
for i, txCID := range txCIDs {
txIDs[i] = txCID.ID
txHashes[i] = txCID.TxHash
}
var rctCIDs []models.ReceiptModel
rctCIDs, err = ecr.RetrieveReceiptCIDsByTxIDs(tx, txIDs)
rctCIDs, err = ecr.RetrieveReceiptCIDsByTxIDs(tx, txHashes)
if err != nil {
log.Error("rct cid retrieval error")
}
@@ -575,14 +576,14 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (models.Header
// RetrieveHeaderCIDByHash returns the header for the given block hash
func (ecr *CIDRetriever) RetrieveHeaderCIDByHash(tx *sqlx.Tx, blockHash common.Hash) (models.HeaderModel, error) {
log.Debug("retrieving header cids for block hash ", blockHash.String())
pgStr := `SELECT * FROM eth.header_cids
pgStr := `SELECT block_hash,cid,mh_key FROM eth.header_cids
WHERE block_hash = $1`
var headerCID models.HeaderModel
return headerCID, tx.Get(&headerCID, pgStr, blockHash.String())
}

// RetrieveTxCIDsByHeaderID retrieves all tx CIDs for the given header id
func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ([]models.TxModel, error) {
func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID string) ([]models.TxModel, error) {
log.Debug("retrieving tx cids for block id ", headerID)
pgStr := `SELECT * FROM eth.transaction_cids
WHERE header_id = $1

@@ -592,14 +593,14 @@ func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID int64) (
}

// RetrieveReceiptCIDsByTxIDs retrieves receipt CIDs by their associated tx IDs
func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txIDs []int64) ([]models.ReceiptModel, error) {
log.Debugf("retrieving receipt cids for tx ids %v", txIDs)
pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key,
func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txHashes []string) ([]models.ReceiptModel, error) {
log.Debugf("retrieving receipt cids for tx hashes %v", txHashes)
pgStr := `SELECT receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key,
receipt_cids.contract, receipt_cids.contract_hash
FROM eth.receipt_cids, eth.transaction_cids
WHERE tx_id = ANY($1::INTEGER[])
AND receipt_cids.tx_id = transaction_cids.id
WHERE tx_id = ANY($1)
AND receipt_cids.tx_id = transaction_cids.tx_hash
ORDER BY transaction_cids.index`
var rctCIDs []models.ReceiptModel
return rctCIDs, tx.Select(&rctCIDs, pgStr, pq.Array(txIDs))
return rctCIDs, tx.Select(&rctCIDs, pgStr, pq.Array(txHashes))
}
@@ -23,12 +23,11 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"

"github.com/ethereum/go-ethereum/statediff/indexer/ipfs"
)

// Filterer interface for substituing mocks in tests

@@ -82,12 +81,12 @@ func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IP
if err != nil {
return err
}
response.Header = ipfs.BlockModel{
response.Header = models.IPLDModel{
Data: headerRLP,
CID: cid.String(),
Key: cid.String(),
}
if headerFilter.Uncles {
response.Uncles = make([]ipfs.BlockModel, len(payload.Block.Body().Uncles))
response.Uncles = make([]models.IPLDModel, len(payload.Block.Body().Uncles))
for i, uncle := range payload.Block.Body().Uncles {
uncleRlp, err := rlp.EncodeToBytes(uncle)
if err != nil {

@@ -97,9 +96,9 @@ func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IP
if err != nil {
return err
}
response.Uncles[i] = ipfs.BlockModel{
response.Uncles[i] = models.IPLDModel{
Data: uncleRlp,
CID: cid.String(),
Key: cid.String(),
}
}
}

@@ -119,7 +118,7 @@ func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLD
if !trxFilter.Off {
trxLen := len(payload.Block.Body().Transactions)
trxHashes = make([]common.Hash, 0, trxLen)
response.Transactions = make([]ipfs.BlockModel, 0, trxLen)
response.Transactions = make([]models.IPLDModel, 0, trxLen)
for i, trx := range payload.Block.Body().Transactions {
// TODO: check if want corresponding receipt and if we do we must include this transaction
if checkTransactionAddrs(trxFilter.Src, trxFilter.Dst, payload.TxMetaData[i].Src, payload.TxMetaData[i].Dst) {

@@ -132,9 +131,9 @@ func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLD
if err != nil {
return nil, err
}
response.Transactions = append(response.Transactions, ipfs.BlockModel{
response.Transactions = append(response.Transactions, models.IPLDModel{
Data: data,
CID: cid.String(),
Key: cid.String(),
})
trxHashes = append(trxHashes, trx.Hash())
}

@@ -164,7 +163,7 @@ func checkTransactionAddrs(wantedSrc, wantedDst []string, actualSrc, actualDst s

func (s *ResponseFilterer) filerReceipts(receiptFilter ReceiptFilter, response *IPLDs, payload ConvertedPayload, trxHashes []common.Hash) error {
if !receiptFilter.Off {
response.Receipts = make([]ipfs.BlockModel, 0, len(payload.Receipts))
response.Receipts = make([]models.IPLDModel, 0, len(payload.Receipts))
rctLeafCID, rctIPLDData, err := GetRctLeafNodeData(payload.Receipts)
if err != nil {
return err

@@ -183,9 +182,9 @@ func (s *ResponseFilterer) filerReceipts(receiptFilter ReceiptFilter, response *

// TODO: Verify this filter logic.
if checkReceipts(receipt, receiptFilter.Topics, topics, receiptFilter.LogAddresses, contracts, trxHashes) {
response.Receipts = append(response.Receipts, ipfs.BlockModel{
response.Receipts = append(response.Receipts, models.IPLDModel{
Data: rctIPLDData[idx],
CID: rctLeafCID[idx].String(),
Key: rctLeafCID[idx].String(),
})
}
}

@@ -282,9 +281,9 @@ func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storag
response.StateNodes = append(response.StateNodes, StateNode{
StateLeafKey: common.BytesToHash(stateNode.LeafKey),
Path: stateNode.Path,
IPLD: ipfs.BlockModel{
IPLD: models.IPLDModel{
Data: stateNode.NodeValue,
CID: cid.String(),
Key: cid.String(),
},
Type: stateNode.NodeType,
})

@@ -300,9 +299,9 @@ func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storag
response.StorageNodes = append(response.StorageNodes, StorageNode{
StateLeafKey: common.BytesToHash(stateNode.LeafKey),
StorageLeafKey: common.BytesToHash(storageNode.LeafKey),
IPLD: ipfs.BlockModel{
IPLD: models.IPLDModel{
Data: storageNode.NodeValue,
CID: cid.String(),
Key: cid.String(),
},
Type: storageNode.NodeType,
Path: storageNode.Path,
@@ -22,7 +22,6 @@ import (
"math/big"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/statediff/indexer/ipfs"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/jmoiron/sqlx"
log "github.com/sirupsen/logrus"

@@ -101,65 +100,65 @@ func (f *IPLDFetcher) Fetch(cids CIDWrapper) (*IPLDs, error) {
}

// FetchHeaders fetches headers
func (f *IPLDFetcher) FetchHeader(tx *sqlx.Tx, c models.HeaderModel) (ipfs.BlockModel, error) {
func (f *IPLDFetcher) FetchHeader(tx *sqlx.Tx, c models.HeaderModel) (models.IPLDModel, error) {
log.Debug("fetching header ipld")
headerBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil {
return ipfs.BlockModel{}, err
return models.IPLDModel{}, err
}
return ipfs.BlockModel{
return models.IPLDModel{
Data: headerBytes,
CID: c.CID,
Key: c.CID,
}, nil
}

// FetchUncles fetches uncles
func (f *IPLDFetcher) FetchUncles(tx *sqlx.Tx, cids []models.UncleModel) ([]ipfs.BlockModel, error) {
func (f *IPLDFetcher) FetchUncles(tx *sqlx.Tx, cids []models.UncleModel) ([]models.IPLDModel, error) {
log.Debug("fetching uncle iplds")
uncleIPLDs := make([]ipfs.BlockModel, len(cids))
uncleIPLDs := make([]models.IPLDModel, len(cids))
for i, c := range cids {
uncleBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil {
return nil, err
}
uncleIPLDs[i] = ipfs.BlockModel{
uncleIPLDs[i] = models.IPLDModel{
Data: uncleBytes,
CID: c.CID,
Key: c.CID,
}
}
return uncleIPLDs, nil
}

// FetchTrxs fetches transactions
func (f *IPLDFetcher) FetchTrxs(tx *sqlx.Tx, cids []models.TxModel) ([]ipfs.BlockModel, error) {
func (f *IPLDFetcher) FetchTrxs(tx *sqlx.Tx, cids []models.TxModel) ([]models.IPLDModel, error) {
log.Debug("fetching transaction iplds")
trxIPLDs := make([]ipfs.BlockModel, len(cids))
trxIPLDs := make([]models.IPLDModel, len(cids))
for i, c := range cids {
txBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil {
return nil, err
}
trxIPLDs[i] = ipfs.BlockModel{
trxIPLDs[i] = models.IPLDModel{
Data: txBytes,
CID: c.CID,
Key: c.CID,
}
}
return trxIPLDs, nil
}

// FetchRcts fetches receipts
func (f *IPLDFetcher) FetchRcts(tx *sqlx.Tx, cids []models.ReceiptModel) ([]ipfs.BlockModel, error) {
func (f *IPLDFetcher) FetchRcts(tx *sqlx.Tx, cids []models.ReceiptModel) ([]models.IPLDModel, error) {
log.Debug("fetching receipt iplds")
rctIPLDs := make([]ipfs.BlockModel, len(cids))
rctIPLDs := make([]models.IPLDModel, len(cids))
for i, c := range cids {
rctBytes, err := shared.FetchIPLDByMhKey(tx, c.LeafMhKey)
if err != nil {
return nil, err
}
//nodeVal, err := DecodeLeafNode(rctBytes)
rctIPLDs[i] = ipfs.BlockModel{
rctIPLDs[i] = models.IPLDModel{
Data: rctBytes,
CID: c.LeafCID,
Key: c.LeafCID,
}
}
return rctIPLDs, nil

@@ -178,9 +177,9 @@ func (f *IPLDFetcher) FetchState(tx *sqlx.Tx, cids []models.StateNodeModel) ([]S
return nil, err
}
stateNodes = append(stateNodes, StateNode{
IPLD: ipfs.BlockModel{
IPLD: models.IPLDModel{
Data: stateBytes,
CID: stateNode.CID,
Key: stateNode.CID,
},
StateLeafKey: common.HexToHash(stateNode.StateKey),
Type: ResolveToNodeType(stateNode.NodeType),

@@ -203,9 +202,9 @@ func (f *IPLDFetcher) FetchStorage(tx *sqlx.Tx, cids []models.StorageNodeWithSta
return nil, err
}
storageNodes = append(storageNodes, StorageNode{
IPLD: ipfs.BlockModel{
IPLD: models.IPLDModel{
Data: storageBytes,
CID: storageNode.CID,
Key: storageNode.CID,
},
StateLeafKey: common.HexToHash(storageNode.StateKey),
StorageLeafKey: common.HexToHash(storageNode.StorageKey),
@@ -19,7 +19,7 @@ package eth
import (
"fmt"

"github.com/ethereum/go-ethereum/statediff/trie"
"github.com/ethereum/go-ethereum/statediff/trie_helpers"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/jmoiron/sqlx"

@@ -52,12 +52,12 @@ const (
WHERE block_hash = ANY($1::VARCHAR(66)[])`
RetrieveUnclesByBlockHashPgStr = `SELECT uncle_cids.cid, data
FROM eth.uncle_cids
INNER JOIN eth.header_cids ON (uncle_cids.header_id = header_cids.id)
INNER JOIN eth.header_cids ON (uncle_cids.header_id = header_cids.block_hash)
INNER JOIN public.blocks ON (uncle_cids.mh_key = blocks.key)
WHERE block_hash = $1`
RetrieveUnclesByBlockNumberPgStr = `SELECT uncle_cids.cid, data
FROM eth.uncle_cids
INNER JOIN eth.header_cids ON (uncle_cids.header_id = header_cids.id)
INNER JOIN eth.header_cids ON (uncle_cids.header_id = header_cids.block_hash)
INNER JOIN public.blocks ON (uncle_cids.mh_key = blocks.key)
WHERE block_number = $1`
RetrieveUncleByHashPgStr = `SELECT cid, data

@@ -70,13 +70,13 @@ const (
WHERE tx_hash = ANY($1::VARCHAR(66)[])`
RetrieveTransactionsByBlockHashPgStr = `SELECT transaction_cids.cid, data
FROM eth.transaction_cids
INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key)
WHERE block_hash = $1
ORDER BY eth.transaction_cids.index ASC`
RetrieveTransactionsByBlockNumberPgStr = `SELECT transaction_cids.cid, data
FROM eth.transaction_cids
INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key)
WHERE block_number = $1
ORDER BY eth.transaction_cids.index ASC`

@@ -86,42 +86,42 @@ const (
WHERE tx_hash = $1`
RetrieveReceiptsByTxHashesPgStr = `SELECT receipt_cids.leaf_cid, data
FROM eth.receipt_cids
INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.id)
INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash)
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key)
WHERE tx_hash = ANY($1::VARCHAR(66)[])`
RetrieveReceiptsByBlockHashPgStr = `SELECT receipt_cids.leaf_cid, data, eth.transaction_cids.tx_hash
FROM eth.receipt_cids
INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.id)
INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash)
INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key)
WHERE block_hash = $1
ORDER BY eth.transaction_cids.index ASC`
RetrieveReceiptsByBlockNumberPgStr = `SELECT receipt_cids.leaf_cid, data
FROM eth.receipt_cids
INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.id)
INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash)
INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key)
WHERE block_number = $1
ORDER BY eth.transaction_cids.index ASC`
RetrieveReceiptByTxHashPgStr = `SELECT receipt_cids.leaf_cid, data
FROM eth.receipt_cids
INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.id)
INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash)
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key)
WHERE tx_hash = $1`
RetrieveAccountByLeafKeyAndBlockHashPgStr = `SELECT state_cids.cid, data, state_cids.node_type
FROM eth.state_cids
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
INNER JOIN public.blocks ON (state_cids.mh_key = blocks.key)
WHERE state_leaf_key = $1
AND block_number <= (SELECT block_number
FROM eth.header_cids
WHERE block_hash = $2)
AND header_cids.id = (SELECT canonical_header_id(block_number))
AND header_cids.block_hash = (SELECT canonical_header_id(block_number))
ORDER BY block_number DESC
LIMIT 1`
RetrieveAccountByLeafKeyAndBlockNumberPgStr = `SELECT state_cids.cid, data, state_cids.node_type
FROM eth.state_cids
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
INNER JOIN public.blocks ON (state_cids.mh_key = blocks.key)
WHERE state_leaf_key = $1
AND block_number <= $2

@@ -129,8 +129,8 @@ const (
LIMIT 1`
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
FROM eth.storage_cids
INNER JOIN eth.state_cids ON (storage_cids.state_id = state_cids.id)
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
INNER JOIN eth.state_cids ON (storage_cids.header_id = state_cids.header_id)
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
INNER JOIN public.blocks ON (storage_cids.mh_key = blocks.key)
WHERE state_leaf_key = $1
AND storage_leaf_key = $2

@@ -139,15 +139,15 @@ const (
LIMIT 1`
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
FROM eth.storage_cids
INNER JOIN eth.state_cids ON (storage_cids.state_id = state_cids.id)
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
INNER JOIN eth.state_cids ON (storage_cids.header_id = state_cids.header_id)
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
INNER JOIN public.blocks ON (storage_cids.mh_key = blocks.key)
WHERE state_leaf_key = $1
AND storage_leaf_key = $2
AND block_number <= (SELECT block_number
FROM eth.header_cids
WHERE block_hash = $3)
AND header_cids.id = (SELECT canonical_header_id(block_number))
AND header_cids.block_hash = (SELECT canonical_header_id(block_number))
ORDER BY block_number DESC
LIMIT 1`
)

@@ -333,7 +333,7 @@ func DecodeLeafNode(node []byte) ([]byte, error) {
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
return nil, err
}
ty, err := trie.CheckKeyType(nodeElements)
ty, err := trie_helpers.CheckKeyType(nodeElements)
if err != nil {
return nil, err
}
@@ -24,7 +24,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/statediff/indexer/ipfs"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/sirupsen/logrus"

@@ -197,10 +196,10 @@ func (arg *CallArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (types.Mes
type IPLDs struct {
BlockNumber *big.Int
TotalDifficulty *big.Int
Header ipfs.BlockModel
Uncles []ipfs.BlockModel
Transactions []ipfs.BlockModel
Receipts []ipfs.BlockModel
Header models.IPLDModel
Uncles []models.IPLDModel
Transactions []models.IPLDModel
Receipts []models.IPLDModel
StateNodes []StateNode
StorageNodes []StorageNode
}

@@ -209,7 +208,7 @@ type StateNode struct {
Type sdtypes.NodeType
StateLeafKey common.Hash
Path []byte
IPLD ipfs.BlockModel
IPLD models.IPLDModel
}

type StorageNode struct {

@@ -217,7 +216,7 @@ type StorageNode struct {
StateLeafKey common.Hash
StorageLeafKey common.Hash
Path []byte
IPLD ipfs.BlockModel
IPLD models.IPLDModel
}

// CIDWrapper is used to direct fetching of IPLDs from IPFS

@@ -249,7 +248,7 @@ type ConvertedPayload struct {
// LogResult represent a log.
type LogResult struct {
LeafCID string `db:"leaf_cid"`
ReceiptID int64 `db:"receipt_id"`
ReceiptID int64 `db:"rct_id"`
Address string `db:"address"`
Index int64 `db:"index"`
Data []byte `db:"log_data"`
@@ -20,7 +20,6 @@ import (
"context"

"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
log "github.com/sirupsen/logrus"

"github.com/vulcanize/ipld-eth-server/pkg/eth"

@@ -83,7 +82,7 @@ func (api *PublicServerAPI) Stream(ctx context.Context, params eth.SubscriptionS
return rpcSub, nil
}

// Chain returns the chain type that this watcher instance supports
func (api *PublicServerAPI) Chain() shared.ChainType {
return shared.Ethereum
}
// // Chain returns the chain type that this watcher instance supports
// func (api *PublicServerAPI) Chain() shared.ChainType {
// return shared.Ethereum
// }
@@ -1,5 +1,5 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// Copyright © 2022 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
@@ -18,7 +18,7 @@ package shared

import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ipfs/go-cid"
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
@@ -19,11 +19,11 @@ package shared
import (
"bytes"

"github.com/ethereum/go-ethereum/statediff/indexer/ipfs"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
)

// IPLDsContainBytes used to check if a list of strings contains a particular string
func IPLDsContainBytes(iplds []ipfs.BlockModel, b []byte) bool {
func IPLDsContainBytes(iplds []models.IPLDModel, b []byte) bool {
for _, ipld := range iplds {
if bytes.Equal(ipld.Data, b) {
return true
@@ -19,12 +19,12 @@ package test_config
import (
"errors"

"github.com/ethereum/go-ethereum/statediff/indexer/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
)

var DBConfig postgres.ConnectionParams
var DBConfig postgres.Config

func init() {
setTestConfig()

@@ -52,9 +52,9 @@ func setTestConfig() {
port := vip.GetInt("database.port")
name := vip.GetString("database.name")

DBConfig = postgres.ConnectionParams{
Hostname: hn,
Name: name,
Port: port,
DBConfig = postgres.Config{
Hostname: hn,
DatabaseName: name,
Port: port,
}
}
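For readers migrating similar code, a minimal sketch of the model swap that recurs throughout this commit, assuming only the fields the diff actually touches (Data and Key); this is not the full upstream struct definition:

    // old: response.Header = ipfs.BlockModel{Data: headerRLP, CID: cid.String()}
    // new, as in this commit:
    response.Header = models.IPLDModel{
        Data: headerRLP,    // raw RLP bytes of the header, unchanged
        Key:  cid.String(), // CID string, previously carried in BlockModel.CID
    }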