Merge pull request #152 from vulcanize/pm-v4-single-node
Updates to use v4 db schema
Commit: 037131528c
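The recurring change in this PR is that the v4 schema keys rows by block number as well as by hash, so the updated queries join on block_number in addition to the old key columns and surface block_number (cast to text) in their SELECT lists. The snippet below is only an illustration of that pattern and is not part of the diff; the table and column names are copied from the queries changed below, everything else (the constant name, the wrapper program) is made up for the example.

package main

import "fmt"

// exampleTxByHeaderQuery sketches the v4 join pattern used throughout the
// updated queries: joins that previously matched on a single key now also
// match on block_number, and block_number is selected as text.
const exampleTxByHeaderQuery = `SELECT CAST(transaction_cids.block_number as Text), transaction_cids.tx_hash, transaction_cids.cid
	FROM eth.transaction_cids
	INNER JOIN eth.header_cids ON (
		transaction_cids.header_id = header_cids.block_hash
		AND transaction_cids.block_number = header_cids.block_number
	)
	WHERE header_cids.block_hash = $1`

func main() {
	fmt.Println(exampleTxByHeaderQuery)
}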
.github/workflows/on-pr.yaml (vendored): 12 changed lines

@@ -60,8 +60,8 @@ jobs:
   integrationtest:
     name: Run integration tests
     env:
-      STACK_ORCHESTRATOR_REF: fcbc74451c5494664fe21f765e89c9c6565c07cb
-      GO_ETHEREUM_REF: 498101102c891c4f8c3cab5649158c642ee1fd6b
+      STACK_ORCHESTRATOR_REF: 38545ad36144800d16d61e53050bb8e784729f26
+      GO_ETHEREUM_REF: ef8c9f2580dd577a7c5eca538fb0ed64d53dc4a4
       GOPATH: /tmp/go
       DB_WRITE: true
       ETH_FORWARD_ETH_CALLS: false
@@ -127,8 +127,8 @@ jobs:
   integrationtest_forwardethcalls:
     name: Run integration tests for direct proxy fall-through of eth_calls
     env:
-      STACK_ORCHESTRATOR_REF: fcbc74451c5494664fe21f765e89c9c6565c07cb
-      GO_ETHEREUM_REF: 498101102c891c4f8c3cab5649158c642ee1fd6b
+      STACK_ORCHESTRATOR_REF: 38545ad36144800d16d61e53050bb8e784729f26
+      GO_ETHEREUM_REF: ef8c9f2580dd577a7c5eca538fb0ed64d53dc4a4
       GOPATH: /tmp/go
       DB_WRITE: false
       ETH_FORWARD_ETH_CALLS: true
@@ -194,8 +194,8 @@ jobs:
   integrationtest_watchedaddress_gapfillingservice:
     name: Run integration tests for watched addresses with gap filling service enabled
     env:
-      STACK_ORCHESTRATOR_REF: fcbc74451c5494664fe21f765e89c9c6565c07cb
-      GO_ETHEREUM_REF: 498101102c891c4f8c3cab5649158c642ee1fd6b
+      STACK_ORCHESTRATOR_REF: 38545ad36144800d16d61e53050bb8e784729f26
+      GO_ETHEREUM_REF: ef8c9f2580dd577a7c5eca538fb0ed64d53dc4a4
       GOPATH: /tmp/go
       DB_WRITE: true
       ETH_FORWARD_ETH_CALLS: false
.github/workflows/run_unit_test.sh (vendored): 22 changed lines

@@ -8,22 +8,30 @@ temp_dir=$(mktemp -d)
 cd $temp_dir
 git clone -b $(cat /tmp/git_head_ref) "https://github.com/$(cat /tmp/git_repository).git"
 cd ipld-eth-server
+mkdir -p out
 
 ## Remove the branch and github related info. This way future runs wont be confused.
 rm -f /tmp/git_head_ref /tmp/git_repository
 
-# Spin up DB
-docker-compose up -d ipld-eth-db
-trap "docker-compose down --remove-orphans; cd $start_dir ; rm -r $temp_dir" SIGINT SIGTERM ERR
-sleep 10
+# Remove existing docker-tsdb directory
+rm -rf out/docker-tsdb/
+
+# Copy over files to setup TimescaleDB
+ID=$(docker create vulcanize/ipld-eth-db:v4.1.1-alpha)
+docker cp $ID:/app/docker-tsdb out/docker-tsdb/
+docker rm -v $ID
+
+# Spin up TimescaleDB
+docker-compose -f out/docker-tsdb/docker-compose.test.yml -f docker-compose.yml up ipld-eth-db
+trap "docker-compose -f out/docker-tsdb/docker-compose.test.yml -f docker-compose.yml down --remove-orphans --volumes; cd $start_dir ; rm -r $temp_dir" SIGINT SIGTERM ERR
+sleep 45
 
 # Remove old logs so there's no confusion, then run test
 rm -f /tmp/test.log /tmp/return_test.txt
-PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing make test > /tmp/test.log
+PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8066 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing_v4 make test > /tmp/test.log
 echo $? > /tmp/return_test.txt
 
 # Clean up
-docker-compose down -v --remove-orphans
+docker-compose -f out/docker-tsdb/docker-compose.test.yml -f docker-compose.yml down --remove-orphans --volumes
 cd $start_dir
 rm -fr $temp_dir
@@ -22,8 +22,8 @@ import (
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
-	validator "github.com/vulcanize/eth-ipfs-state-validator/v3/pkg"
-	ipfsethdb "github.com/vulcanize/ipfs-ethdb/v3/postgres"
+	validator "github.com/vulcanize/eth-ipfs-state-validator/v4/pkg"
+	ipfsethdb "github.com/vulcanize/ipfs-ethdb/v4/postgres"
 
 	s "github.com/vulcanize/ipld-eth-server/v3/pkg/serve"
 )
@@ -2,17 +2,16 @@ version: '3.2'
 
 services:
   ipld-eth-db:
-    restart: always
-    image: vulcanize/ipld-eth-db:v3.2.0
+    restart: on-failure
+    depends_on:
+      - access-node
+    image: vulcanize/ipld-eth-db:v4.1.1-alpha
     environment:
-      POSTGRES_USER: "vdbm"
-      POSTGRES_DB: "vulcanize_testing"
-      POSTGRES_PASSWORD: "password"
-    volumes:
-      - vdb_db_eth_server:/var/lib/postgresql/data
-    ports:
-      - "127.0.0.1:8077:5432"
-    command: ["postgres", "-c", "log_statement=all"]
+      DATABASE_USER: "vdbm"
+      DATABASE_NAME: "vulcanize_testing_v4"
+      DATABASE_PASSWORD: "password"
+      DATABASE_HOSTNAME: "access-node"
+      DATABASE_PORT: 5432
 
   eth-server:
     restart: unless-stopped
go.mod: 6 changed lines

@@ -23,9 +23,9 @@ require (
 	github.com/sirupsen/logrus v1.8.1
 	github.com/spf13/cobra v1.4.0
 	github.com/spf13/viper v1.11.0
-	github.com/vulcanize/eth-ipfs-state-validator/v3 v3.0.0
+	github.com/vulcanize/eth-ipfs-state-validator/v4 v4.0.0-alpha
 	github.com/vulcanize/gap-filler v0.3.1
-	github.com/vulcanize/ipfs-ethdb/v3 v3.0.1
+	github.com/vulcanize/ipfs-ethdb/v4 v4.0.0-alpha
 )
 
-replace github.com/ethereum/go-ethereum v1.10.17 => github.com/vulcanize/go-ethereum v1.10.17-statediff-3.2.1
+replace github.com/ethereum/go-ethereum v1.10.17 => github.com/vulcanize/go-ethereum v1.10.17-statediff-4.0.1-alpha
go.sum: 12 changed lines

@@ -1834,14 +1834,14 @@ github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPU
 github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
 github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
 github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
-github.com/vulcanize/eth-ipfs-state-validator/v3 v3.0.0 h1:hQcbewsJX5Xpd3xdE9n3JpGyuzUqMhwhpQCpbN+9Uiw=
-github.com/vulcanize/eth-ipfs-state-validator/v3 v3.0.0/go.mod h1:CuuygZPciwwLV5vNY1LcIi0PZxXZbOmicw1/IkrPpPI=
+github.com/vulcanize/eth-ipfs-state-validator/v4 v4.0.0-alpha h1:9TtxGBNl/hVQ8wegrZx//UnAMivImbcNcYTQxHfhjlQ=
+github.com/vulcanize/eth-ipfs-state-validator/v4 v4.0.0-alpha/go.mod h1:cY+XmYdOtBc84s02GdDTFoybJF+mR7WDcr3Z7l4Fb1g=
 github.com/vulcanize/gap-filler v0.3.1 h1:N5d+jCJo/VTWFvBSbTD7biRhK/OqDZzi1tgA85SIBKs=
 github.com/vulcanize/gap-filler v0.3.1/go.mod h1:qowG1cgshVpBqMokiWro/1xhh0uypw7oAu8FQ42JMy4=
-github.com/vulcanize/go-ethereum v1.10.17-statediff-3.2.1 h1:BUIaiqqsWM61KfzIowqrGJ8TM2SWwmral4iJ/nhTSe0=
-github.com/vulcanize/go-ethereum v1.10.17-statediff-3.2.1/go.mod h1:mDwZX+QoWdqzQo6SDG3YVqCYACutcSG6uzpziMvTu28=
-github.com/vulcanize/ipfs-ethdb/v3 v3.0.1 h1:gm+6SgxcNOCz6mgwvhsxORLXsr9yIFvbJDuoAK85ONs=
-github.com/vulcanize/ipfs-ethdb/v3 v3.0.1/go.mod h1:SPBTTl5CqRexYfkI66pbzE5nQziYpoQ+7hH0XveA9IU=
+github.com/vulcanize/go-ethereum v1.10.17-statediff-4.0.1-alpha h1:ANyTG2JJkOaQD3IqkLLl2OjtTC+PKlrdmy6HIddpqZw=
+github.com/vulcanize/go-ethereum v1.10.17-statediff-4.0.1-alpha/go.mod h1:mDwZX+QoWdqzQo6SDG3YVqCYACutcSG6uzpziMvTu28=
+github.com/vulcanize/ipfs-ethdb/v4 v4.0.0-alpha h1:XaltpDMGFqcEe+tsYTmqICeFhR+jTHHzc85YTVigEdE=
+github.com/vulcanize/ipfs-ethdb/v4 v4.0.0-alpha/go.mod h1:sSTqwalaV7CiXq83YkMJpZetoSxgcdzr0AG0NnuaGMM=
 github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE=
 github.com/warpfork/go-testmark v0.3.0 h1:Q81c4u7hT+BR5kNfNQhEF0VT2pmL7+Kk0wD+ORYl7iA=
 github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0=
@@ -43,8 +43,8 @@ import (
 	"github.com/ethereum/go-ethereum/trie"
 	"github.com/jmoiron/sqlx"
 	log "github.com/sirupsen/logrus"
-	validator "github.com/vulcanize/eth-ipfs-state-validator/v3/pkg"
-	ipfsethdb "github.com/vulcanize/ipfs-ethdb/v3/postgres"
+	validator "github.com/vulcanize/eth-ipfs-state-validator/v4/pkg"
+	ipfsethdb "github.com/vulcanize/ipfs-ethdb/v4/postgres"
 
 	ethServerShared "github.com/ethereum/go-ethereum/statediff/indexer/shared"
 
@@ -59,31 +59,42 @@ var (
 
 	// errMissingSignature is returned if a block's extra-data section doesn't seem
 	// to contain a 65 byte secp256k1 signature.
-	errMissingSignature = errors.New("extra-data 65 byte signature suffix missing")
 )
 
 const (
 	RetrieveCanonicalBlockHashByNumber = `SELECT block_hash FROM eth.header_cids
-	INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
+	INNER JOIN public.blocks ON (
+		header_cids.mh_key = blocks.key
+		AND header_cids.block_number = blocks.block_number
+	)
 	WHERE block_hash = (SELECT canonical_header_hash($1))`
 	RetrieveCanonicalHeaderByNumber = `SELECT cid, data FROM eth.header_cids
-	INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
+	INNER JOIN public.blocks ON (
+		header_cids.mh_key = blocks.key
+		AND header_cids.block_number = blocks.block_number
+	)
 	WHERE block_hash = (SELECT canonical_header_hash($1))`
 	RetrieveTD = `SELECT CAST(td as Text) FROM eth.header_cids
 	WHERE header_cids.block_hash = $1`
-	RetrieveRPCTransaction = `SELECT blocks.data, block_hash, block_number, index FROM public.blocks, eth.transaction_cids, eth.header_cids
+	RetrieveRPCTransaction = `SELECT blocks.data, block_hash, transaction_cids.block_number, index
+	FROM public.blocks, eth.transaction_cids, eth.header_cids
 	WHERE blocks.key = transaction_cids.mh_key
+	AND blocks.block_number = transaction_cids.block_number
 	AND transaction_cids.header_id = header_cids.block_hash
+	AND transaction_cids.block_number = header_cids.block_number
 	AND transaction_cids.tx_hash = $1`
 	RetrieveCodeHashByLeafKeyAndBlockHash = `SELECT code_hash FROM eth.state_accounts, eth.state_cids, eth.header_cids
-	WHERE state_accounts.header_id = state_cids.header_id AND state_accounts.state_path = state_cids.state_path
+	WHERE state_accounts.header_id = state_cids.header_id
+	AND state_accounts.state_path = state_cids.state_path
+	AND state_accounts.block_number = state_cids.block_number
 	AND state_cids.header_id = header_cids.block_hash
+	AND state_cids.block_number = header_cids.block_number
 	AND state_leaf_key = $1
-	AND block_number <= (SELECT block_number
+	AND header_cids.block_number <= (SELECT block_number
 	FROM eth.header_cids
 	WHERE block_hash = $2)
-	AND header_cids.block_hash = (SELECT canonical_header_hash(block_number))
-	ORDER BY block_number DESC
+	AND header_cids.block_hash = (SELECT canonical_header_hash(header_cids.block_number))
+	ORDER BY header_cids.block_number DESC
 	LIMIT 1`
 	RetrieveCodeByMhKey = `SELECT data FROM public.blocks WHERE key = $1`
 )
@@ -19,6 +19,7 @@ package eth
 import (
 	"fmt"
 	"math/big"
+	"strconv"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
@@ -59,7 +60,7 @@ func (ecr *CIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
 // RetrieveLastBlockNumber is used to retrieve the latest block number in the db
 func (ecr *CIDRetriever) RetrieveLastBlockNumber() (int64, error) {
 	var blockNumber int64
-	err := ecr.db.Get(&blockNumber, "SELECT block_number FROM eth.header_cids ORDER BY block_number DESC LIMIT 1 ")
+	err := ecr.db.Get(&blockNumber, "SELECT block_number FROM eth.header_cids ORDER BY block_number DESC LIMIT 1")
 	return blockNumber, err
 }
 
@@ -167,9 +168,9 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
 func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]models.HeaderModel, error) {
 	log.Debug("retrieving header cids for block ", blockNumber)
 	headers := make([]models.HeaderModel, 0)
-	pgStr := `SELECT CAST(block_number as Text), block_hash,parent_hash,cid,mh_key,CAST(td as Text),node_id,
-	CAST(reward as Text), state_root,uncle_root,tx_root,receipt_root,bloom,timestamp,times_validated,
-	coinbase FROM eth.header_cids
+	pgStr := `SELECT CAST(block_number as Text), block_hash, parent_hash, cid, mh_key, CAST(td as Text), node_id,
+	CAST(reward as Text), state_root, uncle_root,tx_root, receipt_root,bloom, timestamp, times_validated, coinbase
+	FROM eth.header_cids
 	WHERE block_number = $1`
 	return headers, tx.Select(&headers, pgStr, blockNumber)
 }
@@ -178,7 +179,8 @@ func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]m
 func (ecr *CIDRetriever) RetrieveUncleCIDsByHeaderID(tx *sqlx.Tx, headerID string) ([]models.UncleModel, error) {
 	log.Debug("retrieving uncle cids for block id ", headerID)
 	headers := make([]models.UncleModel, 0)
-	pgStr := `SELECT header_id,block_hash,parent_hash,cid,mh_key, CAST(reward as text) FROM eth.uncle_cids
+	pgStr := `SELECT CAST(block_number as Text), header_id, block_hash, parent_hash, cid, mh_key, CAST(reward as text)
+	FROM eth.uncle_cids
 	WHERE header_id = $1`
 	return headers, tx.Select(&headers, pgStr, headerID)
 }
@@ -190,10 +192,15 @@ func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID
 	args := make([]interface{}, 0, 3)
 	results := make([]models.TxModel, 0)
 	id := 1
-	pgStr := fmt.Sprintf(`SELECT transaction_cids.tx_hash, transaction_cids.header_id,transaction_cids.cid, transaction_cids.mh_key,
-	transaction_cids.dst, transaction_cids.src, transaction_cids.index, transaction_cids.tx_data
-	FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
-	WHERE header_cids.block_hash = $%d`, id)
+	pgStr := fmt.Sprintf(`SELECT CAST(transaction_cids.block_number as Text), transaction_cids.tx_hash,
+	transaction_cids.header_id,transaction_cids.cid, transaction_cids.mh_key, transaction_cids.dst,
+	transaction_cids.src, transaction_cids.index, transaction_cids.tx_data
+	FROM eth.transaction_cids
+	INNER JOIN eth.header_cids ON (
+		transaction_cids.header_id = header_cids.block_hash
+		AND transaction_cids.block_number = header_cids.block_number
+	)
+	WHERE header_cids.block_hash = $%d`, id)
 	args = append(args, headerID)
 	id++
 	if len(txFilter.Dst) > 0 {
@@ -292,11 +299,13 @@ func receiptFilterConditions(id *int, pgStr string, args []interface{}, rctFilte
 func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter ReceiptFilter, headerID string, trxHashes []string) ([]models.ReceiptModel, error) {
 	log.Debug("retrieving receipt cids for header id ", headerID)
 	args := make([]interface{}, 0, 4)
-	pgStr := `SELECT receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key,
-	receipt_cids.contract, receipt_cids.contract_hash
+	pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.tx_id, receipt_cids.leaf_cid,
+	receipt_cids.leaf_mh_key, receipt_cids.contract, receipt_cids.contract_hash
 	FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
 	WHERE receipt_cids.tx_id = transaction_cids.tx_hash
+	AND receipt_cids.block_number = transaction_cids.block_number
 	AND transaction_cids.header_id = header_cids.block_hash
+	AND transaction_cids.block_number = header_cids.block_number
 	AND header_cids.block_hash = $1`
 	id := 2
 	args = append(args, headerID)
@@ -308,20 +317,25 @@ func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter Receip
 	return receiptCids, tx.Select(&receiptCids, pgStr, args...)
 }
 
-// RetrieveFilteredGQLLogs retrieves and returns all the log cIDs provided blockHash that conform to the provided
+// RetrieveFilteredGQLLogs retrieves and returns all the log CIDs provided blockHash that conform to the provided
 // filter parameters.
 func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockHash *common.Hash) ([]LogResult, error) {
 	log.Debug("retrieving log cids for receipt ids")
 	args := make([]interface{}, 0, 4)
 	id := 1
-	pgStr := `SELECT eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
+	pgStr := `SELECT CAST(eth.log_cids.block_number as Text), eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
 	eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
 	eth.log_cids.log_data, eth.transaction_cids.tx_hash, data, eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status
 	FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids, public.blocks
 	WHERE eth.log_cids.rct_id = receipt_cids.tx_id
+	AND eth.log_cids.block_number = eth.receipt_cids.block_number
 	AND receipt_cids.tx_id = transaction_cids.tx_hash
-	AND transaction_cids.header_id = header_cids.block_hash
-	AND log_cids.leaf_mh_key = blocks.key AND header_cids.block_hash = $1`
+	AND receipt_cids.block_number = transaction_cids.block_number
+	AND transaction_cids.header_id = header_cids.block_hash
+	AND transaction_cids.block_number = header_cids.block_number
+	AND log_cids.leaf_mh_key = blocks.key
+	AND log_cids.block_number = blocks.block_number
+	AND header_cids.block_hash = $1`
 
 	args = append(args, blockHash.String())
 	id++
@@ -343,14 +357,17 @@ func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptF
 func (ecr *CIDRetriever) RetrieveFilteredLog(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash) ([]LogResult, error) {
 	log.Debug("retrieving log cids for receipt ids")
 	args := make([]interface{}, 0, 4)
-	pgStr := `SELECT eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
+	pgStr := `SELECT CAST(eth.log_cids.block_number as Text), eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
 	eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
 	eth.log_cids.log_data, eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index,
 	header_cids.block_hash, CAST(header_cids.block_number as Text)
 	FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids
 	WHERE eth.log_cids.rct_id = receipt_cids.tx_id
+	AND eth.log_cids.block_number = eth.receipt_cids.block_number
 	AND receipt_cids.tx_id = transaction_cids.tx_hash
-	AND transaction_cids.header_id = header_cids.block_hash`
+	AND receipt_cids.block_number = transaction_cids.block_number
+	AND transaction_cids.header_id = header_cids.block_hash
+	AND transaction_cids.block_number = header_cids.block_number`
 	id := 1
 	if blockNumber > 0 {
 		pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
@@ -380,10 +397,13 @@ func (ecr *CIDRetriever) RetrieveFilteredLog(tx *sqlx.Tx, rctFilter ReceiptFilte
 func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, txHashes []string) ([]models.ReceiptModel, error) {
 	log.Debug("retrieving receipt cids for block ", blockNumber)
 	args := make([]interface{}, 0, 5)
-	pgStr := `SELECT receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key, receipt_cids.tx_id
+	pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.tx_id, receipt_cids.leaf_cid,
+	receipt_cids.leaf_mh_key,
 	FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
 	WHERE receipt_cids.tx_id = transaction_cids.tx_hash
-	AND transaction_cids.header_id = header_cids.block_hash`
+	AND receipt_cids.block_number = transaction_cids.block_number
+	AND transaction_cids.header_id = header_cids.block_hash
+	AND transaction_cids.block_number = header_cids.block_number`
 	id := 1
 	if blockNumber > 0 {
 		pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
@@ -416,9 +436,13 @@ func hasTopics(topics [][]string) bool {
 func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, headerID string) ([]models.StateNodeModel, error) {
 	log.Debug("retrieving state cids for header id ", headerID)
 	args := make([]interface{}, 0, 2)
-	pgStr := `SELECT state_cids.header_id,
+	pgStr := `SELECT CAST(state_cids.block_number as Text), state_cids.header_id,
 	state_cids.state_leaf_key, state_cids.node_type, state_cids.cid, state_cids.mh_key, state_cids.state_path
-	FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
+	FROM eth.state_cids
+	INNER JOIN eth.header_cids ON (
+		state_cids.header_id = header_cids.block_hash
+		AND state_cids.block_number = header_cids.block_number
+	)
 	WHERE header_cids.block_hash = $1`
 	args = append(args, headerID)
 	addrLen := len(stateFilter.Addresses)
@@ -441,11 +465,15 @@ func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter,
 func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, headerID string) ([]models.StorageNodeWithStateKeyModel, error) {
 	log.Debug("retrieving storage cids for header id ", headerID)
 	args := make([]interface{}, 0, 3)
-	pgStr := `SELECT storage_cids.header_id, storage_cids.storage_leaf_key, storage_cids.node_type,
-	storage_cids.cid, storage_cids.mh_key, storage_cids.storage_path, storage_cids.state_path, state_cids.state_leaf_key
+	pgStr := `SELECT CAST(storage_cids.block_number as Text), storage_cids.header_id, storage_cids.storage_leaf_key,
+	storage_cids.node_type, storage_cids.cid, storage_cids.mh_key, storage_cids.storage_path, storage_cids.state_path,
+	state_cids.state_leaf_key
 	FROM eth.storage_cids, eth.state_cids, eth.header_cids
-	WHERE storage_cids.header_id = state_cids.header_id AND storage_cids.state_path = state_cids.state_path
+	WHERE storage_cids.header_id = state_cids.header_id
+	AND storage_cids.state_path = state_cids.state_path
+	AND storage_cids.block_number = state_cids.block_number
 	AND state_cids.header_id = header_cids.block_hash
+	AND state_cids.block_number = header_cids.block_number
 	AND header_cids.block_hash = $1`
 	args = append(args, headerID)
 	id := 2
@@ -496,6 +524,10 @@ func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (models.Head
 		log.Error("header cid retrieval error")
 		return models.HeaderModel{}, nil, nil, nil, err
 	}
+	blockNumber, err := strconv.ParseInt(headerCID.BlockNumber, 10, 64)
+	if err != nil {
+		return models.HeaderModel{}, nil, nil, nil, err
+	}
 	var uncleCIDs []models.UncleModel
 	uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID.BlockHash)
 	if err != nil {
@@ -503,7 +535,7 @@ func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (models.Head
 		return models.HeaderModel{}, nil, nil, nil, err
 	}
 	var txCIDs []models.TxModel
-	txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID.BlockHash)
+	txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID.BlockHash, blockNumber)
 	if err != nil {
 		log.Error("tx cid retrieval error")
 		return models.HeaderModel{}, nil, nil, nil, err
@@ -556,7 +588,7 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (models.Header
 		return models.HeaderModel{}, nil, nil, nil, err
 	}
 	var txCIDs []models.TxModel
-	txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].BlockHash)
+	txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].BlockHash, blockNumber)
 	if err != nil {
 		log.Error("tx cid retrieval error")
 		return models.HeaderModel{}, nil, nil, nil, err
@@ -576,30 +608,33 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (models.Header
 // RetrieveHeaderCIDByHash returns the header for the given block hash
 func (ecr *CIDRetriever) RetrieveHeaderCIDByHash(tx *sqlx.Tx, blockHash common.Hash) (models.HeaderModel, error) {
 	log.Debug("retrieving header cids for block hash ", blockHash.String())
-	pgStr := `SELECT block_hash,cid,mh_key FROM eth.header_cids
+	pgStr := `SELECT block_hash, CAST(block_number as Text), cid, mh_key FROM eth.header_cids
 	WHERE block_hash = $1`
 	var headerCID models.HeaderModel
 	return headerCID, tx.Get(&headerCID, pgStr, blockHash.String())
 }
 
 // RetrieveTxCIDsByHeaderID retrieves all tx CIDs for the given header id
-func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID string) ([]models.TxModel, error) {
+func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID string, blockNumber int64) ([]models.TxModel, error) {
 	log.Debug("retrieving tx cids for block id ", headerID)
-	pgStr := `SELECT * FROM eth.transaction_cids
-	WHERE header_id = $1
+	pgStr := `SELECT CAST(block_number as Text), header_id, index, tx_hash, cid, mh_key,
+	dst, src, tx_data, tx_type, value
+	FROM eth.transaction_cids
+	WHERE header_id = $1 AND block_number = $2
 	ORDER BY index`
 	var txCIDs []models.TxModel
-	return txCIDs, tx.Select(&txCIDs, pgStr, headerID)
+	return txCIDs, tx.Select(&txCIDs, pgStr, headerID, blockNumber)
 }
 
 // RetrieveReceiptCIDsByTxIDs retrieves receipt CIDs by their associated tx IDs
 func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txHashes []string) ([]models.ReceiptModel, error) {
 	log.Debugf("retrieving receipt cids for tx hashes %v", txHashes)
-	pgStr := `SELECT receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key,
+	pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key,
 	receipt_cids.contract, receipt_cids.contract_hash
 	FROM eth.receipt_cids, eth.transaction_cids
 	WHERE tx_id = ANY($1)
 	AND receipt_cids.tx_id = transaction_cids.tx_hash
+	AND receipt_cids.block_number = transaction_cids.block_number
 	ORDER BY transaction_cids.index`
 	var rctCIDs []models.ReceiptModel
 	return rctCIDs, tx.Select(&rctCIDs, pgStr, pq.Array(txHashes))
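A usage note, not part of the diff: RetrieveTxCIDsByHeaderID now takes a block number, and the call sites above obtain it by parsing the header model's string BlockNumber field with strconv before querying. The following is a minimal, self-contained sketch of that conversion step only; the placeholder value stands in for headerCID.BlockNumber and the commented call is shown for orientation.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// BlockNumber is carried as a string in the v4 models; call sites in this
	// PR parse it before invoking the retriever methods, e.g.
	// RetrieveTxCIDsByHeaderID(tx, headerCID.BlockHash, blockNumber).
	headerBlockNumber := "1" // placeholder for headerCID.BlockNumber
	blockNumber, err := strconv.ParseInt(headerBlockNumber, 10, 64)
	if err != nil {
		fmt.Println("invalid block number:", err)
		return
	}
	fmt.Println("parsed block number:", blockNumber)
}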
@@ -249,6 +249,7 @@ var _ = Describe("Retriever", func() {
 				AND header_cids.block_number = $1
 				ORDER BY transaction_cids.index`
 			err := db.Select(&expectedRctCIDsAndLeafNodes, pgStr, test_helpers.BlockNumber.Uint64())
+			Expect(err).ToNot(HaveOccurred())
 			cids, empty, err := retriever.Retrieve(openFilter, 1)
 			Expect(err).ToNot(HaveOccurred())
 			Expect(empty).ToNot(BeTrue())
@@ -412,12 +413,13 @@ var _ = Describe("Retriever", func() {
 			Expect(len(cids7[0].StorageNodes)).To(Equal(0))
 			Expect(len(cids7[0].StateNodes)).To(Equal(1))
 			Expect(cids7[0].StateNodes[0]).To(Equal(models.StateNodeModel{
-				HeaderID: cids7[0].StateNodes[0].HeaderID,
-				NodeType: 2,
-				StateKey: common.BytesToHash(test_helpers.AccountLeafKey).Hex(),
-				CID:      test_helpers.State2CID.String(),
-				MhKey:    test_helpers.State2MhKey,
-				Path:     []byte{'\x0c'},
+				BlockNumber: "1",
+				HeaderID:    cids7[0].StateNodes[0].HeaderID,
+				NodeType:    2,
+				StateKey:    common.BytesToHash(test_helpers.AccountLeafKey).Hex(),
+				CID:         test_helpers.State2CID.String(),
+				MhKey:       test_helpers.State2MhKey,
+				Path:        []byte{'\x0c'},
 			}))
 
 			_, empty, err = retriever.Retrieve(rctTopicsAndAddressFilterFail, 1)
@@ -82,8 +82,9 @@ func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IP
 		return err
 	}
 	response.Header = models.IPLDModel{
-		Data: headerRLP,
-		Key:  cid.String(),
+		BlockNumber: payload.Block.Number().String(),
+		Data:        headerRLP,
+		Key:         cid.String(),
 	}
 	if headerFilter.Uncles {
 		response.Uncles = make([]models.IPLDModel, len(payload.Block.Body().Uncles))
@@ -97,8 +98,9 @@ func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IP
 				return err
 			}
 			response.Uncles[i] = models.IPLDModel{
-				Data: uncleRlp,
-				Key:  cid.String(),
+				BlockNumber: uncle.Number.String(),
+				Data:        uncleRlp,
+				Key:         cid.String(),
 			}
 		}
 	}
@@ -183,8 +185,9 @@ func (s *ResponseFilterer) filerReceipts(receiptFilter ReceiptFilter, response *
 			// TODO: Verify this filter logic.
 			if checkReceipts(receipt, receiptFilter.Topics, topics, receiptFilter.LogAddresses, contracts, trxHashes) {
 				response.Receipts = append(response.Receipts, models.IPLDModel{
-					Data: rctIPLDData[idx],
-					Key:  rctLeafCID[idx].String(),
+					BlockNumber: payload.Block.Number().String(),
+					Data:        rctIPLDData[idx],
+					Key:         rctLeafCID[idx].String(),
 				})
 			}
 		}
@@ -282,8 +285,9 @@ func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storag
 				StateLeafKey: common.BytesToHash(stateNode.LeafKey),
 				Path:         stateNode.Path,
 				IPLD: models.IPLDModel{
-					Data: stateNode.NodeValue,
-					Key:  cid.String(),
+					BlockNumber: payload.Block.Number().String(),
+					Data:        stateNode.NodeValue,
+					Key:         cid.String(),
 				},
 				Type: stateNode.NodeType,
 			})
@@ -300,8 +304,9 @@ func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storag
 					StateLeafKey:   common.BytesToHash(stateNode.LeafKey),
 					StorageLeafKey: common.BytesToHash(storageNode.LeafKey),
 					IPLD: models.IPLDModel{
-						Data: storageNode.NodeValue,
-						Key:  cid.String(),
+						BlockNumber: payload.Block.Number().String(),
+						Data:        storageNode.NodeValue,
+						Key:         cid.String(),
 					},
 					Type: storageNode.NodeType,
 					Path: storageNode.Path,
@@ -61,14 +61,16 @@ var _ = Describe("Filterer", func() {
 				Expect(stateNode.Type).To(Equal(sdtypes.Leaf))
 				if bytes.Equal(stateNode.StateLeafKey.Bytes(), test_helpers.AccountLeafKey) {
 					Expect(stateNode.IPLD).To(Equal(models.IPLDModel{
-						Data: test_helpers.State2IPLD.RawData(),
-						Key:  test_helpers.State2IPLD.Cid().String(),
+						BlockNumber: test_helpers.BlockNumber.String(),
+						Data:        test_helpers.State2IPLD.RawData(),
+						Key:         test_helpers.State2IPLD.Cid().String(),
 					}))
 				}
 				if bytes.Equal(stateNode.StateLeafKey.Bytes(), test_helpers.ContractLeafKey) {
 					Expect(stateNode.IPLD).To(Equal(models.IPLDModel{
-						Data: test_helpers.State1IPLD.RawData(),
-						Key:  test_helpers.State1IPLD.Cid().String(),
+						BlockNumber: test_helpers.BlockNumber.String(),
+						Data:        test_helpers.State1IPLD.RawData(),
+						Key:         test_helpers.State1IPLD.Cid().String(),
 					}))
 				}
 			}
@@ -87,8 +89,9 @@ var _ = Describe("Filterer", func() {
 			Expect(len(iplds1.StateNodes)).To(Equal(0))
 			Expect(len(iplds1.Receipts)).To(Equal(1))
 			Expect(iplds1.Receipts[0]).To(Equal(models.IPLDModel{
-				Data: test_helpers.Rct1IPLD,
-				Key:  test_helpers.Rct1CID.String(),
+				BlockNumber: test_helpers.BlockNumber.String(),
+				Data:        test_helpers.Rct1IPLD,
+				Key:         test_helpers.Rct1CID.String(),
 			}))
 
 			iplds2, err := filterer.Filter(rctTopicsFilter, test_helpers.MockConvertedPayload)
@@ -102,8 +105,9 @@ var _ = Describe("Filterer", func() {
 			Expect(len(iplds2.StateNodes)).To(Equal(0))
 			Expect(len(iplds2.Receipts)).To(Equal(1))
 			Expect(iplds2.Receipts[0]).To(Equal(models.IPLDModel{
-				Data: test_helpers.Rct1IPLD,
-				Key:  test_helpers.Rct1CID.String(),
+				BlockNumber: test_helpers.BlockNumber.String(),
+				Data:        test_helpers.Rct1IPLD,
+				Key:         test_helpers.Rct1CID.String(),
 			}))
 
 			iplds3, err := filterer.Filter(rctTopicsAndAddressFilter, test_helpers.MockConvertedPayload)
@@ -117,8 +121,9 @@ var _ = Describe("Filterer", func() {
 			Expect(len(iplds3.StateNodes)).To(Equal(0))
 			Expect(len(iplds3.Receipts)).To(Equal(1))
 			Expect(iplds3.Receipts[0]).To(Equal(models.IPLDModel{
-				Data: test_helpers.Rct1IPLD,
-				Key:  test_helpers.Rct1CID.String(),
+				BlockNumber: test_helpers.BlockNumber.String(),
+				Data:        test_helpers.Rct1IPLD,
+				Key:         test_helpers.Rct1CID.String(),
 			}))
 
 			iplds4, err := filterer.Filter(rctAddressesAndTopicFilter, test_helpers.MockConvertedPayload)
@@ -132,8 +137,9 @@ var _ = Describe("Filterer", func() {
 			Expect(len(iplds4.StateNodes)).To(Equal(0))
 			Expect(len(iplds4.Receipts)).To(Equal(1))
 			Expect(iplds4.Receipts[0]).To(Equal(models.IPLDModel{
-				Data: test_helpers.Rct2IPLD,
-				Key:  test_helpers.Rct2CID.String(),
+				BlockNumber: test_helpers.BlockNumber.String(),
+				Data:        test_helpers.Rct2IPLD,
+				Key:         test_helpers.Rct2CID.String(),
 			}))
 
 			iplds5, err := filterer.Filter(rctsForAllCollectedTrxs, test_helpers.MockConvertedPayload)
@@ -165,8 +171,9 @@ var _ = Describe("Filterer", func() {
 			Expect(len(iplds6.StateNodes)).To(Equal(0))
 			Expect(len(iplds6.Receipts)).To(Equal(1))
 			Expect(iplds4.Receipts[0]).To(Equal(models.IPLDModel{
-				Data: test_helpers.Rct2IPLD,
-				Key:  test_helpers.Rct2CID.String(),
+				BlockNumber: test_helpers.BlockNumber.String(),
+				Data:        test_helpers.Rct2IPLD,
+				Key:         test_helpers.Rct2CID.String(),
 			}))
 
 			iplds7, err := filterer.Filter(stateFilter, test_helpers.MockConvertedPayload)
@@ -181,8 +188,9 @@ var _ = Describe("Filterer", func() {
 			Expect(len(iplds7.StateNodes)).To(Equal(1))
 			Expect(iplds7.StateNodes[0].StateLeafKey.Bytes()).To(Equal(test_helpers.AccountLeafKey))
 			Expect(iplds7.StateNodes[0].IPLD).To(Equal(models.IPLDModel{
-				Data: test_helpers.State2IPLD.RawData(),
-				Key:  test_helpers.State2IPLD.Cid().String(),
+				BlockNumber: test_helpers.BlockNumber.String(),
+				Data:        test_helpers.State2IPLD.RawData(),
+				Key:         test_helpers.State2IPLD.Cid().String(),
 			}))
 
 			iplds8, err := filterer.Filter(rctTopicsAndAddressFilterFail, test_helpers.MockConvertedPayload)
@@ -20,6 +20,7 @@ import (
 	"errors"
 	"fmt"
 	"math/big"
+	"strconv"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/statediff/indexer/models"
@@ -102,13 +103,19 @@ func (f *IPLDFetcher) Fetch(cids CIDWrapper) (*IPLDs, error) {
 // FetchHeaders fetches headers
 func (f *IPLDFetcher) FetchHeader(tx *sqlx.Tx, c models.HeaderModel) (models.IPLDModel, error) {
 	log.Debug("fetching header ipld")
-	headerBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
+	blockNumber, err := strconv.ParseUint(c.BlockNumber, 10, 64)
+	if err != nil {
+		return models.IPLDModel{}, err
+	}
+
+	headerBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, c.MhKey, blockNumber)
 	if err != nil {
 		return models.IPLDModel{}, err
 	}
 	return models.IPLDModel{
-		Data: headerBytes,
-		Key:  c.CID,
+		BlockNumber: c.BlockNumber,
+		Data:        headerBytes,
+		Key:         c.CID,
 	}, nil
 }
 
@@ -117,13 +124,18 @@ func (f *IPLDFetcher) FetchUncles(tx *sqlx.Tx, cids []models.UncleModel) ([]mode
 	log.Debug("fetching uncle iplds")
 	uncleIPLDs := make([]models.IPLDModel, len(cids))
 	for i, c := range cids {
-		uncleBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
+		blockNumber, err := strconv.ParseUint(c.BlockNumber, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		uncleBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, c.MhKey, blockNumber)
 		if err != nil {
 			return nil, err
 		}
 		uncleIPLDs[i] = models.IPLDModel{
-			Data: uncleBytes,
-			Key:  c.CID,
+			BlockNumber: c.BlockNumber,
+			Data:        uncleBytes,
+			Key:         c.CID,
 		}
 	}
 	return uncleIPLDs, nil
@@ -134,13 +146,18 @@ func (f *IPLDFetcher) FetchTrxs(tx *sqlx.Tx, cids []models.TxModel) ([]models.IP
 	log.Debug("fetching transaction iplds")
 	trxIPLDs := make([]models.IPLDModel, len(cids))
 	for i, c := range cids {
-		txBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
+		blockNumber, err := strconv.ParseUint(c.BlockNumber, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		txBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, c.MhKey, blockNumber)
 		if err != nil {
 			return nil, err
 		}
 		trxIPLDs[i] = models.IPLDModel{
-			Data: txBytes,
-			Key:  c.CID,
+			BlockNumber: c.BlockNumber,
+			Data:        txBytes,
+			Key:         c.CID,
 		}
 	}
 	return trxIPLDs, nil
@@ -151,14 +168,19 @@ func (f *IPLDFetcher) FetchRcts(tx *sqlx.Tx, cids []models.ReceiptModel) ([]mode
 	log.Debug("fetching receipt iplds")
 	rctIPLDs := make([]models.IPLDModel, len(cids))
 	for i, c := range cids {
-		rctBytes, err := shared.FetchIPLDByMhKey(tx, c.LeafMhKey)
+		blockNumber, err := strconv.ParseUint(c.BlockNumber, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		rctBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, c.LeafMhKey, blockNumber)
 		if err != nil {
 			return nil, err
 		}
 		//nodeVal, err := DecodeLeafNode(rctBytes)
 		rctIPLDs[i] = models.IPLDModel{
-			Data: rctBytes,
-			Key:  c.LeafCID,
+			BlockNumber: c.BlockNumber,
+			Data:        rctBytes,
+			Key:         c.LeafCID,
 		}
 	}
 	return rctIPLDs, nil
@@ -172,14 +194,19 @@ func (f *IPLDFetcher) FetchState(tx *sqlx.Tx, cids []models.StateNodeModel) ([]S
 		if stateNode.CID == "" {
 			continue
 		}
-		stateBytes, err := shared.FetchIPLDByMhKey(tx, stateNode.MhKey)
+		blockNumber, err := strconv.ParseUint(stateNode.BlockNumber, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		stateBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, stateNode.MhKey, blockNumber)
 		if err != nil {
 			return nil, err
 		}
 		stateNodes = append(stateNodes, StateNode{
 			IPLD: models.IPLDModel{
-				Data: stateBytes,
-				Key:  stateNode.CID,
+				BlockNumber: stateNode.BlockNumber,
+				Data:        stateBytes,
+				Key:         stateNode.CID,
 			},
 			StateLeafKey: common.HexToHash(stateNode.StateKey),
 			Type:         ResolveToNodeType(stateNode.NodeType),
@@ -197,14 +224,19 @@ func (f *IPLDFetcher) FetchStorage(tx *sqlx.Tx, cids []models.StorageNodeWithSta
 		if storageNode.CID == "" || storageNode.StateKey == "" {
 			continue
 		}
-		storageBytes, err := shared.FetchIPLDByMhKey(tx, storageNode.MhKey)
+		blockNumber, err := strconv.ParseUint(storageNode.BlockNumber, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		storageBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, storageNode.MhKey, blockNumber)
 		if err != nil {
 			return nil, err
 		}
 		storageNodes = append(storageNodes, StorageNode{
 			IPLD: models.IPLDModel{
-				Data: storageBytes,
-				Key:  storageNode.CID,
+				BlockNumber: storageNode.BlockNumber,
+				Data:        storageBytes,
+				Key:         storageNode.CID,
 			},
 			StateLeafKey:   common.HexToHash(storageNode.StateKey),
 			StorageLeafKey: common.HexToHash(storageNode.StorageKey),
|
@ -36,93 +36,180 @@ const (
 
 	RetrieveHeadersByHashesPgStr = `SELECT cid, data
 			FROM eth.header_cids
-			INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
+			INNER JOIN public.blocks ON (
+				header_cids.mh_key = blocks.key
+				AND header_cids.block_number = blocks.block_number
+			)
 			WHERE block_hash = ANY($1::VARCHAR(66)[])`
 	RetrieveHeadersByBlockNumberPgStr = `SELECT cid, data
 			FROM eth.header_cids
-			INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
+			INNER JOIN public.blocks ON (
+				header_cids.mh_key = blocks.key
+				AND header_cids.block_number = blocks.block_number
+			)
 			WHERE block_number = $1`
 	RetrieveHeaderByHashPgStr = `SELECT cid, data
 			FROM eth.header_cids
-			INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
+			INNER JOIN public.blocks ON (
+				header_cids.mh_key = blocks.key
+				AND header_cids.block_number = blocks.block_number
+			)
 			WHERE block_hash = $1`
 	RetrieveUnclesByHashesPgStr = `SELECT cid, data
 			FROM eth.uncle_cids
-			INNER JOIN public.blocks ON (uncle_cids.mh_key = blocks.key)
+			INNER JOIN public.blocks ON (
+				uncle_cids.mh_key = blocks.key
+				AND uncle_cids.block_number = blocks.block_number
+			)
 			WHERE block_hash = ANY($1::VARCHAR(66)[])`
 	RetrieveUnclesByBlockHashPgStr = `SELECT uncle_cids.cid, data
 			FROM eth.uncle_cids
-			INNER JOIN eth.header_cids ON (uncle_cids.header_id = header_cids.block_hash)
-			INNER JOIN public.blocks ON (uncle_cids.mh_key = blocks.key)
+			INNER JOIN eth.header_cids ON (
+				uncle_cids.header_id = header_cids.block_hash
+				AND uncle_cids.block_number = header_cids.block_number
+			)
+			INNER JOIN public.blocks ON (
+				uncle_cids.mh_key = blocks.key
+				AND uncle_cids.block_number = blocks.block_number
+			)
 			WHERE block_hash = $1`
 	RetrieveUnclesByBlockNumberPgStr = `SELECT uncle_cids.cid, data
 			FROM eth.uncle_cids
-			INNER JOIN eth.header_cids ON (uncle_cids.header_id = header_cids.block_hash)
-			INNER JOIN public.blocks ON (uncle_cids.mh_key = blocks.key)
+			INNER JOIN eth.header_cids ON (
+				uncle_cids.header_id = header_cids.block_hash
+				AND uncle_cids.block_number = header_cids.block_number
+			)
+			INNER JOIN public.blocks ON (
+				uncle_cids.mh_key = blocks.key
+				AND uncle_cids.block_number = blocks.block_number
+			)
 			WHERE block_number = $1`
 	RetrieveUncleByHashPgStr = `SELECT cid, data
 			FROM eth.uncle_cids
-			INNER JOIN public.blocks ON (uncle_cids.mh_key = blocks.key)
+			INNER JOIN public.blocks ON (
+				uncle_cids.mh_key = blocks.key
+				AND uncle_cids.block_number = blocks.block_number
+			)
 			WHERE block_hash = $1`
 	RetrieveTransactionsByHashesPgStr = `SELECT cid, data
 			FROM eth.transaction_cids
-			INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key)
+			INNER JOIN public.blocks ON (
+				transaction_cids.mh_key = blocks.key
+				AND transaction_cids.block_number = blocks.block_number
+			)
 			WHERE tx_hash = ANY($1::VARCHAR(66)[])`
 	RetrieveTransactionsByBlockHashPgStr = `SELECT transaction_cids.cid, data
 			FROM eth.transaction_cids
-			INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
-			INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key)
+			INNER JOIN eth.header_cids ON (
+				transaction_cids.header_id = header_cids.block_hash
+				AND transaction_cids.block_number = header_cids.block_number
+			)
+			INNER JOIN public.blocks ON (
+				transaction_cids.mh_key = blocks.key
+				AND transaction_cids.block_number = blocks.block_number
+			)
 			WHERE block_hash = $1
 			ORDER BY eth.transaction_cids.index ASC`
 	RetrieveTransactionsByBlockNumberPgStr = `SELECT transaction_cids.cid, data
 			FROM eth.transaction_cids
-			INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
-			INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key)
+			INNER JOIN eth.header_cids ON (
+				transaction_cids.header_id = header_cids.block_hash
+				AND transaction_cids.block_number = header_cids.block_number
+			)
+			INNER JOIN public.blocks ON (
+				transaction_cids.mh_key = blocks.key
+				AND transaction_cids.block_number = blocks.block_number
+			)
 			WHERE block_number = $1
 			ORDER BY eth.transaction_cids.index ASC`
 	RetrieveTransactionByHashPgStr = `SELECT cid, data
 			FROM eth.transaction_cids
-			INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key)
+			INNER JOIN public.blocks ON (
+				transaction_cids.mh_key = blocks.key
+				AND transaction_cids.block_number = blocks.block_number
+			)
 			WHERE tx_hash = $1`
 	RetrieveReceiptsByTxHashesPgStr = `SELECT receipt_cids.leaf_cid, data
 			FROM eth.receipt_cids
-			INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash)
-			INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key)
+			INNER JOIN eth.transaction_cids ON (
+				receipt_cids.tx_id = transaction_cids.tx_hash
+				AND receipt_cids.block_number = transaction_cids.block_number
+			)
+			INNER JOIN public.blocks ON (
+				receipt_cids.leaf_mh_key = blocks.key
+				AND receipt_cids.block_number = blocks.block_number
+			)
 			WHERE tx_hash = ANY($1::VARCHAR(66)[])`
 	RetrieveReceiptsByBlockHashPgStr = `SELECT receipt_cids.leaf_cid, data, eth.transaction_cids.tx_hash
 			FROM eth.receipt_cids
-			INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash)
-			INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
-			INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key)
+			INNER JOIN eth.transaction_cids ON (
+				receipt_cids.tx_id = transaction_cids.tx_hash
+				AND receipt_cids.block_number = transaction_cids.block_number
+			)
+			INNER JOIN eth.header_cids ON (
+				transaction_cids.header_id = header_cids.block_hash
+				AND transaction_cids.block_number = header_cids.block_number
+			)
+			INNER JOIN public.blocks ON (
+				receipt_cids.leaf_mh_key = blocks.key
+				AND receipt_cids.block_number = blocks.block_number
+			)
 			WHERE block_hash = $1
 			ORDER BY eth.transaction_cids.index ASC`
 	RetrieveReceiptsByBlockNumberPgStr = `SELECT receipt_cids.leaf_cid, data
 			FROM eth.receipt_cids
-			INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash)
-			INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
-			INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key)
+			INNER JOIN eth.transaction_cids ON (
+				receipt_cids.tx_id = transaction_cids.tx_hash
+				AND receipt_cids.block_number = transaction_cids.block_number
+			)
+			INNER JOIN eth.header_cids ON (
+				transaction_cids.header_id = header_cids.block_hash
+				AND transaction_cids.block_number = header_cids.block_number
+			)
+			INNER JOIN public.blocks ON (
+				receipt_cids.leaf_mh_key = blocks.key
+				AND receipt_cids.block_number = blocks.block_number
+			)
 			WHERE block_number = $1
 			ORDER BY eth.transaction_cids.index ASC`
 	RetrieveReceiptByTxHashPgStr = `SELECT receipt_cids.leaf_cid, data
 			FROM eth.receipt_cids
-			INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash)
-			INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key)
+			INNER JOIN eth.transaction_cids ON (
+				receipt_cids.tx_id = transaction_cids.tx_hash
+				AND receipt_cids.block_number = transaction_cids.block_number
+			)
+			INNER JOIN public.blocks ON (
+				receipt_cids.leaf_mh_key = blocks.key
+				AND receipt_cids.block_number = blocks.block_number
+			)
 			WHERE tx_hash = $1`
 	RetrieveAccountByLeafKeyAndBlockHashPgStr = `SELECT state_cids.cid, data, state_cids.node_type
 			FROM eth.state_cids
-			INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
-			INNER JOIN public.blocks ON (state_cids.mh_key = blocks.key)
+			INNER JOIN eth.header_cids ON (
+				state_cids.header_id = header_cids.block_hash
+				AND state_cids.block_number = header_cids.block_number
+			)
+			INNER JOIN public.blocks ON (
+				state_cids.mh_key = blocks.key
+				AND state_cids.block_number = blocks.block_number
+			)
 			WHERE state_leaf_key = $1
-			AND block_number <= (SELECT block_number
+			AND header_cids.block_number <= (SELECT block_number
								FROM eth.header_cids
								WHERE block_hash = $2)
-			AND header_cids.block_hash = (SELECT canonical_header_hash(block_number))
-			ORDER BY block_number DESC
+			AND header_cids.block_hash = (SELECT canonical_header_hash(header_cids.block_number))
+			ORDER BY header_cids.block_number DESC
 			LIMIT 1`
 	RetrieveAccountByLeafKeyAndBlockNumberPgStr = `SELECT state_cids.cid, data, state_cids.node_type
 			FROM eth.state_cids
-			INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
-			INNER JOIN public.blocks ON (state_cids.mh_key = blocks.key)
+			INNER JOIN eth.header_cids ON (
+				state_cids.header_id = header_cids.block_hash
+				AND state_cids.block_number = header_cids.block_number
+			)
+			INNER JOIN public.blocks ON (
+				state_cids.mh_key = blocks.key
+				AND state_cids.block_number = blocks.block_number
+			)
 			WHERE state_leaf_key = $1
 			AND block_number <= $2
 			ORDER BY block_number DESC
@ -132,9 +219,16 @@ const (
 			INNER JOIN eth.state_cids ON (
 				storage_cids.header_id = state_cids.header_id
 				AND storage_cids.state_path = state_cids.state_path
+				AND storage_cids.block_number = state_cids.block_number
+			)
+			INNER JOIN eth.header_cids ON (
+				state_cids.header_id = header_cids.block_hash
+				AND state_cids.block_number = header_cids.block_number
+			)
+			INNER JOIN public.blocks ON (
+				storage_cids.mh_key = blocks.key
+				AND storage_cids.block_number = blocks.block_number
 			)
-			INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
-			INNER JOIN public.blocks ON (storage_cids.mh_key = blocks.key)
 			WHERE state_leaf_key = $1
 			AND storage_leaf_key = $2
 			AND block_number <= $3
@ -145,16 +239,23 @@ const (
 			INNER JOIN eth.state_cids ON (
 				storage_cids.header_id = state_cids.header_id
 				AND storage_cids.state_path = state_cids.state_path
+				AND storage_cids.block_number = state_cids.block_number
+			)
+			INNER JOIN eth.header_cids ON (
+				state_cids.header_id = header_cids.block_hash
+				AND state_cids.block_number = header_cids.block_number
+			)
+			INNER JOIN public.blocks ON (
+				storage_cids.mh_key = blocks.key
+				AND storage_cids.block_number = blocks.block_number
 			)
-			INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
-			INNER JOIN public.blocks ON (storage_cids.mh_key = blocks.key)
 			WHERE state_leaf_key = $1
 			AND storage_leaf_key = $2
-			AND block_number <= (SELECT block_number
+			AND header_cids.block_number <= (SELECT block_number
								FROM eth.header_cids
								WHERE block_hash = $3)
-			AND header_cids.block_hash = (SELECT canonical_header_hash(block_number))
-			ORDER BY block_number DESC
+			AND header_cids.block_hash = (SELECT canonical_header_hash(header_cids.block_number))
+			ORDER BY header_cids.block_number DESC
 			LIMIT 1`
 )
 
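Taken together, the rewritten constants above share one shape: every join against public.blocks, and between the eth.* CID tables, now matches on block_number in addition to the key or hash, so lookups stay confined to the relevant partition of the v4 schema. A hedged Go sketch of how such a constant would be consumed with sqlx follows; retrieveHeaderByHash and headerResult are illustrative names, while db.Get and the query text come from the constants above.

package example

import "github.com/jmoiron/sqlx"

// headerResult matches the (cid, data) projection returned by the header queries above.
type headerResult struct {
	CID  string `db:"cid"`
	Data []byte `db:"data"`
}

// retrieveHeaderByHash is an illustrative wrapper: pgStr would be
// RetrieveHeaderByHashPgStr, and blockHash a 0x-prefixed, 66-character hash string.
func retrieveHeaderByHash(db *sqlx.DB, pgStr, blockHash string) (headerResult, error) {
	var res headerResult
	err := db.Get(&res, pgStr, blockHash)
	return res, err
}
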
@ -231,40 +231,44 @@ var (
 	}
 	MockTrxMetaPostPublsh = []models.TxModel{
 		{
-			CID:    Trx1CID.String(), // This is empty until we go to publish to ipfs
-			MhKey:  Trx1MhKey,
-			Src:    SenderAddr.Hex(),
-			Dst:    Address.String(),
-			Index:  0,
-			TxHash: MockTransactions[0].Hash().String(),
-			Data:   []byte{},
+			BlockNumber: "1",
+			CID:         Trx1CID.String(), // This is empty until we go to publish to ipfs
+			MhKey:       Trx1MhKey,
+			Src:         SenderAddr.Hex(),
+			Dst:         Address.String(),
+			Index:       0,
+			TxHash:      MockTransactions[0].Hash().String(),
+			Data:        []byte{},
 		},
 		{
-			CID:    Trx2CID.String(),
-			MhKey:  Trx2MhKey,
-			Src:    SenderAddr.Hex(),
-			Dst:    AnotherAddress.String(),
-			Index:  1,
-			TxHash: MockTransactions[1].Hash().String(),
-			Data:   []byte{},
+			BlockNumber: "1",
+			CID:         Trx2CID.String(),
+			MhKey:       Trx2MhKey,
+			Src:         SenderAddr.Hex(),
+			Dst:         AnotherAddress.String(),
+			Index:       1,
+			TxHash:      MockTransactions[1].Hash().String(),
+			Data:        []byte{},
 		},
 		{
-			CID:    Trx3CID.String(),
-			MhKey:  Trx3MhKey,
-			Src:    SenderAddr.Hex(),
-			Dst:    "",
-			Index:  2,
-			TxHash: MockTransactions[2].Hash().String(),
-			Data:   MockContractByteCode,
+			BlockNumber: "1",
+			CID:         Trx3CID.String(),
+			MhKey:       Trx3MhKey,
+			Src:         SenderAddr.Hex(),
+			Dst:         "",
+			Index:       2,
+			TxHash:      MockTransactions[2].Hash().String(),
+			Data:        MockContractByteCode,
 		},
 		{
-			CID:    Trx4CID.String(),
-			MhKey:  Trx4MhKey,
-			Src:    SenderAddr.Hex(),
-			Dst:    AnotherAddress1.String(),
-			Index:  3,
-			TxHash: MockTransactions[3].Hash().String(),
-			Data:   []byte{},
+			BlockNumber: "1",
+			CID:         Trx4CID.String(),
+			MhKey:       Trx4MhKey,
+			Src:         SenderAddr.Hex(),
+			Dst:         AnotherAddress1.String(),
+			Index:       3,
+			TxHash:      MockTransactions[3].Hash().String(),
+			Data:        []byte{},
 		},
 	}
 	MockRctMeta = []models.ReceiptModel{
@ -296,24 +300,28 @@ var (
 
 	MockRctMetaPostPublish = []models.ReceiptModel{
 		{
+			BlockNumber:  "1",
 			LeafCID:      Rct1CID.String(),
 			LeafMhKey:    Rct1MhKey,
 			Contract:     "",
 			ContractHash: "",
 		},
 		{
+			BlockNumber:  "1",
 			LeafCID:      Rct2CID.String(),
 			LeafMhKey:    Rct2MhKey,
 			Contract:     "",
 			ContractHash: "",
 		},
 		{
+			BlockNumber:  "1",
 			LeafCID:      Rct3CID.String(),
 			LeafMhKey:    Rct3MhKey,
 			Contract:     ContractAddress.String(),
 			ContractHash: ContractHash,
 		},
 		{
+			BlockNumber:  "1",
 			LeafCID:      Rct4CID.String(),
 			LeafMhKey:    Rct4MhKey,
 			Contract:     "",
@ -391,18 +399,20 @@ var (
 	}
 	MockStateMetaPostPublish = []models.StateNodeModel{
 		{
-			CID:      State1CID.String(),
-			MhKey:    State1MhKey,
-			Path:     []byte{'\x06'},
-			NodeType: 2,
-			StateKey: common.BytesToHash(ContractLeafKey).Hex(),
+			BlockNumber: "1",
+			CID:         State1CID.String(),
+			MhKey:       State1MhKey,
+			Path:        []byte{'\x06'},
+			NodeType:    2,
+			StateKey:    common.BytesToHash(ContractLeafKey).Hex(),
 		},
 		{
-			CID:      State2CID.String(),
-			MhKey:    State2MhKey,
-			Path:     []byte{'\x0c'},
-			NodeType: 2,
-			StateKey: common.BytesToHash(AccountLeafKey).Hex(),
+			BlockNumber: "1",
+			CID:         State2CID.String(),
+			MhKey:       State2MhKey,
+			Path:        []byte{'\x0c'},
+			NodeType:    2,
+			StateKey:    common.BytesToHash(AccountLeafKey).Hex(),
 		},
 	}
 	MockStorageNodes = map[string][]sdtypes.StorageNode{
@ -461,12 +471,13 @@ var (
 		StateNodes: MockStateMetaPostPublish,
 		StorageNodes: []models.StorageNodeWithStateKeyModel{
 			{
-				Path:       []byte{},
-				CID:        StorageCID.String(),
-				MhKey:      StorageMhKey,
-				NodeType:   2,
-				StateKey:   common.BytesToHash(ContractLeafKey).Hex(),
-				StorageKey: common.BytesToHash(StorageLeafKey).Hex(),
+				BlockNumber: "1",
+				Path:        []byte{},
+				CID:         StorageCID.String(),
+				MhKey:       StorageMhKey,
+				NodeType:    2,
+				StateKey:    common.BytesToHash(ContractLeafKey).Hex(),
+				StorageKey:  common.BytesToHash(StorageLeafKey).Hex(),
 			},
 		},
 	}
@ -483,43 +494,52 @@ var (
 	MockIPLDs = eth.IPLDs{
 		BlockNumber: new(big.Int).Set(BlockNumber),
 		Header: models.IPLDModel{
-			Data: HeaderIPLD.RawData(),
-			Key:  HeaderIPLD.Cid().String(),
+			BlockNumber: BlockNumber.String(),
+			Data:        HeaderIPLD.RawData(),
+			Key:         HeaderIPLD.Cid().String(),
 		},
 		Transactions: []models.IPLDModel{
 			{
-				Data: Trx1IPLD.RawData(),
-				Key:  Trx1IPLD.Cid().String(),
+				BlockNumber: BlockNumber.String(),
+				Data:        Trx1IPLD.RawData(),
+				Key:         Trx1IPLD.Cid().String(),
 			},
 			{
-				Data: Trx2IPLD.RawData(),
-				Key:  Trx2IPLD.Cid().String(),
+				BlockNumber: BlockNumber.String(),
+				Data:        Trx2IPLD.RawData(),
+				Key:         Trx2IPLD.Cid().String(),
 			},
 			{
-				Data: Trx3IPLD.RawData(),
-				Key:  Trx3IPLD.Cid().String(),
+				BlockNumber: BlockNumber.String(),
+				Data:        Trx3IPLD.RawData(),
+				Key:         Trx3IPLD.Cid().String(),
 			},
 			{
-				Data: Trx4IPLD.RawData(),
-				Key:  Trx4IPLD.Cid().String(),
+				BlockNumber: BlockNumber.String(),
+				Data:        Trx4IPLD.RawData(),
+				Key:         Trx4IPLD.Cid().String(),
 			},
 		},
 		Receipts: []models.IPLDModel{
 			{
-				Data: Rct1IPLD,
-				Key:  Rct1CID.String(),
+				BlockNumber: BlockNumber.String(),
+				Data:        Rct1IPLD,
+				Key:         Rct1CID.String(),
 			},
 			{
-				Data: Rct2IPLD,
-				Key:  Rct2CID.String(),
+				BlockNumber: BlockNumber.String(),
+				Data:        Rct2IPLD,
+				Key:         Rct2CID.String(),
 			},
 			{
-				Data: Rct3IPLD,
-				Key:  Rct3CID.String(),
+				BlockNumber: BlockNumber.String(),
+				Data:        Rct3IPLD,
+				Key:         Rct3CID.String(),
 			},
 			{
-				Data: Rct4IPLD,
-				Key:  Rct4CID.String(),
+				BlockNumber: BlockNumber.String(),
+				Data:        Rct4IPLD,
+				Key:         Rct4CID.String(),
 			},
 		},
 		StateNodes: []eth.StateNode{
@ -527,8 +547,9 @@ var (
 				StateLeafKey: common.BytesToHash(ContractLeafKey),
 				Type:         sdtypes.Leaf,
 				IPLD: models.IPLDModel{
-					Data: State1IPLD.RawData(),
-					Key:  State1IPLD.Cid().String(),
+					BlockNumber: BlockNumber.String(),
+					Data:        State1IPLD.RawData(),
+					Key:         State1IPLD.Cid().String(),
 				},
 				Path: []byte{'\x06'},
 			},
@ -536,8 +557,9 @@ var (
 				StateLeafKey: common.BytesToHash(AccountLeafKey),
 				Type:         sdtypes.Leaf,
 				IPLD: models.IPLDModel{
-					Data: State2IPLD.RawData(),
-					Key:  State2IPLD.Cid().String(),
+					BlockNumber: BlockNumber.String(),
+					Data:        State2IPLD.RawData(),
+					Key:         State2IPLD.Cid().String(),
 				},
 				Path: []byte{'\x0c'},
 			},
@ -548,8 +570,9 @@ var (
 				StorageLeafKey: common.BytesToHash(StorageLeafKey),
 				Type:           sdtypes.Leaf,
 				IPLD: models.IPLDModel{
-					Data: StorageIPLD.RawData(),
-					Key:  StorageIPLD.Cid().String(),
+					BlockNumber: BlockNumber.String(),
+					Data:        StorageIPLD.RawData(),
+					Key:         StorageIPLD.Cid().String(),
 				},
 				Path: []byte{},
 			},

@ -18,11 +18,9 @@ package shared
 
 import (
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
 	"github.com/ipfs/go-cid"
 	blockstore "github.com/ipfs/go-ipfs-blockstore"
 	dshelp "github.com/ipfs/go-ipfs-ds-help"
-	node "github.com/ipfs/go-ipld-format"
 	"github.com/jmoiron/sqlx"
 	"github.com/sirupsen/logrus"
 )
@ -50,31 +48,11 @@ func Rollback(tx *sqlx.Tx) {
 	}
 }
 
-// PublishIPLD is used to insert an ipld into Postgres blockstore with the provided tx
-func PublishIPLD(tx *sqlx.Tx, i node.Node) error {
-	dbKey := dshelp.MultihashToDsKey(i.Cid().Hash())
-	prefixedKey := blockstore.BlockPrefix.String() + dbKey.String()
-	raw := i.RawData()
-	_, err := tx.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`, prefixedKey, raw)
-	return err
-}
-
-// FetchIPLD is used to retrieve an ipld from Postgres blockstore with the provided tx and cid string
-func FetchIPLD(tx *sqlx.Tx, cid string) ([]byte, error) {
-	mhKey, err := MultihashKeyFromCIDString(cid)
-	if err != nil {
-		return nil, err
-	}
-	pgStr := `SELECT data FROM public.blocks WHERE key = $1`
-	var block []byte
-	return block, tx.Get(&block, pgStr, mhKey)
-}
-
-// FetchIPLDByMhKey is used to retrieve an ipld from Postgres blockstore with the provided tx and mhkey string
-func FetchIPLDByMhKey(tx *sqlx.Tx, mhKey string) ([]byte, error) {
-	pgStr := `SELECT data FROM public.blocks WHERE key = $1`
-	var block []byte
-	return block, tx.Get(&block, pgStr, mhKey)
-}
+// FetchIPLDByMhKeyAndBlockNumber is used to retrieve an ipld from Postgres blockstore with the provided tx, mhkey string and blockNumber
+func FetchIPLDByMhKeyAndBlockNumber(tx *sqlx.Tx, mhKey string, blockNumber uint64) ([]byte, error) {
+	pgStr := `SELECT data FROM public.blocks WHERE key = $1 AND block_number = $2`
+	var block []byte
+	return block, tx.Get(&block, pgStr, mhKey, blockNumber)
+}
 
 // MultihashKeyFromCID converts a cid into a blockstore-prefixed multihash db key string
@ -92,15 +70,3 @@ func MultihashKeyFromCIDString(c string) (string, error) {
 	dbKey := dshelp.MultihashToDsKey(dc.Hash())
 	return blockstore.BlockPrefix.String() + dbKey.String(), nil
 }
-
-// PublishRaw derives a cid from raw bytes and provided codec and multihash type, and writes it to the db tx
-func PublishRaw(tx *sqlx.Tx, codec, mh uint64, raw []byte) (string, error) {
-	c, err := ipld.RawdataToCid(codec, raw, mh)
-	if err != nil {
-		return "", err
-	}
-	dbKey := dshelp.MultihashToDsKey(c.Hash())
-	prefixedKey := blockstore.BlockPrefix.String() + dbKey.String()
-	_, err = tx.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`, prefixedKey, raw)
-	return c.String(), err
-}

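With PublishIPLD, FetchIPLD, FetchIPLDByMhKey, and PublishRaw removed, a call site that previously fetched by CID string now has to derive the multihash key itself and supply the block height explicitly. A hypothetical sketch of that replacement follows; fetchIPLDByCID and the shared import path are illustrative only, while MultihashKeyFromCIDString and FetchIPLDByMhKeyAndBlockNumber are the helpers shown in the hunks above.

package example

import (
	"github.com/jmoiron/sqlx"

	// import path assumed for illustration only
	"github.com/vulcanize/ipld-eth-server/pkg/shared"
)

// fetchIPLDByCID shows one way a removed FetchIPLD(tx, cid) call could be rewritten:
// derive the blockstore-prefixed mh_key from the CID, then query by key and height.
func fetchIPLDByCID(tx *sqlx.Tx, cid string, blockNumber uint64) ([]byte, error) {
	mhKey, err := shared.MultihashKeyFromCIDString(cid)
	if err != nil {
		return nil, err
	}
	return shared.FetchIPLDByMhKeyAndBlockNumber(tx, mhKey, blockNumber)
}
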
@ -58,8 +58,12 @@ func SetupDB() *sqlx.DB {
 func TearDownDB(db *sqlx.DB) {
 	tx, err := db.Beginx()
 	Expect(err).NotTo(HaveOccurred())
+	_, err = tx.Exec(`DELETE FROM nodes`)
+	Expect(err).NotTo(HaveOccurred())
 	_, err = tx.Exec(`DELETE FROM eth.header_cids`)
 	Expect(err).NotTo(HaveOccurred())
+	_, err = tx.Exec(`DELETE FROM eth.uncle_cids`)
+	Expect(err).NotTo(HaveOccurred())
 	_, err = tx.Exec(`DELETE FROM eth.transaction_cids`)
 	Expect(err).NotTo(HaveOccurred())
 	_, err = tx.Exec(`DELETE FROM eth.receipt_cids`)
@ -68,6 +72,10 @@ func TearDownDB(db *sqlx.DB) {
 	Expect(err).NotTo(HaveOccurred())
 	_, err = tx.Exec(`DELETE FROM eth.storage_cids`)
 	Expect(err).NotTo(HaveOccurred())
+	_, err = tx.Exec(`DELETE FROM eth.state_accounts`)
+	Expect(err).NotTo(HaveOccurred())
+	_, err = tx.Exec(`DELETE FROM eth.access_list_elements`)
+	Expect(err).NotTo(HaveOccurred())
 	_, err = tx.Exec(`DELETE FROM blocks`)
 	Expect(err).NotTo(HaveOccurred())
 	_, err = tx.Exec(`DELETE FROM eth.log_cids`)

@ -1,8 +1,26 @@
-# Clear up existing docker images and volume.
-docker-compose down --remove-orphans --volumes
+#!/bin/bash
 
-docker-compose -f docker-compose.yml up -d ipld-eth-db
-sleep 10
-PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing make test
+set -e
 
-docker-compose down --remove-orphans --volumes
+mkdir -p out
+
+# Remove existing docker-tsdb directory
+rm -rf out/docker-tsdb/
+
+# Copy over files to setup TimescaleDB
+ID=$(docker create vulcanize/ipld-eth-db:v4.1.1-alpha)
+docker cp $ID:/app/docker-tsdb out/docker-tsdb/
+docker rm -v $ID
+
+# Spin up TimescaleDB
+docker-compose -f out/docker-tsdb/docker-compose.test.yml -f docker-compose.yml up ipld-eth-db
+trap "docker-compose -f out/docker-tsdb/docker-compose.test.yml -f docker-compose.yml down --remove-orphans --volumes; rm -rf out/" SIGINT SIGTERM ERR
+sleep 45
+
+# Run unit tests
+go clean -testcache
+PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8066 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing_v4 make test
+
+# Clean up
+docker-compose -f out/docker-tsdb/docker-compose.test.yml -f docker-compose.yml down --remove-orphans --volumes
+rm -rf out/