forked from cerc-io/ipld-eth-server
Update transactions, receipts and logs queries (#173)
* Update queries to fetch data from cids tables
* Fix eth_getTransactionByHash to return tx from canonical block
* Fetch transaction by hash in a single query
* Update queries to fetch data from IPLD blocks table
* Avoid NULL result from canonical block hash query
* Update GitHub workflow and script to run unit tests
* Avoid usage of LIMIT 1 in queries for GQL API
* Fetch IPLD data separately when retrieving latest account data or storage value
* Upgrade geth dependency
* Update GitHub workflow
Parent: bcd00c6e2b
Commit: 6cb54ca790
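The canonical-block fix is easiest to see from the client side: after this change, eth_getTransactionByHash should only report a transaction's canonical inclusion, even if the hash was also indexed in a non-canonical block. Below is a minimal sketch of such a check against a running ipld-eth-server using go-ethereum's ethclient; the endpoint URL and the transaction hash are placeholders, not values taken from this commit.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Placeholder endpoint: adjust to wherever your ipld-eth-server HTTP RPC is exposed.
	client, err := ethclient.Dial("http://localhost:8081")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Placeholder hash: substitute a transaction known to the indexed database.
	txHash := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")

	tx, isPending, err := client.TransactionByHash(context.Background(), txHash)
	if err != nil {
		// ethereum.NotFound is returned when the hash is not part of a canonical block.
		log.Fatal(err)
	}

	// The receipt's block hash should name the canonical block containing the tx.
	receipt, err := client.TransactionReceipt(context.Background(), txHash)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("tx %s pending=%t canonical block %s\n", tx.Hash(), isPending, receipt.BlockHash)
}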
.github/workflows/on-pr-publish.yaml

@@ -28,9 +28,9 @@ jobs:
 BUILD_USERNAME: ${{ secrets.BUILD_USERNAME }}
 BUILD_KEY: ${{ secrets.BUILD_KEY }}
 with:
-STACK_ORCHESTRATOR_REF: "382aca8e42bc5e33f301f77cdd2e09cc80602fc3"
+STACK_ORCHESTRATOR_REF: "f2fd766f5400fcb9eb47b50675d2e3b1f2753702"
-GO_ETHEREUM_REF: "c265fdc30915e01bb633203acbbd9d5009a7ddf2"
+GO_ETHEREUM_REF: "7b4ef34de2b9469c3f82972b60e38b34c99c5382"
-IPLD_ETH_DB_REF: "4e948c58ce20c20ab633289f986d2ed2a1fe02ec"
+IPLD_ETH_DB_REF: "b59505eab252670c622b42ce60621e9747fb64f9"
 build:
 name: Run docker build
 runs-on: ubuntu-latest
.github/workflows/run_unit_test.sh

@@ -12,31 +12,17 @@ cd ipld-eth-server
 ## Remove the branch and github related info. This way future runs wont be confused.
 rm -f /tmp/git_head_ref /tmp/git_repository

-# Setup the DB
-cd $temp_dir
-git clone "https://github.com/vulcanize/ipld-eth-db.git"; cd ipld-eth-db; git checkout $(cat /tmp/ipld_eth_db_ref)
-
-# Spin Up DB using Stack Orchestrator
-cd $temp_dir
-git clone "https://github.com/vulcanize/stack-orchestrator.git"; cd stack-orchestrator; git checkout $(cat /tmp/stack_orchestrator_ref)
-
-cd ${temp_dir}/stack-orchestrator
-echo vulcanize_ipld_eth_db=${temp_dir}/ipld-eth-db > ./config.sh
-
-## Remove existing containers if they are present
-docker-compose -f docker/local/docker-compose-db-sharding.yml --env-file ./config.sh down -v --remove-orphans;
-
-trap 'cd ${temp_dir}/stack-orchestrator; docker-compose -f docker/local/docker-compose-db-sharding.yml --env-file ./config.sh down -v --remove-orphans; ' SIGINT SIGTERM
-docker-compose -f docker/local/docker-compose-db-sharding.yml --env-file ./config.sh up -d
+# Spin up DB and run migrations
+docker-compose up -d migrations ipld-eth-db
+trap "docker-compose down -v --remove-orphans; cd $start_dir ; rm -r $temp_dir" SIGINT SIGTERM ERR
+sleep 30

 # Remove old logs so there's no confusion, then run test
 rm -f /tmp/test.log /tmp/return_test.txt
-cd ${temp_dir}/ipld-eth-server
 PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=localhost DATABASE_NAME=vulcanize_testing make test > /tmp/test.log
 echo $? > /tmp/return_test.txt

 # Clean up
-cd ${temp_dir}/stack-orchestrator; docker-compose -f docker/local/docker-compose-db-sharding.yml --env-file ./config.sh down -v --remove-orphans
+docker-compose down -v --remove-orphans
 cd $start_dir
 rm -fr $temp_dir
.github/workflows/tests.yaml

@@ -46,8 +46,6 @@ jobs:
 - name: Output variables to files
 run: |
 echo $GITHUB_REPOSITORY > /tmp/git_repository
-echo ${{ inputs.IPLD_ETH_DB_REF }} > /tmp/ipld_eth_db_ref
-echo ${{ inputs.STACK_ORCHESTRATOR_REF }} > /tmp/stack_orchestrator_ref
 [ -z "$GITHUB_HEAD_REF" ] && echo $GITHUB_REF_NAME > /tmp/git_head_ref || echo $GITHUB_HEAD_REF > /tmp/git_head_ref
 echo "-----BEGIN OPENSSH PRIVATE KEY-----" >> /tmp/key
 echo ${{ env.BUILD_KEY }} >> /tmp/key

@@ -55,15 +53,11 @@
 chmod 400 /tmp/key
 cat /tmp/git_repository
 cat /tmp/git_head_ref
-cat /tmp/stack_orchestrator_ref
-cat /tmp/ipld_eth_db_ref

 - name: Raw SCP
 run: |
 scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key /tmp/git_repository ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/git_repository
 scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key /tmp/git_head_ref ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/git_head_ref
-scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key /tmp/stack_orchestrator_ref ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/stack_orchestrator_ref
-scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key /tmp/ipld_eth_db_ref ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/ipld_eth_db_ref
 scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key .github/workflows/run_unit_test.sh ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/run_unit_test.sh

 - name: Trigger Unit Test

@@ -121,12 +115,11 @@ jobs:
 echo vulcanize_ipld_eth_db=$GITHUB_WORKSPACE/ipld-eth-db/ >> ./config.sh
 echo vulcanize_ipld_eth_server=$GITHUB_WORKSPACE/ipld-eth-server/ >> ./config.sh
 echo vulcanize_test_contract=$GITHUB_WORKSPACE/ipld-eth-server/test/contract >> ./config.sh
+echo genesis_file_path=start-up-files/go-ethereum/genesis.json >> ./config.sh
 echo db_write=$DB_WRITE >> ./config.sh
 echo eth_forward_eth_calls=$ETH_FORWARD_ETH_CALLS >> ./config.sh
 echo eth_proxy_on_error=$ETH_PROXY_ON_ERROR >> ./config.sh
 echo eth_http_path=$ETH_HTTP_PATH >> ./config.sh
-echo watched_address_gap_filler_enabled=false >> ./config.sh
-echo watched_address_gap_filler_interval=5 >> ./config.sh
 cat ./config.sh
 - name: Build geth
 run: |

@@ -190,12 +183,11 @@ jobs:
 echo vulcanize_ipld_eth_db=$GITHUB_WORKSPACE/ipld-eth-db/ >> ./config.sh
 echo vulcanize_ipld_eth_server=$GITHUB_WORKSPACE/ipld-eth-server/ >> ./config.sh
 echo vulcanize_test_contract=$GITHUB_WORKSPACE/ipld-eth-server/test/contract >>./config.sh
+echo genesis_file_path=start-up-files/go-ethereum/genesis.json >> ./config.sh
 echo db_write=$DB_WRITE >> ./config.sh
 echo eth_forward_eth_calls=$ETH_FORWARD_ETH_CALLS >> ./config.sh
 echo eth_proxy_on_error=$ETH_PROXY_ON_ERROR >> ./config.sh
 echo eth_http_path=$ETH_HTTP_PATH >> ./config.sh
-echo watched_address_gap_filler_enabled=false >> ./config.sh
-echo watched_address_gap_filler_interval=5 >> ./config.sh
 cat ./config.sh
 - name: Build geth
 run: |
@@ -22,13 +22,13 @@ Additional, unique endpoints are exposed which utilize the new indexes and state

 ## Dependencies
 Minimal build dependencies
-* Go (1.13)
+* Go (1.18)
 * Git
 * GCC compiler
 * This repository

 External dependency
-* Postgres database populated by [ipld-eth-indexer](https://github.com/vulcanize/ipld-eth-indexer)
+* Postgres database populated by [ipld-eth-db](https://github.com/vulcanize/ipld-eth-db)

 ## Install
 Start by downloading ipld-eth-server and moving into the repo:
@@ -1,18 +1,29 @@
 version: '3.2'

 services:
-  ipld-eth-db:
+  migrations:
     restart: on-failure
     depends_on:
-      - access-node
+      - ipld-eth-db
-    image: vulcanize/ipld-eth-db:v4.1.1-alpha
+    image: vulcanize/ipld-eth-db:v4.2.0-alpha
     environment:
       DATABASE_USER: "vdbm"
-      DATABASE_NAME: "vulcanize_testing_v4"
+      DATABASE_NAME: "vulcanize_testing"
       DATABASE_PASSWORD: "password"
-      DATABASE_HOSTNAME: "access-node"
+      DATABASE_HOSTNAME: "ipld-eth-db"
       DATABASE_PORT: 5432

+  ipld-eth-db:
+    image: timescale/timescaledb:latest-pg14
+    restart: always
+    command: ["postgres", "-c", "log_statement=all"]
+    environment:
+      POSTGRES_USER: "vdbm"
+      POSTGRES_DB: "vulcanize_testing"
+      POSTGRES_PASSWORD: "password"
+    ports:
+      - "127.0.0.1:8077:5432"

   eth-server:
     restart: unless-stopped
     depends_on:

@@ -44,20 +55,5 @@ services:
     ports:
       - "127.0.0.1:8081:8081"

-  graphql:
-    restart: unless-stopped
-    depends_on:
-      - ipld-eth-db
-    image: vulcanize/postgraphile:v1.0.1
-    environment:
-      - PG_HOST=db
-      - PG_PORT=5432
-      - PG_DATABASE=vulcanize_public
-      - PG_USER=vdbm
-      - PG_PASSWORD=password
-      - SCHEMA=public,eth
-    ports:
-      - "127.0.0.1:5000:5000"

 volumes:
   vdb_db_eth_server:
go.mod

@@ -289,4 +289,4 @@ require (
 	lukechampine.com/blake3 v1.1.6 // indirect
 )

-replace github.com/ethereum/go-ethereum v1.10.19 => github.com/vulcanize/go-ethereum v1.10.19-statediff-4.0.2-alpha
+replace github.com/ethereum/go-ethereum v1.10.19 => github.com/vulcanize/go-ethereum v1.10.19-statediff-4.1.0-alpha
go.sum

@@ -1662,8 +1662,8 @@ github.com/vulcanize/eth-ipfs-state-validator/v4 v4.0.3-alpha h1:sDDK4eOdW3JEds+
 github.com/vulcanize/eth-ipfs-state-validator/v4 v4.0.3-alpha/go.mod h1:/pHfZd1IWsSTpCtGq6nnzUZBAkLV+zMrRh6Z3Hr3NFc=
 github.com/vulcanize/gap-filler v0.4.0 h1:5VD9PG7UrjEub4rLxZmstWoHnBnVtXz9silIVdrnTsM=
 github.com/vulcanize/gap-filler v0.4.0/go.mod h1:5awUyotIoJi6AuG0JPEm7SIwFZBD7Ecg0I8x7CdxcHI=
-github.com/vulcanize/go-ethereum v1.10.19-statediff-4.0.2-alpha h1:xD4fA2khoAnhBEk84JwrIEGvQCndVXpQGv5n7a9cgwc=
+github.com/vulcanize/go-ethereum v1.10.19-statediff-4.1.0-alpha h1:8ge2ban6t/e53XDwe6s28jcCevT7Ggo51lNJ0Eo1PgA=
-github.com/vulcanize/go-ethereum v1.10.19-statediff-4.0.2-alpha/go.mod h1:5tMN+CDbK/qI2UlfN307HJykDmVIOCB1FM5RcHK9Kp8=
+github.com/vulcanize/go-ethereum v1.10.19-statediff-4.1.0-alpha/go.mod h1:5tMN+CDbK/qI2UlfN307HJykDmVIOCB1FM5RcHK9Kp8=
 github.com/vulcanize/ipfs-ethdb/v4 v4.0.2-alpha h1:xak1uYmFWqJ2Hz3pM+0jDcqdlwYwRWeSkQV6B8IxD/0=
 github.com/vulcanize/ipfs-ethdb/v4 v4.0.2-alpha/go.mod h1:pHbLbW4Hk1IFpxrY9yi50IuoPPzmSY7lwOqpFAa369k=
 github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE=
@@ -52,22 +52,22 @@ import (
 )

 var (
 	errPendingBlockNumber  = errors.New("pending block number not supported")
 	errNegativeBlockNumber = errors.New("negative block number not supported")
 	errHeaderHashNotFound  = errors.New("header for hash not found")
 	errHeaderNotFound      = errors.New("header not found")
+	errMultipleHeadersForHash = errors.New("more than one headers for the given hash")
+	errTxHashNotFound         = errors.New("transaction for hash not found")
+	errTxHashInMultipleBlocks = errors.New("transaction for hash found in more than one canonical block")

 	// errMissingSignature is returned if a block's extra-data section doesn't seem
 	// to contain a 65 byte secp256k1 signature.
 )

 const (
-	RetrieveCanonicalBlockHashByNumber = `SELECT block_hash FROM eth.header_cids
-		INNER JOIN public.blocks ON (
-			header_cids.mh_key = blocks.key
-			AND header_cids.block_number = blocks.block_number
-		)
-		WHERE block_hash = (SELECT canonical_header_hash($1))`
+	RetrieveCanonicalBlockHashByNumber = `SELECT block_hash
+		FROM canonical_header_hash($1) AS block_hash
+		WHERE block_hash IS NOT NULL`
 	RetrieveCanonicalHeaderByNumber = `SELECT cid, data FROM eth.header_cids
 		INNER JOIN public.blocks ON (
 			header_cids.mh_key = blocks.key

@@ -76,13 +76,12 @@ const (
 		WHERE block_hash = (SELECT canonical_header_hash($1))`
 	RetrieveTD = `SELECT CAST(td as Text) FROM eth.header_cids
 		WHERE header_cids.block_hash = $1`
-	RetrieveRPCTransaction = `SELECT blocks.data, block_hash, transaction_cids.block_number, index
-		FROM public.blocks, eth.transaction_cids, eth.header_cids
+	RetrieveRPCTransaction = `SELECT blocks.data, header_id, transaction_cids.block_number, index
+		FROM public.blocks, eth.transaction_cids
 		WHERE blocks.key = transaction_cids.mh_key
 		AND blocks.block_number = transaction_cids.block_number
-		AND transaction_cids.header_id = header_cids.block_hash
-		AND transaction_cids.block_number = header_cids.block_number
-		AND transaction_cids.tx_hash = $1`
+		AND transaction_cids.tx_hash = $1
+		AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))`
 	RetrieveCodeHashByLeafKeyAndBlockHash = `SELECT code_hash FROM eth.state_accounts, eth.state_cids, eth.header_cids
 		WHERE state_accounts.header_id = state_cids.header_id
 		AND state_accounts.state_path = state_cids.state_path

@@ -524,20 +523,30 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
 // GetTransaction retrieves a tx by hash
 // It also returns the blockhash, blocknumber, and tx index associated with the transaction
 func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
-	var tempTxStruct struct {
+	type txRes struct {
 		Data        []byte `db:"data"`
-		BlockHash   string `db:"block_hash"`
+		HeaderID    string `db:"header_id"`
 		BlockNumber uint64 `db:"block_number"`
 		Index       uint64 `db:"index"`
 	}
-	if err := b.DB.Get(&tempTxStruct, RetrieveRPCTransaction, txHash.String()); err != nil {
+	var res = make([]txRes, 0)
+	if err := b.DB.Select(&res, RetrieveRPCTransaction, txHash.String()); err != nil {
 		return nil, common.Hash{}, 0, 0, err
 	}

+	if len(res) == 0 {
+		return nil, common.Hash{}, 0, 0, errTxHashNotFound
+	} else if len(res) > 1 {
+		// a transaction can be part of a only one canonical block
+		return nil, common.Hash{}, 0, 0, errTxHashInMultipleBlocks
+	}

 	var transaction types.Transaction
-	if err := transaction.UnmarshalBinary(tempTxStruct.Data); err != nil {
+	if err := transaction.UnmarshalBinary(res[0].Data); err != nil {
 		return nil, common.Hash{}, 0, 0, err
 	}
-	return &transaction, common.HexToHash(tempTxStruct.BlockHash), tempTxStruct.BlockNumber, tempTxStruct.Index, nil
+	return &transaction, common.HexToHash(res[0].HeaderID), res[0].BlockNumber, res[0].Index, nil
 }

 // GetReceipts retrieves receipts for provided block hash
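The same pattern recurs throughout this commit: instead of Get/First (which effectively hides a LIMIT 1), rows are read into a slice and the cardinality is checked explicitly, so "not found" and "hash appears in more than one block" become distinct errors. Below is a minimal, self-contained sketch of that idiom with sqlx; the table, query, and connection string are illustrative assumptions, not the server's actual schema.

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

var (
	errNotFound  = errors.New("row for key not found")
	errAmbiguous = errors.New("more than one row for key")
)

type row struct {
	Data []byte `db:"data"`
}

// fetchExactlyOne reads all matches and enforces cardinality in Go,
// rather than silently taking the first row via Get / LIMIT 1.
func fetchExactlyOne(db *sqlx.DB, key string) ([]byte, error) {
	rows := make([]row, 0)
	// Illustrative query against a hypothetical table.
	if err := db.Select(&rows, `SELECT data FROM example_table WHERE key = $1`, key); err != nil {
		return nil, err
	}
	if len(rows) == 0 {
		return nil, errNotFound
	}
	if len(rows) > 1 {
		return nil, errAmbiguous
	}
	return rows[0].Data, nil
}

func main() {
	// Placeholder DSN; adjust for your database.
	db, err := sqlx.Connect("postgres", "postgres://vdbm:password@localhost:8077/vulcanize_testing?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	data, err := fetchExactlyOne(db, "some-key")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("fetched %d bytes\n", len(data))
}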
@@ -191,7 +191,7 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
 	}
 	// Retrieve cached receipt CIDs
 	if !filter.ReceiptFilter.Off {
-		cw.Receipts, err = ecr.RetrieveRctCIDsByHeaderID(tx, filter.ReceiptFilter, header.BlockHash, trxHashes)
+		cw.Receipts, err = ecr.RetrieveRctCIDs(tx, filter.ReceiptFilter, 0, header.BlockHash, trxHashes)
 		if err != nil {
 			log.Error("receipt cid retrieval error")
 			return nil, true, err

@@ -257,8 +257,8 @@ func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID
 	results := make([]models.TxModel, 0)
 	id := 1
 	pgStr := fmt.Sprintf(`SELECT CAST(transaction_cids.block_number as Text), transaction_cids.tx_hash,
-	transaction_cids.header_id,transaction_cids.cid, transaction_cids.mh_key, transaction_cids.dst,
-	transaction_cids.src, transaction_cids.index, transaction_cids.tx_data
+	transaction_cids.header_id, transaction_cids.cid, transaction_cids.mh_key, transaction_cids.dst,
+	transaction_cids.src, transaction_cids.index, transaction_cids.tx_data, transaction_cids.tx_type
 	FROM eth.transaction_cids
 	INNER JOIN eth.header_cids ON (
 		transaction_cids.header_id = header_cids.block_hash

@@ -358,29 +358,6 @@ func receiptFilterConditions(id *int, pgStr string, args []interface{}, rctFilte
 	return pgStr, args
 }

-// RetrieveRctCIDsByHeaderID retrieves and returns all of the rct cids at the provided header ID that conform to the provided
-// filter parameters and correspond to the provided tx ids
-func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter ReceiptFilter, headerID string, trxHashes []string) ([]models.ReceiptModel, error) {
-	log.Debug("retrieving receipt cids for header id ", headerID)
-	args := make([]interface{}, 0, 4)
-	pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.tx_id, receipt_cids.leaf_cid,
-	receipt_cids.leaf_mh_key, receipt_cids.contract, receipt_cids.contract_hash
-	FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
-	WHERE receipt_cids.tx_id = transaction_cids.tx_hash
-	AND receipt_cids.block_number = transaction_cids.block_number
-	AND transaction_cids.header_id = header_cids.block_hash
-	AND transaction_cids.block_number = header_cids.block_number
-	AND header_cids.block_hash = $1`
-	id := 2
-	args = append(args, headerID)
-
-	pgStr, args = receiptFilterConditions(&id, pgStr, args, rctFilter, trxHashes)
-
-	pgStr += ` ORDER BY transaction_cids.index`
-	receiptCIDs := make([]models.ReceiptModel, 0)
-	return receiptCIDs, tx.Select(&receiptCIDs, pgStr, args...)
-}
-
 // RetrieveFilteredGQLLogs retrieves and returns all the log CIDs provided blockHash that conform to the provided
 // filter parameters.
 func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockHash *common.Hash) ([]LogResult, error) {

@@ -388,12 +365,15 @@ func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptF
 	args := make([]interface{}, 0, 4)
 	id := 1
 	pgStr := `SELECT CAST(eth.log_cids.block_number as Text), eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
 	eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
-	eth.log_cids.log_data, eth.transaction_cids.tx_hash, data, eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status
+	eth.log_cids.log_data, eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index, data,
+	eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status, header_cids.block_hash
 	FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids, public.blocks
 	WHERE eth.log_cids.rct_id = receipt_cids.tx_id
+	AND eth.log_cids.header_id = eth.receipt_cids.header_id
 	AND eth.log_cids.block_number = eth.receipt_cids.block_number
 	AND receipt_cids.tx_id = transaction_cids.tx_hash
+	AND receipt_cids.header_id = transaction_cids.header_id
 	AND receipt_cids.block_number = transaction_cids.block_number
 	AND transaction_cids.header_id = header_cids.block_hash
 	AND transaction_cids.block_number = header_cids.block_number

@@ -416,19 +396,21 @@ func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptF
 	return logCIDs, nil
 }

-// RetrieveFilteredLog retrieves and returns all the log cIDs provided blockHeight or blockHash that conform to the provided
+// RetrieveFilteredLog retrieves and returns all the log CIDs provided blockHeight or blockHash that conform to the provided
 // filter parameters.
 func (ecr *CIDRetriever) RetrieveFilteredLog(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash) ([]LogResult, error) {
 	log.Debug("retrieving log cids for receipt ids")
 	args := make([]interface{}, 0, 4)
 	pgStr := `SELECT CAST(eth.log_cids.block_number as Text), eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
 	eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
 	eth.log_cids.log_data, eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index,
-	header_cids.block_hash, CAST(header_cids.block_number as Text)
+	eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status, header_cids.block_hash
 	FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids
 	WHERE eth.log_cids.rct_id = receipt_cids.tx_id
+	AND eth.log_cids.header_id = eth.receipt_cids.header_id
 	AND eth.log_cids.block_number = eth.receipt_cids.block_number
 	AND receipt_cids.tx_id = transaction_cids.tx_hash
+	AND receipt_cids.header_id = transaction_cids.header_id
 	AND receipt_cids.block_number = transaction_cids.block_number
 	AND transaction_cids.header_id = header_cids.block_hash
 	AND transaction_cids.block_number = header_cids.block_number`

@@ -458,13 +440,14 @@ func (ecr *CIDRetriever) RetrieveFilteredLog(tx *sqlx.Tx, rctFilter ReceiptFilte
 // RetrieveRctCIDs retrieves and returns all of the rct cids at the provided blockheight or block hash that conform to the provided
 // filter parameters and correspond to the provided tx ids
-func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, txHashes []string) ([]models.ReceiptModel, error) {
+func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash string, txHashes []string) ([]models.ReceiptModel, error) {
 	log.Debug("retrieving receipt cids for block ", blockNumber)
 	args := make([]interface{}, 0, 5)
-	pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.tx_id, receipt_cids.leaf_cid,
-	receipt_cids.leaf_mh_key,
+	pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.header_id, receipt_cids.tx_id,
+	receipt_cids.leaf_cid, receipt_cids.leaf_mh_key, receipt_cids.contract, receipt_cids.contract_hash
 	FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
 	WHERE receipt_cids.tx_id = transaction_cids.tx_hash
+	AND receipt_cids.header_id = transaction_cids.header_id
 	AND receipt_cids.block_number = transaction_cids.block_number
 	AND transaction_cids.header_id = header_cids.block_hash
 	AND transaction_cids.block_number = header_cids.block_number`

@@ -474,9 +457,9 @@ func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, b
 		args = append(args, blockNumber)
 		id++
 	}
-	if blockHash != nil {
+	if blockHash != "" {
 		pgStr += fmt.Sprintf(` AND header_cids.block_hash = $%d`, id)
-		args = append(args, blockHash.String())
+		args = append(args, blockHash)
 		id++
 	}

@@ -609,7 +592,7 @@ func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (models.Head
 		txHashes[i] = txCID.TxHash
 	}
 	var rctCIDs []models.ReceiptModel
-	rctCIDs, err = ecr.RetrieveReceiptCIDsByTxIDs(tx, txHashes)
+	rctCIDs, err = ecr.RetrieveReceiptCIDsByByHeaderIDAndTxIDs(tx, headerCID.BlockHash, txHashes, blockNumber)
 	if err != nil {
 		log.Error("rct cid retrieval error")
 	}

@@ -662,7 +645,7 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (models.Header
 		txHashes[i] = txCID.TxHash
 	}
 	var rctCIDs []models.ReceiptModel
-	rctCIDs, err = ecr.RetrieveReceiptCIDsByTxIDs(tx, txHashes)
+	rctCIDs, err = ecr.RetrieveReceiptCIDsByByHeaderIDAndTxIDs(tx, headerCID[0].BlockHash, txHashes, blockNumber)
 	if err != nil {
 		log.Error("rct cid retrieval error")
 	}

@@ -691,18 +674,21 @@ func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID string,
 	return txCIDs, tx.Select(&txCIDs, pgStr, headerID, blockNumber)
 }

-// RetrieveReceiptCIDsByTxIDs retrieves receipt CIDs by their associated tx IDs
-func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txHashes []string) ([]models.ReceiptModel, error) {
+// RetrieveReceiptCIDsByByHeaderIDAndTxIDs retrieves receipt CIDs by their associated tx IDs for the given header id
+func (ecr *CIDRetriever) RetrieveReceiptCIDsByByHeaderIDAndTxIDs(tx *sqlx.Tx, headerID string, txHashes []string, blockNumber int64) ([]models.ReceiptModel, error) {
 	log.Debugf("retrieving receipt cids for tx hashes %v", txHashes)
-	pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key,
-	receipt_cids.contract, receipt_cids.contract_hash
+	pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.header_id, receipt_cids.tx_id, receipt_cids.leaf_cid,
+	receipt_cids.leaf_mh_key, receipt_cids.contract, receipt_cids.contract_hash
 	FROM eth.receipt_cids, eth.transaction_cids
-	WHERE tx_id = ANY($1)
+	WHERE tx_id = ANY($2)
 	AND receipt_cids.tx_id = transaction_cids.tx_hash
+	AND receipt_cids.header_id = transaction_cids.header_id
 	AND receipt_cids.block_number = transaction_cids.block_number
+	AND transaction_cids.header_id = $1
+	AND transaction_cids.block_number = $3
 	ORDER BY transaction_cids.index`
 	var rctCIDs []models.ReceiptModel
-	return rctCIDs, tx.Select(&rctCIDs, pgStr, pq.Array(txHashes))
+	return rctCIDs, tx.Select(&rctCIDs, pgStr, headerID, pq.Array(txHashes), blockNumber)
 }

 // RetrieveHeaderAndTxCIDsByBlockNumber retrieves header CIDs and their associated tx CIDs by block number

@@ -729,33 +715,45 @@ func (ecr *CIDRetriever) RetrieveHeaderAndTxCIDsByBlockNumber(blockNumber int64)
 func (ecr *CIDRetriever) RetrieveHeaderAndTxCIDsByBlockHash(blockHash common.Hash) (HeaderCIDRecord, error) {
 	log.Debug("retrieving header cid and tx cids for block hash ", blockHash.String())

-	var headerCID HeaderCIDRecord
+	var headerCIDs []HeaderCIDRecord

 	// https://github.com/go-gorm/gorm/issues/4083#issuecomment-778883283
 	// Will use join for TransactionCIDs once preload for 1:N is supported.
 	err := ecr.gormDB.Preload("TransactionCIDs", func(tx *gorm.DB) *gorm.DB {
 		return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id", "block_number")
-	}).Joins("IPLD").First(&headerCID, "block_hash = ?", blockHash.String()).Error
+	}).Joins("IPLD").Find(&headerCIDs, "block_hash = ?", blockHash.String()).Error

 	if err != nil {
 		log.Error("header cid retrieval error")
-		return headerCID, err
+		return HeaderCIDRecord{}, err
 	}

-	return headerCID, nil
+	if len(headerCIDs) == 0 {
+		return HeaderCIDRecord{}, errHeaderHashNotFound
+	} else if len(headerCIDs) > 1 {
+		return HeaderCIDRecord{}, errMultipleHeadersForHash
+	}
+
+	return headerCIDs[0], nil
 }

 // RetrieveTxCIDByHash returns the tx for the given tx hash
 func (ecr *CIDRetriever) RetrieveTxCIDByHash(txHash string) (TransactionCIDRecord, error) {
 	log.Debug("retrieving tx cid for tx hash ", txHash)

-	var txCID TransactionCIDRecord
+	var txCIDs []TransactionCIDRecord

-	err := ecr.gormDB.Joins("IPLD").First(&txCID, "tx_hash = ?", txHash).Error
+	err := ecr.gormDB.Joins("IPLD").Find(&txCIDs, "tx_hash = ? AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))", txHash).Error
 	if err != nil {
 		log.Error("header cid retrieval error")
-		return txCID, err
+		return TransactionCIDRecord{}, err
 	}

-	return txCID, nil
+	if len(txCIDs) == 0 {
+		return TransactionCIDRecord{}, errTxHashNotFound
+	} else if len(txCIDs) > 1 {
+		// a transaction can be part of a only one canonical block
+		return TransactionCIDRecord{}, errTxHashInMultipleBlocks
+	}
+
+	return txCIDs[0], nil
 }
@@ -18,10 +18,12 @@ package eth
 import (
 	"fmt"
+	"strconv"

 	"github.com/ethereum/go-ethereum/statediff/trie_helpers"
 	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
 	"github.com/jmoiron/sqlx"
+	"github.com/vulcanize/ipld-eth-server/v4/pkg/shared"

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"

@@ -47,7 +49,7 @@ const (
 		header_cids.mh_key = blocks.key
 		AND header_cids.block_number = blocks.block_number
 	)
-	WHERE block_number = $1`
+	WHERE header_cids.block_number = $1`
 	RetrieveHeaderByHashPgStr = `SELECT cid, data
 	FROM eth.header_cids
 	INNER JOIN public.blocks ON (

@@ -72,7 +74,7 @@ const (
 		uncle_cids.mh_key = blocks.key
 		AND uncle_cids.block_number = blocks.block_number
 	)
-	WHERE block_hash = $1`
+	WHERE header_cids.block_hash = $1`
 	RetrieveUnclesByBlockNumberPgStr = `SELECT uncle_cids.cid, data
 	FROM eth.uncle_cids
 	INNER JOIN eth.header_cids ON (

@@ -83,7 +85,7 @@ const (
 		uncle_cids.mh_key = blocks.key
 		AND uncle_cids.block_number = blocks.block_number
 	)
-	WHERE block_number = $1`
+	WHERE header_cids.block_number = $1`
 	RetrieveUncleByHashPgStr = `SELECT cid, data
 	FROM eth.uncle_cids
 	INNER JOIN public.blocks ON (

@@ -91,7 +93,7 @@ const (
 		AND uncle_cids.block_number = blocks.block_number
 	)
 	WHERE block_hash = $1`
-	RetrieveTransactionsByHashesPgStr = `SELECT cid, data
+	RetrieveTransactionsByHashesPgStr = `SELECT DISTINCT ON (tx_hash) cid, data
 	FROM eth.transaction_cids
 	INNER JOIN public.blocks ON (
 		transaction_cids.mh_key = blocks.key

@@ -120,9 +122,10 @@ const (
 		transaction_cids.mh_key = blocks.key
 		AND transaction_cids.block_number = blocks.block_number
 	)
-	WHERE block_number = $1
+	WHERE header_cids.block_number = $1
+	AND block_hash = (SELECT canonical_header_hash(header_cids.block_number))
 	ORDER BY eth.transaction_cids.index ASC`
-	RetrieveTransactionByHashPgStr = `SELECT cid, data
+	RetrieveTransactionByHashPgStr = `SELECT DISTINCT ON (tx_hash) cid, data
 	FROM eth.transaction_cids
 	INNER JOIN public.blocks ON (
 		transaction_cids.mh_key = blocks.key

@@ -133,17 +136,20 @@ const (
 	FROM eth.receipt_cids
 	INNER JOIN eth.transaction_cids ON (
 		receipt_cids.tx_id = transaction_cids.tx_hash
+		AND receipt_cids.header_id = transaction_cids.header_id
 		AND receipt_cids.block_number = transaction_cids.block_number
 	)
 	INNER JOIN public.blocks ON (
 		receipt_cids.leaf_mh_key = blocks.key
 		AND receipt_cids.block_number = blocks.block_number
 	)
-	WHERE tx_hash = ANY($1::VARCHAR(66)[])`
+	WHERE tx_hash = ANY($1::VARCHAR(66)[])
+	AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))`
 	RetrieveReceiptsByBlockHashPgStr = `SELECT receipt_cids.leaf_cid, data, eth.transaction_cids.tx_hash
 	FROM eth.receipt_cids
 	INNER JOIN eth.transaction_cids ON (
 		receipt_cids.tx_id = transaction_cids.tx_hash
+		AND receipt_cids.header_id = transaction_cids.header_id
 		AND receipt_cids.block_number = transaction_cids.block_number
 	)
 	INNER JOIN eth.header_cids ON (

@@ -160,6 +166,7 @@ const (
 	FROM eth.receipt_cids
 	INNER JOIN eth.transaction_cids ON (
 		receipt_cids.tx_id = transaction_cids.tx_hash
+		AND receipt_cids.header_id = transaction_cids.header_id
 		AND receipt_cids.block_number = transaction_cids.block_number
 	)
 	INNER JOIN eth.header_cids ON (

@@ -170,29 +177,28 @@ const (
 		receipt_cids.leaf_mh_key = blocks.key
 		AND receipt_cids.block_number = blocks.block_number
 	)
-	WHERE block_number = $1
+	WHERE header_cids.block_number = $1
+	AND block_hash = (SELECT canonical_header_hash(header_cids.block_number))
 	ORDER BY eth.transaction_cids.index ASC`
 	RetrieveReceiptByTxHashPgStr = `SELECT receipt_cids.leaf_cid, data
 	FROM eth.receipt_cids
 	INNER JOIN eth.transaction_cids ON (
 		receipt_cids.tx_id = transaction_cids.tx_hash
+		AND receipt_cids.header_id = transaction_cids.header_id
 		AND receipt_cids.block_number = transaction_cids.block_number
 	)
 	INNER JOIN public.blocks ON (
 		receipt_cids.leaf_mh_key = blocks.key
 		AND receipt_cids.block_number = blocks.block_number
 	)
-	WHERE tx_hash = $1`
-	RetrieveAccountByLeafKeyAndBlockHashPgStr = `SELECT state_cids.cid, data, state_cids.node_type
+	WHERE tx_hash = $1
+	AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))`
+	RetrieveAccountByLeafKeyAndBlockHashPgStr = `SELECT state_cids.cid, state_cids.mh_key, state_cids.block_number, state_cids.node_type
 	FROM eth.state_cids
 	INNER JOIN eth.header_cids ON (
 		state_cids.header_id = header_cids.block_hash
 		AND state_cids.block_number = header_cids.block_number
 	)
-	INNER JOIN public.blocks ON (
-		state_cids.mh_key = blocks.key
-		AND state_cids.block_number = blocks.block_number
-	)
 	WHERE state_leaf_key = $1
 	AND header_cids.block_number <= (SELECT block_number
 	FROM eth.header_cids

@@ -200,21 +206,17 @@ const (
 	AND header_cids.block_hash = (SELECT canonical_header_hash(header_cids.block_number))
 	ORDER BY header_cids.block_number DESC
 	LIMIT 1`
-	RetrieveAccountByLeafKeyAndBlockNumberPgStr = `SELECT state_cids.cid, data, state_cids.node_type
+	RetrieveAccountByLeafKeyAndBlockNumberPgStr = `SELECT state_cids.cid, state_cids.mh_key, state_cids.node_type
 	FROM eth.state_cids
 	INNER JOIN eth.header_cids ON (
 		state_cids.header_id = header_cids.block_hash
 		AND state_cids.block_number = header_cids.block_number
 	)
-	INNER JOIN public.blocks ON (
-		state_cids.mh_key = blocks.key
-		AND state_cids.block_number = blocks.block_number
-	)
 	WHERE state_leaf_key = $1
-	AND block_number <= $2
-	ORDER BY block_number DESC
+	AND header_cids.block_number <= $2
+	ORDER BY header_cids.block_number DESC
 	LIMIT 1`
-	RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
+	RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr = `SELECT storage_cids.cid, storage_cids.mh_key, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
 	FROM eth.storage_cids
 	INNER JOIN eth.state_cids ON (
 		storage_cids.header_id = state_cids.header_id

@@ -225,16 +227,12 @@ const (
 		state_cids.header_id = header_cids.block_hash
 		AND state_cids.block_number = header_cids.block_number
 	)
-	INNER JOIN public.blocks ON (
-		storage_cids.mh_key = blocks.key
-		AND storage_cids.block_number = blocks.block_number
-	)
 	WHERE state_leaf_key = $1
 	AND storage_leaf_key = $2
-	AND block_number <= $3
-	ORDER BY block_number DESC
+	AND header_cids.block_number <= $3
+	ORDER BY header_cids.block_number DESC
 	LIMIT 1`
-	RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
+	RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `SELECT storage_cids.cid, storage_cids.mh_key, storage_cids.block_number, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
 	FROM eth.storage_cids
 	INNER JOIN eth.state_cids ON (
 		storage_cids.header_id = state_cids.header_id

@@ -245,10 +243,6 @@ const (
 		state_cids.header_id = header_cids.block_hash
 		AND state_cids.block_number = header_cids.block_number
 	)
-	INNER JOIN public.blocks ON (
-		storage_cids.mh_key = blocks.key
-		AND storage_cids.block_number = blocks.block_number
-	)
 	WHERE state_leaf_key = $1
 	AND storage_leaf_key = $2
 	AND header_cids.block_number <= (SELECT block_number

@@ -535,6 +529,8 @@ func (r *IPLDRetriever) RetrieveReceiptByHash(hash common.Hash) (string, []byte,
 type nodeInfo struct {
 	CID              string `db:"cid"`
+	MhKey            string `db:"mh_key"`
+	BlockNumber      string `db:"block_number"`
 	Data             []byte `db:"data"`
 	NodeType         int    `db:"node_type"`
 	StateLeafRemoved bool   `db:"state_leaf_removed"`

@@ -553,6 +549,15 @@ func (r *IPLDRetriever) RetrieveAccountByAddressAndBlockHash(address common.Addr
 		return "", EmptyNodeValue, nil
 	}

+	blockNumber, err := strconv.ParseUint(accountResult.BlockNumber, 10, 64)
+	if err != nil {
+		return "", nil, err
+	}
+	accountResult.Data, err = shared.FetchIPLD(r.db, accountResult.MhKey, blockNumber)
+	if err != nil {
+		return "", nil, err
+	}
+
 	var i []interface{}
 	if err := rlp.DecodeBytes(accountResult.Data, &i); err != nil {
 		return "", nil, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())

@@ -576,6 +581,12 @@ func (r *IPLDRetriever) RetrieveAccountByAddressAndBlockNumber(address common.Ad
 		return "", EmptyNodeValue, nil
 	}

+	var err error
+	accountResult.Data, err = shared.FetchIPLD(r.db, accountResult.MhKey, number)
+	if err != nil {
+		return "", nil, err
+	}
+
 	var i []interface{}
 	if err := rlp.DecodeBytes(accountResult.Data, &i); err != nil {
 		return "", nil, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())

@@ -597,6 +608,16 @@ func (r *IPLDRetriever) RetrieveStorageAtByAddressAndStorageSlotAndBlockHash(add
 	if storageResult.StateLeafRemoved || storageResult.NodeType == removedNode {
 		return "", EmptyNodeValue, EmptyNodeValue, nil
 	}

+	blockNumber, err := strconv.ParseUint(storageResult.BlockNumber, 10, 64)
+	if err != nil {
+		return "", nil, nil, err
+	}
+	storageResult.Data, err = shared.FetchIPLD(r.db, storageResult.MhKey, blockNumber)
+	if err != nil {
+		return "", nil, nil, err
+	}
+
 	var i []interface{}
 	if err := rlp.DecodeBytes(storageResult.Data, &i); err != nil {
 		err = fmt.Errorf("error decoding storage leaf node rlp: %s", err.Error())

@@ -620,6 +641,13 @@ func (r *IPLDRetriever) RetrieveStorageAtByAddressAndStorageKeyAndBlockNumber(ad
 	if storageResult.StateLeafRemoved || storageResult.NodeType == removedNode {
 		return "", EmptyNodeValue, nil
 	}

+	var err error
+	storageResult.Data, err = shared.FetchIPLD(r.db, storageResult.MhKey, number)
+	if err != nil {
+		return "", nil, err
+	}
+
 	var i []interface{}
 	if err := rlp.DecodeBytes(storageResult.Data, &i); err != nil {
 		return "", nil, fmt.Errorf("error decoding storage leaf node rlp: %s", err.Error())
@ -301,6 +301,7 @@ var (
|
|||||||
MockRctMetaPostPublish = []models.ReceiptModel{
|
MockRctMetaPostPublish = []models.ReceiptModel{
|
||||||
{
|
{
|
||||||
BlockNumber: "1",
|
BlockNumber: "1",
|
||||||
|
HeaderID: MockBlock.Hash().String(),
|
||||||
LeafCID: Rct1CID.String(),
|
LeafCID: Rct1CID.String(),
|
||||||
LeafMhKey: Rct1MhKey,
|
LeafMhKey: Rct1MhKey,
|
||||||
Contract: "",
|
Contract: "",
|
||||||
@ -308,6 +309,7 @@ var (
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
BlockNumber: "1",
|
BlockNumber: "1",
|
||||||
|
HeaderID: MockBlock.Hash().String(),
|
||||||
LeafCID: Rct2CID.String(),
|
LeafCID: Rct2CID.String(),
|
||||||
LeafMhKey: Rct2MhKey,
|
LeafMhKey: Rct2MhKey,
|
||||||
Contract: "",
|
Contract: "",
|
||||||
@ -315,6 +317,7 @@ var (
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
BlockNumber: "1",
|
BlockNumber: "1",
|
||||||
|
HeaderID: MockBlock.Hash().String(),
|
||||||
LeafCID: Rct3CID.String(),
|
LeafCID: Rct3CID.String(),
|
||||||
LeafMhKey: Rct3MhKey,
|
LeafMhKey: Rct3MhKey,
|
||||||
Contract: ContractAddress.String(),
|
Contract: ContractAddress.String(),
|
||||||
@ -322,6 +325,7 @@ var (
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
BlockNumber: "1",
|
BlockNumber: "1",
|
||||||
|
HeaderID: MockBlock.Hash().String(),
|
||||||
LeafCID: Rct4CID.String(),
|
LeafCID: Rct4CID.String(),
|
||||||
LeafMhKey: Rct4MhKey,
|
LeafMhKey: Rct4MhKey,
|
||||||
Contract: "",
|
Contract: "",
|
||||||
@@ -55,6 +55,13 @@ func FetchIPLDByMhKeyAndBlockNumber(tx *sqlx.Tx, mhKey string, blockNumber uint6
 	return block, tx.Get(&block, pgStr, mhKey, blockNumber)
 }
 
+// FetchIPLD is used to retrieve an IPLD from Postgres mhkey and blockNumber
+func FetchIPLD(db *sqlx.DB, mhKey string, blockNumber uint64) ([]byte, error) {
+	pgStr := `SELECT data FROM public.blocks WHERE key = $1 AND block_number = $2`
+	var block []byte
+	return block, db.Get(&block, pgStr, mhKey, blockNumber)
+}
+
 // MultihashKeyFromCID converts a cid into a blockstore-prefixed multihash db key string
 func MultihashKeyFromCID(c cid.Cid) string {
 	dbKey := dshelp.MultihashToDsKey(c.Hash())
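Unlike `FetchIPLDByMhKeyAndBlockNumber`, which runs inside an open `*sqlx.Tx`, the new `FetchIPLD` helper reads the block directly off the `*sqlx.DB` handle. A hedged usage sketch, pairing it with `MultihashKeyFromCID` from the same file; the `fetchByCID` wrapper and the exact import path are assumptions, not repo code:

```go
package example

import (
	"fmt"

	"github.com/ipfs/go-cid"
	"github.com/jmoiron/sqlx"

	"github.com/vulcanize/ipld-eth-server/pkg/shared" // import path assumed
)

// fetchByCID derives the blockstore key for a CID, then loads the raw IPLD
// block stored for it at the given block height.
func fetchByCID(db *sqlx.DB, c cid.Cid, blockNumber uint64) ([]byte, error) {
	mhKey := shared.MultihashKeyFromCID(c)
	data, err := shared.FetchIPLD(db, mhKey, blockNumber)
	if err != nil {
		return nil, fmt.Errorf("fetching IPLD for %s: %w", c.String(), err)
	}
	return data, nil
}
```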
@@ -1,26 +1,16 @@
 #!/bin/bash
 
-set -e
-
-mkdir -p out
-
-# Remove existing docker-tsdb directory
-rm -rf out/docker-tsdb/
-
-# Copy over files to setup TimescaleDB
-ID=$(docker create vulcanize/ipld-eth-db:v4.1.1-alpha)
-docker cp $ID:/app/docker-tsdb out/docker-tsdb/
-docker rm -v $ID
-
-# Spin up TimescaleDB
-docker-compose -f out/docker-tsdb/docker-compose.test.yml -f docker-compose.yml up ipld-eth-db
-trap "docker-compose -f out/docker-tsdb/docker-compose.test.yml -f docker-compose.yml down --remove-orphans --volumes; rm -rf out/" SIGINT SIGTERM ERR
-sleep 45
+# Remove any existing containers / volumes
+docker-compose down --remove-orphans --volumes
+
+# Spin up DB and run migrations
+docker-compose up -d migrations ipld-eth-db
+sleep 30
 
 # Run unit tests
 go clean -testcache
-PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8066 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing_v4 make test
+PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing make test
 
 # Clean up
-docker-compose -f out/docker-tsdb/docker-compose.test.yml -f docker-compose.yml down --remove-orphans --volumes
+docker-compose down --remove-orphans --volumes
 rm -rf out/
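The unit tests pick up their Postgres settings from the environment variables exported on the `make test` line above. A minimal sketch, assuming `sqlx` with the `lib/pq` driver, of how those variables map onto a connection string; the `connectTestDB` helper is illustrative only:

```go
package example

import (
	"fmt"
	"os"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // Postgres driver (assumed)
)

// connectTestDB builds a DSN from the same env vars the test script exports
// and opens a sqlx handle against the migrated vulcanize_testing database.
func connectTestDB() (*sqlx.DB, error) {
	dsn := fmt.Sprintf(
		"host=%s port=%s user=%s password=%s dbname=%s sslmode=disable",
		os.Getenv("DATABASE_HOSTNAME"),
		os.Getenv("DATABASE_PORT"),
		os.Getenv("DATABASE_USER"),
		os.Getenv("DATABASE_PASSWORD"),
		os.Getenv("DATABASE_NAME"),
	)
	return sqlx.Connect("postgres", dsn)
}
```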
@@ -4,22 +4,22 @@
 
 - Clone [stack-orchestrator](https://github.com/vulcanize/stack-orchestrator), [ipld-eth-db](https://github.com/vulcanize/ipld-eth-db) [go-ethereum](https://github.com/vulcanize/go-ethereum) repositories.
 
-- Checkout [v4 release](https://github.com/vulcanize/ipld-eth-db/releases/tag/v4.1.1-alpha) in ipld-eth-db repo.
+- Checkout [v4 release](https://github.com/vulcanize/ipld-eth-db/releases/tag/v4.2.0-alpha) in ipld-eth-db repo.
 
   ```bash
   # In ipld-eth-db repo.
-  git checkout v4.1.1-alpha
+  git checkout v4.2.0-alpha
   ```
 
-- Checkout [v4 release](https://github.com/vulcanize/go-ethereum/releases/tag/v1.10.18-statediff-4.0.2-alpha) in go-ethereum repo.
+- Checkout [v4 release](https://github.com/vulcanize/go-ethereum/releases/tag/v1.10.19-statediff-4.1.0-alpha) in go-ethereum repo.
 
   ```bash
   # In go-ethereum repo.
-  git checkout v1.10.18-statediff-4.0.2-alpha
+  git checkout v1.10.19-statediff-4.1.0-alpha
   ```
 
 - Checkout working commit in stack-orchestrator repo.
 
   ```bash
   # In stack-orchestrator repo.
-  git checkout 418957a1f745c921b21286c13bb033f922a91ae9
+  git checkout f2fd766f5400fcb9eb47b50675d2e3b1f2753702
   ```
 
 ## Run
@@ -60,13 +60,11 @@
 # Path to test contract.
 vulcanize_test_contract=~/ipld-eth-server/test/contract
 
+genesis_file_path='start-up-files/go-ethereum/genesis.json'
 db_write=true
 eth_forward_eth_calls=false
 eth_proxy_on_error=false
 eth_http_path="go-ethereum:8545"
-ipld_eth_server_db_dependency=access-node
-go_ethereum_db_dependency=access-node
-connecting_db_name=vulcanize_testing_v4
 ```
 
 - Run stack-orchestrator:
@@ -77,8 +75,7 @@
 
 ./wrapper.sh \
 -e docker \
--d ../docker/latest/docker-compose-timescale-db.yml \
+-d ../docker/local/docker-compose-db-sharding.yml \
--d ../docker/local/docker-compose-db-migration.yml \
 -d ../docker/local/docker-compose-go-ethereum.yml \
 -d ../docker/local/docker-compose-ipld-eth-server.yml \
 -d ../docker/local/docker-compose-contract.yml \
@@ -107,13 +104,11 @@
 # Path to test contract.
 vulcanize_test_contract=~/ipld-eth-server/test/contract
 
+genesis_file_path='start-up-files/go-ethereum/genesis.json'
 db_write=false
 eth_forward_eth_calls=true
 eth_proxy_on_error=false
 eth_http_path="go-ethereum:8545"
-ipld_eth_server_db_dependency=access-node
-go_ethereum_db_dependency=access-node
-connecting_db_name=vulcanize_testing_v4
 ```
 
 - Stop the stack-orchestrator and start again using the same command