Compare commits
11 Commits
v4.0.4-alp ... sharding

Author | SHA1 | Date
---|---|---
 | b90c28906a |
 | 6cb54ca790 |
 | bcd00c6e2b |
 | 923a8c3241 |
 | 07a991d3e4 |
 | a63640933e |
 | a5ac6c19cf |
 | 45c5a25d54 |
 | 72e903cf68 |
 | b7b4631231 |
 | f8de0defad |

.github/workflows/on-pr-publish.yaml (vendored) | 4
@@ -27,6 +27,10 @@ jobs:
BUILD_HOSTNAME: ${{ secrets.BUILD_HOSTNAME }}
BUILD_USERNAME: ${{ secrets.BUILD_USERNAME }}
BUILD_KEY: ${{ secrets.BUILD_KEY }}
with:
STACK_ORCHESTRATOR_REF: "f2fd766f5400fcb9eb47b50675d2e3b1f2753702"
GO_ETHEREUM_REF: "7b4ef34de2b9469c3f82972b60e38b34c99c5382"
IPLD_ETH_DB_REF: "b59505eab252670c622b42ce60621e9747fb64f9"
build:
name: Run docker build
runs-on: ubuntu-latest

.github/workflows/run_unit_test.sh (vendored) | 21
@@ -8,30 +8,21 @@ temp_dir=$(mktemp -d)
cd $temp_dir
git clone -b $(cat /tmp/git_head_ref) "https://github.com/$(cat /tmp/git_repository).git"
cd ipld-eth-server
mkdir -p out

## Remove the branch and github related info. This way future runs wont be confused.
rm -f /tmp/git_head_ref /tmp/git_repository

# Remove existing docker-tsdb directory
rm -rf out/docker-tsdb/

# Copy over files to setup TimescaleDB
ID=$(docker create vulcanize/ipld-eth-db:v4.1.1-alpha)
docker cp $ID:/app/docker-tsdb out/docker-tsdb/
docker rm -v $ID

# Spin up TimescaleDB
docker-compose -f out/docker-tsdb/docker-compose.test.yml -f docker-compose.yml up ipld-eth-db
trap "docker-compose -f out/docker-tsdb/docker-compose.test.yml -f docker-compose.yml down --remove-orphans --volumes; cd $start_dir ; rm -r $temp_dir" SIGINT SIGTERM ERR
sleep 45
# Spin up DB and run migrations
docker-compose up -d migrations ipld-eth-db
trap "docker-compose down -v --remove-orphans; cd $start_dir ; rm -r $temp_dir" SIGINT SIGTERM ERR
sleep 30

# Remove old logs so there's no confusion, then run test
rm -f /tmp/test.log /tmp/return_test.txt
PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8066 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing_v4 make test > /tmp/test.log
PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=localhost DATABASE_NAME=vulcanize_testing make test > /tmp/test.log
echo $? > /tmp/return_test.txt

# Clean up
docker-compose -f out/docker-tsdb/docker-compose.test.yml -f docker-compose.yml down --remove-orphans --volumes
docker-compose down -v --remove-orphans
cd $start_dir
rm -fr $temp_dir

.github/workflows/tests.yaml (vendored) | 50
@@ -8,6 +8,16 @@ on:
required: true
BUILD_KEY:
required: true
inputs:
STACK_ORCHESTRATOR_REF:
required: true
type: string
GO_ETHEREUM_REF:
required: true
type: string
IPLD_ETH_DB_REF:
required: true
type: string

jobs:
build:
@@ -41,8 +51,8 @@ jobs:
echo ${{ env.BUILD_KEY }} >> /tmp/key
echo "-----END OPENSSH PRIVATE KEY-----" >> /tmp/key
chmod 400 /tmp/key
cat /tmp/git_repository
cat /tmp/git_head_ref
cat /tmp/git_repository
cat /tmp/git_head_ref

- name: Raw SCP
run: |
@@ -68,9 +78,6 @@ jobs:
integrationtest:
name: Run integration tests
env:
STACK_ORCHESTRATOR_REF: 418957a1f745c921b21286c13bb033f922a91ae9
GO_ETHEREUM_REF: "v1.10.18-statediff-4.0.2-alpha"
IPLD_ETH_DB_REF: 91d30b9ea1acecd0a7f4307390a98bf3e289b8d7
GOPATH: /tmp/go
DB_WRITE: true
ETH_FORWARD_ETH_CALLS: false
@@ -89,17 +96,17 @@ jobs:
path: "./ipld-eth-server"
- uses: actions/checkout@v2
with:
ref: ${{ env.STACK_ORCHESTRATOR_REF }}
ref: ${{ inputs.STACK_ORCHESTRATOR_REF }}
path: "./stack-orchestrator/"
repository: vulcanize/stack-orchestrator
- uses: actions/checkout@v2
with:
ref: ${{ env.GO_ETHEREUM_REF }}
ref: ${{ inputs.GO_ETHEREUM_REF }}
repository: vulcanize/go-ethereum
path: "./go-ethereum/"
- uses: actions/checkout@v2
with:
ref: ${{ env.IPLD_ETH_DB_REF }}
ref: ${{ inputs.IPLD_ETH_DB_REF }}
repository: vulcanize/ipld-eth-db
path: "./ipld-eth-db/"
- name: Create config file
@@ -108,15 +115,11 @@ jobs:
echo vulcanize_ipld_eth_db=$GITHUB_WORKSPACE/ipld-eth-db/ >> ./config.sh
echo vulcanize_ipld_eth_server=$GITHUB_WORKSPACE/ipld-eth-server/ >> ./config.sh
echo vulcanize_test_contract=$GITHUB_WORKSPACE/ipld-eth-server/test/contract >> ./config.sh
echo genesis_file_path=start-up-files/go-ethereum/genesis.json >> ./config.sh
echo db_write=$DB_WRITE >> ./config.sh
echo eth_forward_eth_calls=$ETH_FORWARD_ETH_CALLS >> ./config.sh
echo eth_proxy_on_error=$ETH_PROXY_ON_ERROR >> ./config.sh
echo eth_http_path=$ETH_HTTP_PATH >> ./config.sh
echo ipld_eth_server_db_dependency=access-node >> ./config.sh
echo go_ethereum_db_dependency=access-node >> ./config.sh
echo connecting_db_name=vulcanize_testing_v4 >> ./config.sh
echo watched_address_gap_filler_enabled=false >> ./config.sh
echo watched_address_gap_filler_interval=5 >> ./config.sh
cat ./config.sh
- name: Build geth
run: |
@@ -127,11 +130,10 @@ jobs:
- name: Run docker compose
run: |
docker-compose \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-migration.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-server.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-contract.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-timescale-db.yml" \
--env-file "$GITHUB_WORKSPACE/config.sh" \
up -d --build
- name: Test
@@ -144,9 +146,6 @@ jobs:
integrationtest_forwardethcalls:
name: Run integration tests for direct proxy fall-through of eth_calls
env:
STACK_ORCHESTRATOR_REF: 418957a1f745c921b21286c13bb033f922a91ae9
GO_ETHEREUM_REF: "v1.10.18-statediff-4.0.2-alpha"
IPLD_ETH_DB_REF: 91d30b9ea1acecd0a7f4307390a98bf3e289b8d7
GOPATH: /tmp/go
DB_WRITE: false
ETH_FORWARD_ETH_CALLS: true
@@ -165,17 +164,17 @@ jobs:
path: "./ipld-eth-server"
- uses: actions/checkout@v2
with:
ref: ${{ env.STACK_ORCHESTRATOR_REF }}
ref: ${{ inputs.STACK_ORCHESTRATOR_REF }}
path: "./stack-orchestrator/"
repository: vulcanize/stack-orchestrator
- uses: actions/checkout@v2
with:
ref: ${{ env.GO_ETHEREUM_REF }}
ref: ${{ inputs.GO_ETHEREUM_REF }}
repository: vulcanize/go-ethereum
path: "./go-ethereum/"
- uses: actions/checkout@v2
with:
ref: ${{ env.IPLD_ETH_DB_REF }}
ref: ${{ inputs.IPLD_ETH_DB_REF }}
repository: vulcanize/ipld-eth-db
path: "./ipld-eth-db/"
- name: Create config file
@@ -184,15 +183,11 @@ jobs:
echo vulcanize_ipld_eth_db=$GITHUB_WORKSPACE/ipld-eth-db/ >> ./config.sh
echo vulcanize_ipld_eth_server=$GITHUB_WORKSPACE/ipld-eth-server/ >> ./config.sh
echo vulcanize_test_contract=$GITHUB_WORKSPACE/ipld-eth-server/test/contract >>./config.sh
echo genesis_file_path=start-up-files/go-ethereum/genesis.json >> ./config.sh
echo db_write=$DB_WRITE >> ./config.sh
echo eth_forward_eth_calls=$ETH_FORWARD_ETH_CALLS >> ./config.sh
echo eth_proxy_on_error=$ETH_PROXY_ON_ERROR >> ./config.sh
echo eth_http_path=$ETH_HTTP_PATH >> ./config.sh
echo ipld_eth_server_db_dependency=access-node >> ./config.sh
echo go_ethereum_db_dependency=access-node >> ./config.sh
echo connecting_db_name=vulcanize_testing_v4 >> ./config.sh
echo watched_address_gap_filler_enabled=false >> ./config.sh
echo watched_address_gap_filler_interval=5 >> ./config.sh
cat ./config.sh
- name: Build geth
run: |
@@ -203,11 +198,10 @@ jobs:
- name: Run docker compose
run: |
docker-compose \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-migration.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-server.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-contract.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-timescale-db.yml" \
--env-file "$GITHUB_WORKSPACE/config.sh" \
up -d --build
- name: Test

@@ -22,13 +22,13 @@ Additional, unique endpoints are exposed which utilize the new indexes and state

## Dependencies
Minimal build dependencies
* Go (1.13)
* Go (1.18)
* Git
* GCC compiler
* This repository

External dependency
* Postgres database populated by [ipld-eth-indexer](https://github.com/vulcanize/ipld-eth-indexer)
* Postgres database populated by [ipld-eth-db](https://github.com/vulcanize/ipld-eth-db)

## Install
Start by downloading ipld-eth-server and moving into the repo:

@@ -1,18 +1,29 @@
version: '3.2'

services:
ipld-eth-db:
migrations:
restart: on-failure
depends_on:
- access-node
image: vulcanize/ipld-eth-db:v4.1.1-alpha
- ipld-eth-db
image: vulcanize/ipld-eth-db:v4.2.0-alpha
environment:
DATABASE_USER: "vdbm"
DATABASE_NAME: "vulcanize_testing_v4"
DATABASE_NAME: "vulcanize_testing"
DATABASE_PASSWORD: "password"
DATABASE_HOSTNAME: "access-node"
DATABASE_HOSTNAME: "ipld-eth-db"
DATABASE_PORT: 5432

ipld-eth-db:
image: timescale/timescaledb:latest-pg14
restart: always
command: ["postgres", "-c", "log_statement=all"]
environment:
POSTGRES_USER: "vdbm"
POSTGRES_DB: "vulcanize_testing"
POSTGRES_PASSWORD: "password"
ports:
- "127.0.0.1:8077:5432"

eth-server:
restart: unless-stopped
depends_on:
@@ -44,20 +55,5 @@ services:
ports:
- "127.0.0.1:8081:8081"

graphql:
restart: unless-stopped
depends_on:
- ipld-eth-db
image: vulcanize/postgraphile:v1.0.1
environment:
- PG_HOST=db
- PG_PORT=5432
- PG_DATABASE=vulcanize_public
- PG_USER=vdbm
- PG_PASSWORD=password
- SCHEMA=public,eth
ports:
- "127.0.0.1:5000:5000"

volumes:
vdb_db_eth_server:
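
Note (not part of the diff): the compose changes above move the test Postgres endpoint to 127.0.0.1:8077 and the vulcanize_testing database. A minimal Go sketch of connecting to that endpoint, assuming the stack above is running locally; the lib/pq driver choice here is an assumption for illustration, not taken from this changeset:

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // Postgres driver (assumed for this sketch)
)

func main() {
	// Connection parameters taken from the docker-compose.yml above: the
	// ipld-eth-db (TimescaleDB) service publishes 127.0.0.1:8077 -> 5432.
	dsn := "host=127.0.0.1 port=8077 user=vdbm password=password dbname=vulcanize_testing sslmode=disable"

	db, err := sqlx.Connect("postgres", dsn)
	if err != nil {
		log.Fatalf("failed to connect to test database: %v", err)
	}
	defer db.Close()

	// Simple sanity check that the migrations service has created the eth schema.
	var count int
	if err := db.Get(&count, "SELECT count(*) FROM information_schema.schemata WHERE schema_name = 'eth'"); err != nil {
		log.Fatalf("schema check failed: %v", err)
	}
	fmt.Println("eth schema present:", count == 1)
}
```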

go.mod | 2
@@ -289,4 +289,4 @@ require (
lukechampine.com/blake3 v1.1.6 // indirect
)

replace github.com/ethereum/go-ethereum v1.10.19 => github.com/vulcanize/go-ethereum v1.10.19-statediff-4.0.2-alpha
replace github.com/ethereum/go-ethereum v1.10.19 => github.com/vulcanize/go-ethereum v1.10.19-statediff-4.1.0-alpha

go.sum | 4
@@ -1662,8 +1662,8 @@ github.com/vulcanize/eth-ipfs-state-validator/v4 v4.0.3-alpha h1:sDDK4eOdW3JEds+
github.com/vulcanize/eth-ipfs-state-validator/v4 v4.0.3-alpha/go.mod h1:/pHfZd1IWsSTpCtGq6nnzUZBAkLV+zMrRh6Z3Hr3NFc=
github.com/vulcanize/gap-filler v0.4.0 h1:5VD9PG7UrjEub4rLxZmstWoHnBnVtXz9silIVdrnTsM=
github.com/vulcanize/gap-filler v0.4.0/go.mod h1:5awUyotIoJi6AuG0JPEm7SIwFZBD7Ecg0I8x7CdxcHI=
github.com/vulcanize/go-ethereum v1.10.19-statediff-4.0.2-alpha h1:xD4fA2khoAnhBEk84JwrIEGvQCndVXpQGv5n7a9cgwc=
github.com/vulcanize/go-ethereum v1.10.19-statediff-4.0.2-alpha/go.mod h1:5tMN+CDbK/qI2UlfN307HJykDmVIOCB1FM5RcHK9Kp8=
github.com/vulcanize/go-ethereum v1.10.19-statediff-4.1.0-alpha h1:8ge2ban6t/e53XDwe6s28jcCevT7Ggo51lNJ0Eo1PgA=
github.com/vulcanize/go-ethereum v1.10.19-statediff-4.1.0-alpha/go.mod h1:5tMN+CDbK/qI2UlfN307HJykDmVIOCB1FM5RcHK9Kp8=
github.com/vulcanize/ipfs-ethdb/v4 v4.0.2-alpha h1:xak1uYmFWqJ2Hz3pM+0jDcqdlwYwRWeSkQV6B8IxD/0=
github.com/vulcanize/ipfs-ethdb/v4 v4.0.2-alpha/go.mod h1:pHbLbW4Hk1IFpxrY9yi50IuoPPzmSY7lwOqpFAa369k=
github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE=

@@ -156,6 +156,7 @@ func (pea *PublicEthAPI) BlockNumber() hexutil.Uint64 {
// * When fullTx is true all transactions in the block are returned, otherwise
// only the transaction hash is returned.
func (pea *PublicEthAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) {
logrus.Debug("Received getBlockByNumber request for number ", number.Int64())
block, err := pea.B.BlockByNumber(ctx, number)
if block != nil && err == nil {
return pea.rpcMarshalBlock(block, true, fullTx)
@@ -1096,11 +1097,13 @@ func (pea *PublicEthAPI) writeStateDiffFor(blockHash common.Hash) {
func (pea *PublicEthAPI) rpcMarshalBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) {
fields, err := RPCMarshalBlock(b, inclTx, fullTx)
if err != nil {
logrus.Error("error RPC marshalling block with hash", b.Hash().String(), err)
return nil, err
}
if inclTx {
td, err := pea.B.GetTd(b.Hash())
if err != nil {
logrus.Error("error getting td for block with hash and number", b.Hash().String(), b.Number().String(), err)
return nil, err
}
fields["totalDifficulty"] = (*hexutil.Big)(td)
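
Note (not part of the diff): a minimal sketch of exercising the eth_getBlockByNumber call served by the API methods touched above, using the standard go-ethereum rpc client. The local URL and port are assumptions for a locally running ipld-eth-server:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Assumes ipld-eth-server is serving HTTP RPC locally, e.g. on the 8081
	// port published by the compose file above.
	client, err := rpc.Dial("http://127.0.0.1:8081")
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer client.Close()

	// fullTx=true mirrors the fullTx flag of PublicEthAPI.GetBlockByNumber.
	var block map[string]interface{}
	if err := client.CallContext(context.Background(), &block, "eth_getBlockByNumber", "0x1", true); err != nil {
		log.Fatalf("eth_getBlockByNumber failed: %v", err)
	}

	// totalDifficulty is populated by rpcMarshalBlock when inclTx is true.
	fmt.Println("hash:", block["hash"], "totalDifficulty:", block["totalDifficulty"])
}
```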

@@ -52,22 +52,22 @@ import (
)

var (
errPendingBlockNumber = errors.New("pending block number not supported")
errNegativeBlockNumber = errors.New("negative block number not supported")
errHeaderHashNotFound = errors.New("header for hash not found")
errHeaderNotFound = errors.New("header not found")
errPendingBlockNumber = errors.New("pending block number not supported")
errNegativeBlockNumber = errors.New("negative block number not supported")
errHeaderHashNotFound = errors.New("header for hash not found")
errHeaderNotFound = errors.New("header not found")
errMultipleHeadersForHash = errors.New("more than one headers for the given hash")
errTxHashNotFound = errors.New("transaction for hash not found")
errTxHashInMultipleBlocks = errors.New("transaction for hash found in more than one canonical block")

// errMissingSignature is returned if a block's extra-data section doesn't seem
// to contain a 65 byte secp256k1 signature.
)

const (
RetrieveCanonicalBlockHashByNumber = `SELECT block_hash FROM eth.header_cids
INNER JOIN public.blocks ON (
header_cids.mh_key = blocks.key
AND header_cids.block_number = blocks.block_number
)
WHERE block_hash = (SELECT canonical_header_hash($1))`
RetrieveCanonicalBlockHashByNumber = `SELECT block_hash
FROM canonical_header_hash($1) AS block_hash
WHERE block_hash IS NOT NULL`
RetrieveCanonicalHeaderByNumber = `SELECT cid, data FROM eth.header_cids
INNER JOIN public.blocks ON (
header_cids.mh_key = blocks.key
@@ -76,13 +76,12 @@ const (
WHERE block_hash = (SELECT canonical_header_hash($1))`
RetrieveTD = `SELECT CAST(td as Text) FROM eth.header_cids
WHERE header_cids.block_hash = $1`
RetrieveRPCTransaction = `SELECT blocks.data, block_hash, transaction_cids.block_number, index
FROM public.blocks, eth.transaction_cids, eth.header_cids
RetrieveRPCTransaction = `SELECT blocks.data, header_id, transaction_cids.block_number, index
FROM public.blocks, eth.transaction_cids
WHERE blocks.key = transaction_cids.mh_key
AND blocks.block_number = transaction_cids.block_number
AND transaction_cids.header_id = header_cids.block_hash
AND transaction_cids.block_number = header_cids.block_number
AND transaction_cids.tx_hash = $1`
AND transaction_cids.tx_hash = $1
AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))`
RetrieveCodeHashByLeafKeyAndBlockHash = `SELECT code_hash FROM eth.state_accounts, eth.state_cids, eth.header_cids
WHERE state_accounts.header_id = state_cids.header_id
AND state_accounts.state_path = state_cids.state_path
@@ -342,6 +341,7 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
var headerIPLD models.IPLDModel
headerIPLD, err = b.Fetcher.FetchHeader(tx, headerCID)
if err != nil {
log.Error("error fetching header ipld", err)
if err == sql.ErrNoRows {
return nil, nil
}
@@ -352,10 +352,12 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
if err != nil {
return nil, err
}

// Fetch and decode the uncle IPLDs
var uncleIPLDs []models.IPLDModel
uncleIPLDs, err = b.Fetcher.FetchUncles(tx, uncleCIDs)
if err != nil {
log.Error("error fetching uncle iplds", err)
return nil, err
}
var uncles []*types.Header
@@ -367,10 +369,12 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
}
uncles = append(uncles, &uncle)
}

// Fetch and decode the transaction IPLDs
var txIPLDs []models.IPLDModel
txIPLDs, err = b.Fetcher.FetchTrxs(tx, txCIDs)
if err != nil {
log.Error("error fetching tx iplds", err)
return nil, err
}
var transactions []*types.Transaction
@@ -386,6 +390,7 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
var rctIPLDs []models.IPLDModel
rctIPLDs, err = b.Fetcher.FetchRcts(tx, rctCIDs)
if err != nil {
log.Error("error fetching rct iplds", err)
return nil, err
}
var receipts []*types.Receipt
@@ -438,6 +443,7 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
var headerIPLD models.IPLDModel
headerIPLD, err = b.Fetcher.FetchHeader(tx, headerCID)
if err != nil {
log.Error("error fetching header ipld", err)
if err == sql.ErrNoRows {
return nil, nil
}
@@ -452,6 +458,7 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
var uncleIPLDs []models.IPLDModel
uncleIPLDs, err = b.Fetcher.FetchUncles(tx, uncleCIDs)
if err != nil {
log.Error("error fetching uncle iplds", err)
if err == sql.ErrNoRows {
return nil, nil
}
@@ -470,6 +477,7 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
var txIPLDs []models.IPLDModel
txIPLDs, err = b.Fetcher.FetchTrxs(tx, txCIDs)
if err != nil {
log.Error("error fetching tx iplds", err)
if err == sql.ErrNoRows {
return nil, nil
}
@@ -488,6 +496,7 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
var rctIPLDs []models.IPLDModel
rctIPLDs, err = b.Fetcher.FetchRcts(tx, rctCIDs)
if err != nil {
log.Error("error fetching rct iplds", err)
if err == sql.ErrNoRows {
return nil, nil
}
@@ -514,20 +523,30 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
// GetTransaction retrieves a tx by hash
// It also returns the blockhash, blocknumber, and tx index associated with the transaction
func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
var tempTxStruct struct {
type txRes struct {
Data []byte `db:"data"`
BlockHash string `db:"block_hash"`
HeaderID string `db:"header_id"`
BlockNumber uint64 `db:"block_number"`
Index uint64 `db:"index"`
}
if err := b.DB.Get(&tempTxStruct, RetrieveRPCTransaction, txHash.String()); err != nil {
var res = make([]txRes, 0)
if err := b.DB.Select(&res, RetrieveRPCTransaction, txHash.String()); err != nil {
return nil, common.Hash{}, 0, 0, err
}

if len(res) == 0 {
return nil, common.Hash{}, 0, 0, errTxHashNotFound
} else if len(res) > 1 {
// a transaction can be part of a only one canonical block
return nil, common.Hash{}, 0, 0, errTxHashInMultipleBlocks
}

var transaction types.Transaction
if err := transaction.UnmarshalBinary(tempTxStruct.Data); err != nil {
if err := transaction.UnmarshalBinary(res[0].Data); err != nil {
return nil, common.Hash{}, 0, 0, err
}
return &transaction, common.HexToHash(tempTxStruct.BlockHash), tempTxStruct.BlockNumber, tempTxStruct.Index, nil

return &transaction, common.HexToHash(res[0].HeaderID), res[0].BlockNumber, res[0].Index, nil
}

// GetReceipts retrieves receipts for provided block hash
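
Note (not part of the diff): GetTransaction now distinguishes a missing hash from a hash matched by more than one block. A sketch of in-package code reacting to the two sentinel errors; the helper itself is hypothetical and the Backend wiring is assumed to exist elsewhere:

```go
package eth

// Illustrative only: distinguishes the two new sentinel errors returned by
// Backend.GetTransaction. Not part of this changeset.

import (
	"context"
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func describeTransaction(ctx context.Context, b *Backend, txHash common.Hash) (string, error) {
	tx, headerHash, blockNumber, index, err := b.GetTransaction(ctx, txHash)
	switch {
	case errors.Is(err, errTxHashNotFound):
		// RetrieveRPCTransaction matched no rows: the hash is unknown.
		return "", fmt.Errorf("unknown transaction %s: %w", txHash, err)
	case errors.Is(err, errTxHashInMultipleBlocks):
		// More than one row matched despite the canonical_header_hash filter.
		return "", fmt.Errorf("ambiguous transaction %s: %w", txHash, err)
	case err != nil:
		return "", err
	}
	return fmt.Sprintf("tx %s in canonical block %s (number %d, index %d)",
		tx.Hash(), headerHash, blockNumber, index), nil
}
```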

@@ -191,7 +191,7 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
}
// Retrieve cached receipt CIDs
if !filter.ReceiptFilter.Off {
cw.Receipts, err = ecr.RetrieveRctCIDsByHeaderID(tx, filter.ReceiptFilter, header.BlockHash, trxHashes)
cw.Receipts, err = ecr.RetrieveRctCIDs(tx, filter.ReceiptFilter, 0, header.BlockHash, trxHashes)
if err != nil {
log.Error("receipt cid retrieval error")
return nil, true, err
@@ -257,8 +257,8 @@ func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID
results := make([]models.TxModel, 0)
id := 1
pgStr := fmt.Sprintf(`SELECT CAST(transaction_cids.block_number as Text), transaction_cids.tx_hash,
transaction_cids.header_id,transaction_cids.cid, transaction_cids.mh_key, transaction_cids.dst,
transaction_cids.src, transaction_cids.index, transaction_cids.tx_data
transaction_cids.header_id, transaction_cids.cid, transaction_cids.mh_key, transaction_cids.dst,
transaction_cids.src, transaction_cids.index, transaction_cids.tx_data, transaction_cids.tx_type
FROM eth.transaction_cids
INNER JOIN eth.header_cids ON (
transaction_cids.header_id = header_cids.block_hash
@@ -358,42 +358,22 @@ func receiptFilterConditions(id *int, pgStr string, args []interface{}, rctFilte
return pgStr, args
}

// RetrieveRctCIDsByHeaderID retrieves and returns all of the rct cids at the provided header ID that conform to the provided
// filter parameters and correspond to the provided tx ids
func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter ReceiptFilter, headerID string, trxHashes []string) ([]models.ReceiptModel, error) {
log.Debug("retrieving receipt cids for header id ", headerID)
args := make([]interface{}, 0, 4)
pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.tx_id, receipt_cids.leaf_cid,
receipt_cids.leaf_mh_key, receipt_cids.contract, receipt_cids.contract_hash
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.block_number = transaction_cids.block_number
AND transaction_cids.header_id = header_cids.block_hash
AND transaction_cids.block_number = header_cids.block_number
AND header_cids.block_hash = $1`
id := 2
args = append(args, headerID)

pgStr, args = receiptFilterConditions(&id, pgStr, args, rctFilter, trxHashes)

pgStr += ` ORDER BY transaction_cids.index`
receiptCIDs := make([]models.ReceiptModel, 0)
return receiptCIDs, tx.Select(&receiptCIDs, pgStr, args...)
}

// RetrieveFilteredGQLLogs retrieves and returns all the log CIDs provided blockHash that conform to the provided
// filter parameters.
func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockHash *common.Hash) ([]LogResult, error) {
log.Debug("retrieving log cids for receipt ids")
log.Debug("retrieving log cids for receipt ids with block hash", blockHash.String())
args := make([]interface{}, 0, 4)
id := 1
pgStr := `SELECT CAST(eth.log_cids.block_number as Text), eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
eth.log_cids.log_data, eth.transaction_cids.tx_hash, data, eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
eth.log_cids.log_data, eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index, data,
eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status, header_cids.block_hash
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids, public.blocks
WHERE eth.log_cids.rct_id = receipt_cids.tx_id
AND eth.log_cids.header_id = eth.receipt_cids.header_id
AND eth.log_cids.block_number = eth.receipt_cids.block_number
AND receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.header_id = transaction_cids.header_id
AND receipt_cids.block_number = transaction_cids.block_number
AND transaction_cids.header_id = header_cids.block_hash
AND transaction_cids.block_number = header_cids.block_number
@@ -416,19 +396,21 @@ func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptF
return logCIDs, nil
}

// RetrieveFilteredLog retrieves and returns all the log cIDs provided blockHeight or blockHash that conform to the provided
// RetrieveFilteredLog retrieves and returns all the log CIDs provided blockHeight or blockHash that conform to the provided
// filter parameters.
func (ecr *CIDRetriever) RetrieveFilteredLog(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash) ([]LogResult, error) {
log.Debug("retrieving log cids for receipt ids")
args := make([]interface{}, 0, 4)
pgStr := `SELECT CAST(eth.log_cids.block_number as Text), eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
eth.log_cids.log_data, eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index,
header_cids.block_hash, CAST(header_cids.block_number as Text)
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
eth.log_cids.log_data, eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index,
eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status, header_cids.block_hash
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE eth.log_cids.rct_id = receipt_cids.tx_id
AND eth.log_cids.header_id = eth.receipt_cids.header_id
AND eth.log_cids.block_number = eth.receipt_cids.block_number
AND receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.header_id = transaction_cids.header_id
AND receipt_cids.block_number = transaction_cids.block_number
AND transaction_cids.header_id = header_cids.block_hash
AND transaction_cids.block_number = header_cids.block_number`
@@ -458,13 +440,14 @@ func (ecr *CIDRetriever) RetrieveFilteredLog(tx *sqlx.Tx, rctFilter ReceiptFilte

// RetrieveRctCIDs retrieves and returns all of the rct cids at the provided blockheight or block hash that conform to the provided
// filter parameters and correspond to the provided tx ids
func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, txHashes []string) ([]models.ReceiptModel, error) {
func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash string, txHashes []string) ([]models.ReceiptModel, error) {
log.Debug("retrieving receipt cids for block ", blockNumber)
args := make([]interface{}, 0, 5)
pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.tx_id, receipt_cids.leaf_cid,
receipt_cids.leaf_mh_key,
pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.header_id, receipt_cids.tx_id,
receipt_cids.leaf_cid, receipt_cids.leaf_mh_key, receipt_cids.contract, receipt_cids.contract_hash
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.header_id = transaction_cids.header_id
AND receipt_cids.block_number = transaction_cids.block_number
AND transaction_cids.header_id = header_cids.block_hash
AND transaction_cids.block_number = header_cids.block_number`
@@ -474,9 +457,9 @@ func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, b
args = append(args, blockNumber)
id++
}
if blockHash != nil {
if blockHash != "" {
pgStr += fmt.Sprintf(` AND header_cids.block_hash = $%d`, id)
args = append(args, blockHash.String())
args = append(args, blockHash)
id++
}

@@ -609,7 +592,7 @@ func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (models.Head
txHashes[i] = txCID.TxHash
}
var rctCIDs []models.ReceiptModel
rctCIDs, err = ecr.RetrieveReceiptCIDsByTxIDs(tx, txHashes)
rctCIDs, err = ecr.RetrieveReceiptCIDsByByHeaderIDAndTxIDs(tx, headerCID.BlockHash, txHashes, blockNumber)
if err != nil {
log.Error("rct cid retrieval error")
}
@@ -662,7 +645,7 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (models.Header
txHashes[i] = txCID.TxHash
}
var rctCIDs []models.ReceiptModel
rctCIDs, err = ecr.RetrieveReceiptCIDsByTxIDs(tx, txHashes)
rctCIDs, err = ecr.RetrieveReceiptCIDsByByHeaderIDAndTxIDs(tx, headerCID[0].BlockHash, txHashes, blockNumber)
if err != nil {
log.Error("rct cid retrieval error")
}
@@ -691,18 +674,21 @@ func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID string,
return txCIDs, tx.Select(&txCIDs, pgStr, headerID, blockNumber)
}

// RetrieveReceiptCIDsByTxIDs retrieves receipt CIDs by their associated tx IDs
func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txHashes []string) ([]models.ReceiptModel, error) {
// RetrieveReceiptCIDsByByHeaderIDAndTxIDs retrieves receipt CIDs by their associated tx IDs for the given header id
func (ecr *CIDRetriever) RetrieveReceiptCIDsByByHeaderIDAndTxIDs(tx *sqlx.Tx, headerID string, txHashes []string, blockNumber int64) ([]models.ReceiptModel, error) {
log.Debugf("retrieving receipt cids for tx hashes %v", txHashes)
pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key,
receipt_cids.contract, receipt_cids.contract_hash
pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.header_id, receipt_cids.tx_id, receipt_cids.leaf_cid,
receipt_cids.leaf_mh_key, receipt_cids.contract, receipt_cids.contract_hash
FROM eth.receipt_cids, eth.transaction_cids
WHERE tx_id = ANY($1)
WHERE tx_id = ANY($2)
AND receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.header_id = transaction_cids.header_id
AND receipt_cids.block_number = transaction_cids.block_number
AND transaction_cids.header_id = $1
AND transaction_cids.block_number = $3
ORDER BY transaction_cids.index`
var rctCIDs []models.ReceiptModel
return rctCIDs, tx.Select(&rctCIDs, pgStr, pq.Array(txHashes))
return rctCIDs, tx.Select(&rctCIDs, pgStr, headerID, pq.Array(txHashes), blockNumber)
}

// RetrieveHeaderAndTxCIDsByBlockNumber retrieves header CIDs and their associated tx CIDs by block number
@@ -729,33 +715,46 @@ func (ecr *CIDRetriever) RetrieveHeaderAndTxCIDsByBlockNumber(blockNumber int64)
func (ecr *CIDRetriever) RetrieveHeaderAndTxCIDsByBlockHash(blockHash common.Hash) (HeaderCIDRecord, error) {
log.Debug("retrieving header cid and tx cids for block hash ", blockHash.String())

var headerCID HeaderCIDRecord
var headerCIDs []HeaderCIDRecord

// https://github.com/go-gorm/gorm/issues/4083#issuecomment-778883283
// Will use join for TransactionCIDs once preload for 1:N is supported.
err := ecr.gormDB.Preload("TransactionCIDs", func(tx *gorm.DB) *gorm.DB {
return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id", "block_number")
}).Joins("IPLD").First(&headerCID, "block_hash = ?", blockHash.String()).Error
}).Joins("IPLD").Find(&headerCIDs, "block_hash = ?", blockHash.String()).Error

if err != nil {
log.Error("header cid retrieval error")
return headerCID, err
return HeaderCIDRecord{}, err
}

return headerCID, nil
if len(headerCIDs) == 0 {
return HeaderCIDRecord{}, errHeaderHashNotFound
} else if len(headerCIDs) > 1 {
return HeaderCIDRecord{}, errMultipleHeadersForHash
}

return headerCIDs[0], nil
}

// RetrieveTxCIDByHash returns the tx for the given tx hash
func (ecr *CIDRetriever) RetrieveTxCIDByHash(txHash string) (TransactionCIDRecord, error) {
log.Debug("retrieving tx cid for tx hash ", txHash)

var txCID TransactionCIDRecord
var txCIDs []TransactionCIDRecord

err := ecr.gormDB.Joins("IPLD").First(&txCID, "tx_hash = ?", txHash).Error
err := ecr.gormDB.Joins("IPLD").Find(&txCIDs, "tx_hash = ? AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))", txHash).Error
if err != nil {
log.Error("header cid retrieval error")
return txCID, err
return TransactionCIDRecord{}, err
}

return txCID, nil
if len(txCIDs) == 0 {
return TransactionCIDRecord{}, errTxHashNotFound
} else if len(txCIDs) > 1 {
// a transaction can be part of a only one canonical block
return TransactionCIDRecord{}, errTxHashInMultipleBlocks
}

return txCIDs[0], nil
}
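
Note (not part of the diff): the retriever queries above repeatedly pin results to the canonical chain with the ipld-eth-db SQL function canonical_header_hash(block_number). A hypothetical helper sketching that filter in isolation; table and column names are taken from the queries in this changeset:

```go
package eth

// Illustrative only: CountCanonicalTxs is not part of this changeset. It shows
// the canonical-block filter shared by the retriever queries above.

import "github.com/jmoiron/sqlx"

func CountCanonicalTxs(db *sqlx.DB, blockNumber uint64) (int, error) {
	const pgStr = `SELECT count(*) FROM eth.transaction_cids
		WHERE transaction_cids.block_number = $1
		AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))`
	var count int
	// Only transactions attached to the canonical header for their block
	// number are counted; non-canonical duplicates are filtered out.
	err := db.Get(&count, pgStr, blockNumber)
	return count, err
}
```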

@@ -18,10 +18,12 @@ package eth

import (
"fmt"
"strconv"

"github.com/ethereum/go-ethereum/statediff/trie_helpers"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/jmoiron/sqlx"
"github.com/vulcanize/ipld-eth-server/v4/pkg/shared"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
@@ -47,7 +49,7 @@ const (
header_cids.mh_key = blocks.key
AND header_cids.block_number = blocks.block_number
)
WHERE block_number = $1`
WHERE header_cids.block_number = $1`
RetrieveHeaderByHashPgStr = `SELECT cid, data
FROM eth.header_cids
INNER JOIN public.blocks ON (
@@ -72,7 +74,7 @@ const (
uncle_cids.mh_key = blocks.key
AND uncle_cids.block_number = blocks.block_number
)
WHERE block_hash = $1`
WHERE header_cids.block_hash = $1`
RetrieveUnclesByBlockNumberPgStr = `SELECT uncle_cids.cid, data
FROM eth.uncle_cids
INNER JOIN eth.header_cids ON (
@@ -83,7 +85,7 @@ const (
uncle_cids.mh_key = blocks.key
AND uncle_cids.block_number = blocks.block_number
)
WHERE block_number = $1`
WHERE header_cids.block_number = $1`
RetrieveUncleByHashPgStr = `SELECT cid, data
FROM eth.uncle_cids
INNER JOIN public.blocks ON (
@@ -91,7 +93,7 @@ const (
AND uncle_cids.block_number = blocks.block_number
)
WHERE block_hash = $1`
RetrieveTransactionsByHashesPgStr = `SELECT cid, data
RetrieveTransactionsByHashesPgStr = `SELECT DISTINCT ON (tx_hash) cid, data
FROM eth.transaction_cids
INNER JOIN public.blocks ON (
transaction_cids.mh_key = blocks.key
@@ -120,9 +122,10 @@ const (
transaction_cids.mh_key = blocks.key
AND transaction_cids.block_number = blocks.block_number
)
WHERE block_number = $1
WHERE header_cids.block_number = $1
AND block_hash = (SELECT canonical_header_hash(header_cids.block_number))
ORDER BY eth.transaction_cids.index ASC`
RetrieveTransactionByHashPgStr = `SELECT cid, data
RetrieveTransactionByHashPgStr = `SELECT DISTINCT ON (tx_hash) cid, data
FROM eth.transaction_cids
INNER JOIN public.blocks ON (
transaction_cids.mh_key = blocks.key
@@ -133,17 +136,20 @@ const (
FROM eth.receipt_cids
INNER JOIN eth.transaction_cids ON (
receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.header_id = transaction_cids.header_id
AND receipt_cids.block_number = transaction_cids.block_number
)
INNER JOIN public.blocks ON (
receipt_cids.leaf_mh_key = blocks.key
AND receipt_cids.block_number = blocks.block_number
)
WHERE tx_hash = ANY($1::VARCHAR(66)[])`
WHERE tx_hash = ANY($1::VARCHAR(66)[])
AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))`
RetrieveReceiptsByBlockHashPgStr = `SELECT receipt_cids.leaf_cid, data, eth.transaction_cids.tx_hash
FROM eth.receipt_cids
INNER JOIN eth.transaction_cids ON (
receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.header_id = transaction_cids.header_id
AND receipt_cids.block_number = transaction_cids.block_number
)
INNER JOIN eth.header_cids ON (
@@ -160,6 +166,7 @@ const (
FROM eth.receipt_cids
INNER JOIN eth.transaction_cids ON (
receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.header_id = transaction_cids.header_id
AND receipt_cids.block_number = transaction_cids.block_number
)
INNER JOIN eth.header_cids ON (
@@ -170,29 +177,28 @@ const (
receipt_cids.leaf_mh_key = blocks.key
AND receipt_cids.block_number = blocks.block_number
)
WHERE block_number = $1
WHERE header_cids.block_number = $1
AND block_hash = (SELECT canonical_header_hash(header_cids.block_number))
ORDER BY eth.transaction_cids.index ASC`
RetrieveReceiptByTxHashPgStr = `SELECT receipt_cids.leaf_cid, data
FROM eth.receipt_cids
INNER JOIN eth.transaction_cids ON (
receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.header_id = transaction_cids.header_id
AND receipt_cids.block_number = transaction_cids.block_number
)
INNER JOIN public.blocks ON (
receipt_cids.leaf_mh_key = blocks.key
AND receipt_cids.block_number = blocks.block_number
)
WHERE tx_hash = $1`
RetrieveAccountByLeafKeyAndBlockHashPgStr = `SELECT state_cids.cid, data, state_cids.node_type
WHERE tx_hash = $1
AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))`
RetrieveAccountByLeafKeyAndBlockHashPgStr = `SELECT state_cids.cid, state_cids.mh_key, state_cids.block_number, state_cids.node_type
FROM eth.state_cids
INNER JOIN eth.header_cids ON (
state_cids.header_id = header_cids.block_hash
AND state_cids.block_number = header_cids.block_number
)
INNER JOIN public.blocks ON (
state_cids.mh_key = blocks.key
AND state_cids.block_number = blocks.block_number
)
WHERE state_leaf_key = $1
AND header_cids.block_number <= (SELECT block_number
FROM eth.header_cids
@@ -200,21 +206,17 @@ const (
AND header_cids.block_hash = (SELECT canonical_header_hash(header_cids.block_number))
ORDER BY header_cids.block_number DESC
LIMIT 1`
RetrieveAccountByLeafKeyAndBlockNumberPgStr = `SELECT state_cids.cid, data, state_cids.node_type
RetrieveAccountByLeafKeyAndBlockNumberPgStr = `SELECT state_cids.cid, state_cids.mh_key, state_cids.node_type
FROM eth.state_cids
INNER JOIN eth.header_cids ON (
state_cids.header_id = header_cids.block_hash
AND state_cids.block_number = header_cids.block_number
)
INNER JOIN public.blocks ON (
state_cids.mh_key = blocks.key
AND state_cids.block_number = blocks.block_number
)
WHERE state_leaf_key = $1
AND block_number <= $2
ORDER BY block_number DESC
AND header_cids.block_number <= $2
ORDER BY header_cids.block_number DESC
LIMIT 1`
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr = `SELECT storage_cids.cid, storage_cids.mh_key, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
FROM eth.storage_cids
INNER JOIN eth.state_cids ON (
storage_cids.header_id = state_cids.header_id
@@ -225,16 +227,12 @@ const (
state_cids.header_id = header_cids.block_hash
AND state_cids.block_number = header_cids.block_number
)
INNER JOIN public.blocks ON (
storage_cids.mh_key = blocks.key
AND storage_cids.block_number = blocks.block_number
)
WHERE state_leaf_key = $1
AND storage_leaf_key = $2
AND block_number <= $3
ORDER BY block_number DESC
AND header_cids.block_number <= $3
ORDER BY header_cids.block_number DESC
LIMIT 1`
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `SELECT storage_cids.cid, storage_cids.mh_key, storage_cids.block_number, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
FROM eth.storage_cids
INNER JOIN eth.state_cids ON (
storage_cids.header_id = state_cids.header_id
@@ -245,10 +243,6 @@ const (
state_cids.header_id = header_cids.block_hash
AND state_cids.block_number = header_cids.block_number
)
INNER JOIN public.blocks ON (
storage_cids.mh_key = blocks.key
AND storage_cids.block_number = blocks.block_number
)
WHERE state_leaf_key = $1
AND storage_leaf_key = $2
AND header_cids.block_number <= (SELECT block_number
@@ -535,6 +529,8 @@ func (r *IPLDRetriever) RetrieveReceiptByHash(hash common.Hash) (string, []byte,

type nodeInfo struct {
CID string `db:"cid"`
MhKey string `db:"mh_key"`
BlockNumber string `db:"block_number"`
Data []byte `db:"data"`
NodeType int `db:"node_type"`
StateLeafRemoved bool `db:"state_leaf_removed"`
@@ -553,6 +549,15 @@ func (r *IPLDRetriever) RetrieveAccountByAddressAndBlockHash(address common.Addr
return "", EmptyNodeValue, nil
}

blockNumber, err := strconv.ParseUint(accountResult.BlockNumber, 10, 64)
if err != nil {
return "", nil, err
}
accountResult.Data, err = shared.FetchIPLD(r.db, accountResult.MhKey, blockNumber)
if err != nil {
return "", nil, err
}

var i []interface{}
if err := rlp.DecodeBytes(accountResult.Data, &i); err != nil {
return "", nil, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
@@ -576,6 +581,12 @@ func (r *IPLDRetriever) RetrieveAccountByAddressAndBlockNumber(address common.Ad
return "", EmptyNodeValue, nil
}

var err error
accountResult.Data, err = shared.FetchIPLD(r.db, accountResult.MhKey, number)
if err != nil {
return "", nil, err
}

var i []interface{}
if err := rlp.DecodeBytes(accountResult.Data, &i); err != nil {
return "", nil, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
@@ -597,6 +608,16 @@ func (r *IPLDRetriever) RetrieveStorageAtByAddressAndStorageSlotAndBlockHash(add
if storageResult.StateLeafRemoved || storageResult.NodeType == removedNode {
return "", EmptyNodeValue, EmptyNodeValue, nil
}

blockNumber, err := strconv.ParseUint(storageResult.BlockNumber, 10, 64)
if err != nil {
return "", nil, nil, err
}
storageResult.Data, err = shared.FetchIPLD(r.db, storageResult.MhKey, blockNumber)
if err != nil {
return "", nil, nil, err
}

var i []interface{}
if err := rlp.DecodeBytes(storageResult.Data, &i); err != nil {
err = fmt.Errorf("error decoding storage leaf node rlp: %s", err.Error())
@@ -620,6 +641,13 @@ func (r *IPLDRetriever) RetrieveStorageAtByAddressAndStorageKeyAndBlockNumber(ad
if storageResult.StateLeafRemoved || storageResult.NodeType == removedNode {
return "", EmptyNodeValue, nil
}

var err error
storageResult.Data, err = shared.FetchIPLD(r.db, storageResult.MhKey, number)
if err != nil {
return "", nil, err
}

var i []interface{}
if err := rlp.DecodeBytes(storageResult.Data, &i); err != nil {
return "", nil, fmt.Errorf("error decoding storage leaf node rlp: %s", err.Error())

@@ -301,6 +301,7 @@ var (
MockRctMetaPostPublish = []models.ReceiptModel{
{
BlockNumber: "1",
HeaderID: MockBlock.Hash().String(),
LeafCID: Rct1CID.String(),
LeafMhKey: Rct1MhKey,
Contract: "",
@@ -308,6 +309,7 @@ var (
},
{
BlockNumber: "1",
HeaderID: MockBlock.Hash().String(),
LeafCID: Rct2CID.String(),
LeafMhKey: Rct2MhKey,
Contract: "",
@@ -315,6 +317,7 @@ var (
},
{
BlockNumber: "1",
HeaderID: MockBlock.Hash().String(),
LeafCID: Rct3CID.String(),
LeafMhKey: Rct3MhKey,
Contract: ContractAddress.String(),
@@ -322,6 +325,7 @@ var (
},
{
BlockNumber: "1",
HeaderID: MockBlock.Hash().String(),
LeafCID: Rct4CID.String(),
LeafMhKey: Rct4MhKey,
Contract: "",

@@ -23,6 +23,7 @@ import (
"database/sql"
"errors"
"fmt"
"strings"
"time"

"github.com/ethereum/go-ethereum/common"
@@ -1268,10 +1269,12 @@ func (r *Resolver) AllEthHeaderCids(ctx context.Context, args struct {
if args.Condition.BlockHash != nil {
headerCID, err := r.backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(common.HexToHash(*args.Condition.BlockHash))
if err != nil {
return nil, err
if !strings.Contains(err.Error(), "not found") {
return nil, err
}
} else {
headerCIDs = append(headerCIDs, headerCID)
}

headerCIDs = append(headerCIDs, headerCID)
} else if args.Condition.BlockNumber != nil {
headerCIDs, err = r.backend.Retriever.RetrieveHeaderAndTxCIDsByBlockNumber(args.Condition.BlockNumber.ToInt().Int64())
if err != nil {

@@ -55,6 +55,13 @@ func FetchIPLDByMhKeyAndBlockNumber(tx *sqlx.Tx, mhKey string, blockNumber uint6
return block, tx.Get(&block, pgStr, mhKey, blockNumber)
}

// FetchIPLD is used to retrieve an IPLD from Postgres mhkey and blockNumber
func FetchIPLD(db *sqlx.DB, mhKey string, blockNumber uint64) ([]byte, error) {
pgStr := `SELECT data FROM public.blocks WHERE key = $1 AND block_number = $2`
var block []byte
return block, db.Get(&block, pgStr, mhKey, blockNumber)
}

// MultihashKeyFromCID converts a cid into a blockstore-prefixed multihash db key string
func MultihashKeyFromCID(c cid.Cid) string {
dbKey := dshelp.MultihashToDsKey(c.Hash())
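
Note (not part of the diff): a minimal sketch of calling the new shared.FetchIPLD helper directly. The connection string matches the test compose file above, and the multihash key is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // Postgres driver (assumed for this sketch)

	"github.com/vulcanize/ipld-eth-server/v4/pkg/shared"
)

func main() {
	db, err := sqlx.Connect("postgres",
		"host=127.0.0.1 port=8077 user=vdbm password=password dbname=vulcanize_testing sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Placeholder key; real values come from the mh_key columns of the
	// eth.*_cids tables.
	mhKey := "/blocks/..."
	data, err := shared.FetchIPLD(db, mhKey, 1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("fetched %d bytes of IPLD data\n", len(data))
}
```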

@@ -1,26 +1,16 @@
#!/bin/bash

set -e
# Remove any existing containers / volumes
docker-compose down --remove-orphans --volumes

mkdir -p out

# Remove existing docker-tsdb directory
rm -rf out/docker-tsdb/

# Copy over files to setup TimescaleDB
ID=$(docker create vulcanize/ipld-eth-db:v4.1.1-alpha)
docker cp $ID:/app/docker-tsdb out/docker-tsdb/
docker rm -v $ID

# Spin up TimescaleDB
docker-compose -f out/docker-tsdb/docker-compose.test.yml -f docker-compose.yml up ipld-eth-db
trap "docker-compose -f out/docker-tsdb/docker-compose.test.yml -f docker-compose.yml down --remove-orphans --volumes; rm -rf out/" SIGINT SIGTERM ERR
sleep 45
# Spin up DB and run migrations
docker-compose up -d migrations ipld-eth-db
sleep 30

# Run unit tests
go clean -testcache
PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8066 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing_v4 make test
PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing make test

# Clean up
docker-compose -f out/docker-tsdb/docker-compose.test.yml -f docker-compose.yml down --remove-orphans --volumes
docker-compose down --remove-orphans --volumes
rm -rf out/

@@ -4,22 +4,22 @@

- Clone [stack-orchestrator](https://github.com/vulcanize/stack-orchestrator), [ipld-eth-db](https://github.com/vulcanize/ipld-eth-db) [go-ethereum](https://github.com/vulcanize/go-ethereum) repositories.

- Checkout [v4 release](https://github.com/vulcanize/ipld-eth-db/releases/tag/v4.1.1-alpha) in ipld-eth-db repo.
- Checkout [v4 release](https://github.com/vulcanize/ipld-eth-db/releases/tag/v4.2.0-alpha) in ipld-eth-db repo.
```bash
# In ipld-eth-db repo.
git checkout v4.1.1-alpha
git checkout v4.2.0-alpha
```

- Checkout [v4 release](https://github.com/vulcanize/go-ethereum/releases/tag/v1.10.18-statediff-4.0.2-alpha) in go-ethereum repo.
- Checkout [v4 release](https://github.com/vulcanize/go-ethereum/releases/tag/v1.10.19-statediff-4.1.0-alpha) in go-ethereum repo.
```bash
# In go-ethereum repo.
git checkout v1.10.18-statediff-4.0.2-alpha
git checkout v1.10.19-statediff-4.1.0-alpha
```

- Checkout working commit in stack-orchestrator repo.
```bash
# In stack-orchestrator repo.
git checkout 418957a1f745c921b21286c13bb033f922a91ae9
git checkout f2fd766f5400fcb9eb47b50675d2e3b1f2753702
```

## Run
@@ -60,13 +60,11 @@
# Path to test contract.
vulcanize_test_contract=~/ipld-eth-server/test/contract

genesis_file_path='start-up-files/go-ethereum/genesis.json'
db_write=true
eth_forward_eth_calls=false
eth_proxy_on_error=false
eth_http_path="go-ethereum:8545"
ipld_eth_server_db_dependency=access-node
go_ethereum_db_dependency=access-node
connecting_db_name=vulcanize_testing_v4
```

- Run stack-orchestrator:
@@ -77,8 +75,7 @@

./wrapper.sh \
-e docker \
-d ../docker/latest/docker-compose-timescale-db.yml \
-d ../docker/local/docker-compose-db-migration.yml \
-d ../docker/local/docker-compose-db-sharding.yml \
-d ../docker/local/docker-compose-go-ethereum.yml \
-d ../docker/local/docker-compose-ipld-eth-server.yml \
-d ../docker/local/docker-compose-contract.yml \
@@ -107,13 +104,11 @@
# Path to test contract.
vulcanize_test_contract=~/ipld-eth-server/test/contract

genesis_file_path='start-up-files/go-ethereum/genesis.json'
db_write=false
eth_forward_eth_calls=true
eth_proxy_on_error=false
eth_http_path="go-ethereum:8545"
ipld_eth_server_db_dependency=access-node
go_ethereum_db_dependency=access-node
connecting_db_name=vulcanize_testing_v4
```

- Stop the stack-orchestrator and start again using the same command