forked from cerc-io/ipld-eth-server
Merge branch 'v4' into telackey/pxy_get_storage_at
This commit is contained in: commit b37c6bb5a3
60  .github/workflows/on-pr-publish.yaml  vendored  Normal file
@@ -0,0 +1,60 @@
name: Test, Build, and/or Publish
on:
  release:
    types: [published]
  pull_request:

jobs:
  pre_job:
    # continue-on-error: true # Uncomment once integration is finished
    runs-on: ubuntu-latest
    # Map a step output to a job output
    outputs:
      should_skip: ${{ steps.skip_check.outputs.should_skip }}
    steps:
      - id: skip_check
        uses: fkirc/skip-duplicate-actions@v4
        with:
          # All of these options are optional, so you can remove them if you are happy with the defaults
          concurrent_skipping: "never"
          skip_after_successful_duplicate: "true"
          do_not_skip: '["workflow_dispatch", "schedule"]'
  run-tests:
    uses: ./.github/workflows/tests.yaml
    if: ${{ needs.pre_job.outputs.should_skip != 'true' }}
    needs: pre_job
    secrets:
      BUILD_HOSTNAME: ${{ secrets.BUILD_HOSTNAME }}
      BUILD_USERNAME: ${{ secrets.BUILD_USERNAME }}
      BUILD_KEY: ${{ secrets.BUILD_KEY }}
    with:
      STACK_ORCHESTRATOR_REF: "f2fd766f5400fcb9eb47b50675d2e3b1f2753702"
      GO_ETHEREUM_REF: "2ddad81c1a04ff494a706f2f757a0f75d2616fbd"
      IPLD_ETH_DB_REF: "6c00c38cc4e1db6f7c4cecbb62fdfd540fba50d6"
  build:
    name: Run docker build
    runs-on: ubuntu-latest
    needs: run-tests
    if: |
      always() &&
      (needs.run-tests.result == 'success' || needs.run-tests.result == 'skipped') &&
      github.event_name == 'release'
    steps:
      - uses: actions/checkout@v2
      - name: Get the version
        id: vars
        run: |
          echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
          echo ::set-output name=tag::$(echo ${GITHUB_REF#refs/tags/})
      - name: Run docker build
        run: make docker-build
      - name: Tag docker image SHA
        run: docker tag cerc-io/ipld-eth-server git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
      - name: Tag docker image TAG
        run: docker tag git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}} git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.tag}}
      - name: Docker Login
        run: echo ${{ secrets.GITEA_TOKEN }} | docker login https://git.vdb.to -u cerccicd --password-stdin
      - name: Docker Push SHA
        run: docker push git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
      - name: Docker Push TAG
        run: docker push git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.tag}}
10  .github/workflows/on-pr.yaml  vendored
@@ -1,10 +0,0 @@
name: Docker Build

on: [pull_request]
jobs:
  run-tests:
    uses: ./.github/workflows/tests.yaml
    secrets:
      BUILD_HOSTNAME: ${{ secrets.BUILD_HOSTNAME }}
      BUILD_USERNAME: ${{ secrets.BUILD_USERNAME }}
      BUILD_KEY: ${{ secrets.BUILD_KEY }}
49  .github/workflows/publish.yaml  vendored
@@ -1,49 +0,0 @@
name: Publish Docker image
on:
  release:
    types: [published]
jobs:
  run-tests:
    uses: ./.github/workflows/tests.yaml
    secrets:
      BUILD_HOSTNAME: ${{ secrets.BUILD_HOSTNAME }}
      BUILD_USERNAME: ${{ secrets.BUILD_USERNAME }}
      BUILD_KEY: ${{ secrets.BUILD_KEY }}
  build:
    name: Run docker build
    runs-on: ubuntu-latest
    needs: run-tests
    steps:
      - uses: actions/checkout@v2
      - name: Get the version
        id: vars
        run: echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
      - name: Run docker build
        run: make docker-build
      - name: Tag docker image
        run: docker tag vulcanize/ipld-eth-server docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
      - name: Docker Login
        run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
      - name: Docker Push
        run: docker push docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}

  push_to_registries:
    name: Push Docker image to Docker Hub
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Get the version
        id: vars
        run: |
          echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
          echo ::set-output name=tag::$(echo ${GITHUB_REF#refs/tags/})
      - name: Docker Login to Github Registry
        run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
      - name: Docker Pull
        run: docker pull docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
      - name: Docker Login to Docker Registry
        run: echo ${{ secrets.VULCANIZEJENKINS_PAT }} | docker login -u vulcanizejenkins --password-stdin
      - name: Tag docker image
        run: docker tag docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}} vulcanize/ipld-eth-server:${{steps.vars.outputs.tag}}
      - name: Docker Push to Docker Hub
        run: docker push vulcanize/ipld-eth-server:${{steps.vars.outputs.tag}}
16  .github/workflows/run_unit_test.sh  vendored
@@ -1,29 +1,31 @@
#!/bin/bash

set -e
set -o pipefail

# Set up repo
start_dir=$(pwd)
temp_dir=$(mktemp -d)
cd $temp_dir

git clone -b $(cat /tmp/git_head_ref) "https://github.com/$(cat /tmp/git_repository).git"
cd ipld-eth-server

## Remove the branch and github related info. This way future runs wont be confused.
rm -f /tmp/git_head_ref /tmp/git_repository

# Spin up DB
docker-compose -f docker-compose.yml up -d ipld-eth-db
trap "docker-compose down --remove-orphans; cd $start_dir ; rm -r $temp_dir" SIGINT SIGTERM ERR
sleep 10
# Spin up DB and run migrations
echo 'docker-compose up -d migrations ipld-eth-db'
docker-compose up -d migrations ipld-eth-db
trap "docker-compose down -v --remove-orphans; cd $start_dir ; rm -r $temp_dir" SIGINT SIGTERM ERR
sleep 60

# Remove old logs so there's no confusion, then run test
rm -f /tmp/test.log /tmp/return_test.txt
PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing make test > /tmp/test.log
PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=localhost DATABASE_NAME=vulcanize_testing make test | tee /tmp/test.log
echo $? > /tmp/return_test.txt

# Clean up
docker-compose -f docker-compose.yml down -v --remove-orphans
docker-compose down -v --remove-orphans
cd $start_dir
rm -fr $temp_dir
55  .github/workflows/tests.yaml  vendored
@@ -8,6 +8,16 @@ on:
      required: true
    BUILD_KEY:
      required: true
  inputs:
    STACK_ORCHESTRATOR_REF:
      required: true
      type: string
    GO_ETHEREUM_REF:
      required: true
      type: string
    IPLD_ETH_DB_REF:
      required: true
      type: string

jobs:
  build:
@@ -27,7 +37,7 @@ jobs:
      BUILD_KEY: ${{ secrets.BUILD_KEY }}
    #strategy:
    #  matrix:
    #    go-version: [1.16.x, 1.17.x]
    #    go-version: [1.16.x, 1.17.x, 1.18.x]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
@@ -43,7 +53,6 @@ jobs:
          chmod 400 /tmp/key
          cat /tmp/git_repository
          cat /tmp/git_head_ref
          echo

      - name: Raw SCP
        run: |
@@ -69,8 +78,6 @@ jobs:
  integrationtest:
    name: Run integration tests
    env:
      STACK_ORCHESTRATOR_REF: fcbc74451c5494664fe21f765e89c9c6565c07cb
      GO_ETHEREUM_REF: 498101102c891c4f8c3cab5649158c642ee1fd6b
      GOPATH: /tmp/go
      DB_WRITE: true
      ETH_FORWARD_ETH_CALLS: false
@@ -82,25 +89,33 @@ jobs:
        run: mkdir -p /tmp/go
      - uses: actions/setup-go@v3
        with:
          go-version: ">=1.18.0"
          go-version: "1.18.x"
          check-latest: true
      - uses: actions/checkout@v2
        with:
          path: "./ipld-eth-server"
      - uses: actions/checkout@v2
        with:
          ref: ${{ env.STACK_ORCHESTRATOR_REF }}
          ref: ${{ inputs.STACK_ORCHESTRATOR_REF }}
          path: "./stack-orchestrator/"
          repository: vulcanize/stack-orchestrator
      - uses: actions/checkout@v2
        with:
          ref: ${{ env.GO_ETHEREUM_REF }}
          repository: vulcanize/go-ethereum
          ref: ${{ inputs.GO_ETHEREUM_REF }}
          repository: cerc-io/go-ethereum
          path: "./go-ethereum/"
      - uses: actions/checkout@v2
        with:
          ref: ${{ inputs.IPLD_ETH_DB_REF }}
          repository: cerc-io/ipld-eth-db
          path: "./ipld-eth-db/"
      - name: Create config file
        run: |
          echo vulcanize_go_ethereum=$GITHUB_WORKSPACE/go-ethereum/ > ./config.sh
          echo vulcanize_ipld_eth_db=$GITHUB_WORKSPACE/ipld-eth-db/ >> ./config.sh
          echo vulcanize_ipld_eth_server=$GITHUB_WORKSPACE/ipld-eth-server/ >> ./config.sh
          echo vulcanize_test_contract=$GITHUB_WORKSPACE/ipld-eth-server/test/contract >> ./config.sh
          echo genesis_file_path=start-up-files/go-ethereum/genesis.json >> ./config.sh
          echo db_write=$DB_WRITE >> ./config.sh
          echo eth_forward_eth_calls=$ETH_FORWARD_ETH_CALLS >> ./config.sh
          echo eth_proxy_on_error=$ETH_PROXY_ON_ERROR >> ./config.sh
@@ -115,9 +130,10 @@ jobs:
      - name: Run docker compose
        run: |
          docker-compose \
          -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-db.yml" \
          -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" \
          -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" \
          -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-server.yml" \
          -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-contract.yml" \
          --env-file "$GITHUB_WORKSPACE/config.sh" \
          up -d --build
      - name: Test
@@ -130,8 +146,6 @@ jobs:
  integrationtest_forwardethcalls:
    name: Run integration tests for direct proxy fall-through of eth_calls
    env:
      STACK_ORCHESTRATOR_REF: fcbc74451c5494664fe21f765e89c9c6565c07cb
      GO_ETHEREUM_REF: 498101102c891c4f8c3cab5649158c642ee1fd6b
      GOPATH: /tmp/go
      DB_WRITE: false
      ETH_FORWARD_ETH_CALLS: true
@@ -143,25 +157,33 @@ jobs:
        run: mkdir -p /tmp/go
      - uses: actions/setup-go@v3
        with:
          go-version: ">=1.18.0"
          go-version: "1.18.x"
          check-latest: true
      - uses: actions/checkout@v2
        with:
          path: "./ipld-eth-server"
      - uses: actions/checkout@v2
        with:
          ref: ${{ env.STACK_ORCHESTRATOR_REF }}
          ref: ${{ inputs.STACK_ORCHESTRATOR_REF }}
          path: "./stack-orchestrator/"
          repository: vulcanize/stack-orchestrator
      - uses: actions/checkout@v2
        with:
          ref: ${{ env.GO_ETHEREUM_REF }}
          repository: vulcanize/go-ethereum
          ref: ${{ inputs.GO_ETHEREUM_REF }}
          repository: cerc-io/go-ethereum
          path: "./go-ethereum/"
      - uses: actions/checkout@v2
        with:
          ref: ${{ inputs.IPLD_ETH_DB_REF }}
          repository: cerc-io/ipld-eth-db
          path: "./ipld-eth-db/"
      - name: Create config file
        run: |
          echo vulcanize_go_ethereum=$GITHUB_WORKSPACE/go-ethereum/ > ./config.sh
          echo vulcanize_ipld_eth_db=$GITHUB_WORKSPACE/ipld-eth-db/ >> ./config.sh
          echo vulcanize_ipld_eth_server=$GITHUB_WORKSPACE/ipld-eth-server/ >> ./config.sh
          echo vulcanize_test_contract=$GITHUB_WORKSPACE/ipld-eth-server/test/contract >>./config.sh
          echo genesis_file_path=start-up-files/go-ethereum/genesis.json >> ./config.sh
          echo db_write=$DB_WRITE >> ./config.sh
          echo eth_forward_eth_calls=$ETH_FORWARD_ETH_CALLS >> ./config.sh
          echo eth_proxy_on_error=$ETH_PROXY_ON_ERROR >> ./config.sh
@@ -176,9 +198,10 @@ jobs:
      - name: Run docker compose
        run: |
          docker-compose \
          -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-db.yml" \
          -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" \
          -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" \
          -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-server.yml" \
          -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-contract.yml" \
          --env-file "$GITHUB_WORKSPACE/config.sh" \
          up -d --build
      - name: Test
10  Dockerfile
@@ -5,7 +5,7 @@ RUN apk --update --no-cache add make git g++ linux-headers
RUN apk add busybox-extras

# Build ipld-eth-server
WORKDIR /go/src/github.com/vulcanize/ipld-eth-server
WORKDIR /go/src/github.com/cerc-io/ipld-eth-server

# Cache the modules
ENV GO111MODULE=on
@@ -37,13 +37,13 @@ USER $USER

# chown first so dir is writable
# note: using $USER is merged, but not in the stable release yet
COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/ipld-eth-server/$CONFIG_FILE config.toml
COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/ipld-eth-server/entrypoint.sh .
COPY --chown=5000:5000 --from=builder /go/src/github.com/cerc-io/ipld-eth-server/$CONFIG_FILE config.toml
COPY --chown=5000:5000 --from=builder /go/src/github.com/cerc-io/ipld-eth-server/entrypoint.sh .

# keep binaries immutable
COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-server/ipld-eth-server ipld-eth-server
COPY --from=builder /go/src/github.com/cerc-io/ipld-eth-server/ipld-eth-server ipld-eth-server
COPY --from=builder /goose goose
COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-server/environments environments
COPY --from=builder /go/src/github.com/cerc-io/ipld-eth-server/environments environments

ENTRYPOINT ["/app/entrypoint.sh"]
8  Makefile
@@ -51,19 +51,19 @@ TEST_CONNECT_STRING = postgresql://$(DATABASE_USER):$(DATABASE_PASSWORD)@$(DATAB
TEST_CONNECT_STRING_LOCAL = postgresql://$(USER)@$(HOST_NAME):$(PORT)/$(TEST_DB)?sslmode=disable

.PHONY: test
test: | $(GOOSE)
test:
	go vet ./...
	go fmt ./...
	go run github.com/onsi/ginkgo/ginkgo -r --skipPackage=test

.PHONY: integrationtest
integrationtest: | $(GOOSE)
integrationtest:
	go vet ./...
	go fmt ./...
	go run github.com/onsi/ginkgo/ginkgo -r test/ -v

.PHONY: test_local
test_local: | $(GOOSE)
test_local:
	go vet ./...
	go fmt ./...
	./scripts/run_unit_test.sh
@@ -135,4 +135,4 @@ import:
## Build docker image
.PHONY: docker-build
docker-build:
	docker build -t vulcanize/ipld-eth-server .
	docker build -t cerc-io/ipld-eth-server .
@@ -22,20 +22,20 @@ Additional, unique endpoints are exposed which utilize the new indexes and state

## Dependencies
Minimal build dependencies
* Go (1.13)
* Go (1.18)
* Git
* GCC compiler
* This repository

External dependency
* Postgres database populated by [ipld-eth-indexer](https://github.com/vulcanize/ipld-eth-indexer)
* Postgres database populated by [ipld-eth-db](https://github.com/cerc-io/ipld-eth-db)

## Install
Start by downloading ipld-eth-server and moving into the repo:

`GO111MODULE=off go get -d github.com/vulcanize/ipld-eth-server/v3`
`GO111MODULE=off go get -d github.com/cerc-io/ipld-eth-server/v4`

`cd $GOPATH/src/github.com/vulcanize/ipld-eth-server/v3@v3.x.x`
`cd $GOPATH/src/github.com/cerc-io/ipld-eth-server/v4@v4.x.x`

Then, build the binary:
@@ -27,7 +27,7 @@ import (
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	"github.com/vulcanize/ipld-eth-server/v3/pkg/prom"
	"github.com/cerc-io/ipld-eth-server/v4/pkg/prom"
)

var (
16  cmd/serve.go
@@ -17,6 +17,7 @@ package cmd

import (
	"errors"
	"github.com/mailgun/groupcache/v2"
	"net/http"
	"net/url"
	"os"
@@ -26,17 +27,16 @@ import (
	"time"

	"github.com/ethereum/go-ethereum/rpc"
	"github.com/mailgun/groupcache/v2"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"github.com/vulcanize/gap-filler/pkg/mux"

	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
	"github.com/vulcanize/ipld-eth-server/v3/pkg/graphql"
	srpc "github.com/vulcanize/ipld-eth-server/v3/pkg/rpc"
	s "github.com/vulcanize/ipld-eth-server/v3/pkg/serve"
	v "github.com/vulcanize/ipld-eth-server/v3/version"
	"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
	"github.com/cerc-io/ipld-eth-server/v4/pkg/graphql"
	srpc "github.com/cerc-io/ipld-eth-server/v4/pkg/rpc"
	s "github.com/cerc-io/ipld-eth-server/v4/pkg/serve"
	v "github.com/cerc-io/ipld-eth-server/v4/version"
)

var ErrNoRpcEndpoints = errors.New("no rpc endpoints is available")
@@ -123,7 +123,7 @@ func startServers(server s.Server, settings *s.Config) error {

	if settings.WSEnabled {
		logWithCommand.Info("starting up WS server")
		_, _, err := srpc.StartWSEndpoint(settings.WSEndpoint, server.APIs(), []string{"vdb", "net"}, nil, true)
		_, _, err := srpc.StartWSEndpoint(settings.WSEndpoint, server.APIs(), []string{"vdb", "net"}, nil)
		if err != nil {
			return err
		}
@@ -133,7 +133,7 @@ func startServers(server s.Server, settings *s.Config) error {

	if settings.HTTPEnabled {
		logWithCommand.Info("starting up HTTP server")
		_, err := srpc.StartHTTPEndpoint(settings.HTTPEndpoint, server.APIs(), []string{"vdb", "eth", "net"}, nil, []string{"*"}, rpc.HTTPTimeouts{})
		_, err := srpc.StartHTTPEndpoint(settings.HTTPEndpoint, server.APIs(), []string{"vdb", "eth", "debug", "net"}, nil, []string{"*"}, rpc.HTTPTimeouts{})
		if err != nil {
			return err
		}
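Editor's note: the HTTP endpoint above now serves a "debug" module alongside "vdb", "eth", and "net". A minimal Go sketch of how a client might exercise that namespace is shown below; it is not part of this commit. The endpoint URL and transaction hash are placeholders, and which debug_* methods actually resolve depends on the tracer APIs the server registers.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Hypothetical ipld-eth-server HTTP endpoint; adjust to your deployment.
	client, err := rpc.Dial("http://localhost:8081")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Call a method in the newly exposed "debug" namespace.
	// debug_traceTransaction is the conventional go-ethereum tracer method;
	// its availability depends on the tracer backend wired into the server.
	var trace json.RawMessage
	txHash := "0x0000000000000000000000000000000000000000000000000000000000000000" // placeholder
	if err := client.CallContext(context.Background(), &trace, "debug_traceTransaction", txHash); err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(trace))
}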
@@ -27,9 +27,9 @@ import (
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	"github.com/vulcanize/ipld-eth-server/v3/pkg/client"
	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
	w "github.com/vulcanize/ipld-eth-server/v3/pkg/serve"
	"github.com/cerc-io/ipld-eth-server/v4/pkg/client"
	"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
	w "github.com/cerc-io/ipld-eth-server/v4/pkg/serve"
)

// subscribeCmd represents the subscribe command
@@ -18,14 +18,14 @@ package cmd
import (
	"time"

	validator "github.com/cerc-io/eth-ipfs-state-validator/v4/pkg"
	ipfsethdb "github.com/cerc-io/ipfs-ethdb/v4/postgres"
	"github.com/ethereum/go-ethereum/common"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	validator "github.com/vulcanize/eth-ipfs-state-validator/v3/pkg"
	ipfsethdb "github.com/vulcanize/ipfs-ethdb/v3/postgres"

	s "github.com/vulcanize/ipld-eth-server/v3/pkg/serve"
	s "github.com/cerc-io/ipld-eth-server/v4/pkg/serve"
)

const GroupName = "statedb-validate"
@@ -19,7 +19,7 @@ import (
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"

	v "github.com/vulcanize/ipld-eth-server/v3/version"
	v "github.com/cerc-io/ipld-eth-server/v4/version"
)

// versionCmd represents the version command
@@ -1,18 +1,28 @@
version: '3.2'

services:
  migrations:
    restart: on-failure
    depends_on:
      - ipld-eth-db
    image: git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:v4.2.3-alpha
    environment:
      DATABASE_USER: "vdbm"
      DATABASE_NAME: "vulcanize_testing"
      DATABASE_PASSWORD: "password"
      DATABASE_HOSTNAME: "ipld-eth-db"
      DATABASE_PORT: 5432

  ipld-eth-db:
    image: timescale/timescaledb:latest-pg14
    restart: always
    image: vulcanize/ipld-eth-db:v3.2.0
    command: ["postgres", "-c", "log_statement=all"]
    environment:
      POSTGRES_USER: "vdbm"
      POSTGRES_DB: "vulcanize_testing"
      POSTGRES_PASSWORD: "password"
    volumes:
      - vdb_db_eth_server:/var/lib/postgresql/data
    ports:
      - "127.0.0.1:8077:5432"
    command: ["postgres", "-c", "log_statement=all"]

  eth-server:
    restart: unless-stopped
@@ -45,20 +55,5 @@ services:
    ports:
      - "127.0.0.1:8081:8081"

  graphql:
    restart: unless-stopped
    depends_on:
      - ipld-eth-db
    image: vulcanize/postgraphile:v1.0.1
    environment:
      - PG_HOST=db
      - PG_PORT=5432
      - PG_DATABASE=vulcanize_public
      - PG_USER=vdbm
      - PG_PASSWORD=password
      - SCHEMA=public,eth
    ports:
      - "127.0.0.1:5000:5000"

volumes:
  vdb_db_eth_server:
@@ -40,9 +40,9 @@ An example of how to subscribe to a real-time Ethereum data feed from ipld-eth-s
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/spf13/viper"

	"github.com/vulcanize/ipld-eth-server/v3/pkg/client"
	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
	"github.com/vulcanize/ipld-eth-server/v3/pkg/watch"
	"github.com/cerc-io/ipld-eth-server/v4/pkg/client"
	"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
	"github.com/cerc-io/ipld-eth-server/v4/pkg/watch"
)

config, _ := eth.NewEthSubscriptionConfig()
@@ -160,9 +160,9 @@ An example of how to subscribe to a real-time Bitcoin data feed from ipld-eth-se
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/spf13/viper"

	"github.com/vulcanize/ipld-eth-server/v3/pkg/btc"
	"github.com/vulcanize/ipld-eth-server/v3/pkg/client"
	"github.com/vulcanize/ipld-eth-server/v3/pkg/watch"
	"github.com/vulcanize/ipld-eth-server/v4/pkg/btc"
	"github.com/vulcanize/ipld-eth-server/v4/pkg/client"
	"github.com/vulcanize/ipld-eth-server/v4/pkg/watch"
)

config, _ := btc.NewBtcSubscriptionConfig()
249  go.mod
@@ -1,51 +1,52 @@
module github.com/vulcanize/ipld-eth-server/v3
module github.com/cerc-io/ipld-eth-server/v4

go 1.18

require (
	github.com/ethereum/go-ethereum v1.10.18
	github.com/cerc-io/eth-ipfs-state-validator/v4 v4.0.10-alpha
	github.com/cerc-io/go-eth-state-node-iterator v1.1.9
	github.com/cerc-io/ipfs-ethdb/v4 v4.0.10-alpha
	github.com/ethereum/go-ethereum v1.10.26
	github.com/graph-gophers/graphql-go v1.3.0
	github.com/ipfs/go-block-format v0.0.3
	github.com/ipfs/go-cid v0.0.7
	github.com/ipfs/go-ipfs-blockstore v1.0.1
	github.com/ipfs/go-ipfs-ds-help v1.0.0
	github.com/ipfs/go-ipld-format v0.2.0
	github.com/jinzhu/now v1.1.5 // indirect
	github.com/ipfs/go-cid v0.2.0
	github.com/ipfs/go-ipfs-blockstore v1.2.0
	github.com/ipfs/go-ipfs-ds-help v1.1.0
	github.com/jmoiron/sqlx v1.3.5
	github.com/joho/godotenv v1.4.0
	github.com/lib/pq v1.10.5
	github.com/lib/pq v1.10.6
	github.com/machinebox/graphql v0.2.2
	github.com/mailgun/groupcache/v2 v2.3.0
	github.com/multiformats/go-multihash v0.1.0
	github.com/multiformats/go-multihash v0.2.0
	github.com/onsi/ginkgo v1.16.5
	github.com/onsi/gomega v1.19.0
	github.com/prometheus/client_golang v1.11.0
	github.com/sirupsen/logrus v1.8.1
	github.com/prometheus/client_golang v1.12.1
	github.com/sirupsen/logrus v1.9.0
	github.com/spf13/cobra v1.4.0
	github.com/spf13/viper v1.11.0
	github.com/thoas/go-funk v0.9.2 // indirect
	github.com/vulcanize/eth-ipfs-state-validator/v3 v3.0.2
	github.com/vulcanize/gap-filler v0.3.1
	github.com/vulcanize/ipfs-ethdb/v3 v3.0.3
	golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
	github.com/vulcanize/gap-filler v0.4.2
	gorm.io/driver/postgres v1.3.7
	gorm.io/gorm v1.23.5
)

require (
	bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc // indirect
	github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
	github.com/Stebalien/go-bitfield v0.0.1 // indirect
	github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
	github.com/benbjohnson/clock v1.1.0 // indirect
	github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a // indirect
	github.com/benbjohnson/clock v1.3.0 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/blang/semver/v4 v4.0.0 // indirect
	github.com/btcsuite/btcd v0.22.1 // indirect
	github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
	github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect
	github.com/cenkalti/backoff v2.2.1+incompatible // indirect
	github.com/cenkalti/backoff/v4 v4.1.1 // indirect
	github.com/cespare/xxhash/v2 v2.1.1 // indirect
	github.com/cenkalti/backoff/v4 v4.1.3 // indirect
	github.com/cespare/xxhash/v2 v2.1.2 // indirect
	github.com/cheekybits/genny v1.0.0 // indirect
	github.com/containerd/cgroups v1.0.3 // indirect
	github.com/coreos/go-systemd/v22 v22.3.2 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
	github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect
	github.com/cskr/pubsub v1.0.2 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
@@ -53,28 +54,35 @@ require (
	github.com/deckarep/golang-set v1.8.0 // indirect
	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
	github.com/deepmap/oapi-codegen v1.8.2 // indirect
	github.com/docker/go-units v0.4.0 // indirect
	github.com/dustin/go-humanize v1.0.0 // indirect
	github.com/edsrzf/mmap-go v1.0.0 // indirect
	github.com/elastic/gosigar v0.14.2 // indirect
	github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 // indirect
	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
	github.com/flynn/noise v1.0.0 // indirect
	github.com/francoispqt/gojay v1.2.13 // indirect
	github.com/friendsofgo/graphiql v0.2.2 // indirect
	github.com/fsnotify/fsnotify v1.5.1 // indirect
	github.com/fsnotify/fsnotify v1.5.4 // indirect
	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
	github.com/georgysavva/scany v0.2.9 // indirect
	github.com/go-ole/go-ole v1.2.1 // indirect
	github.com/go-logr/logr v1.2.3 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-ole/go-ole v1.2.6 // indirect
	github.com/go-stack/stack v1.8.0 // indirect
	github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
	github.com/godbus/dbus/v5 v5.1.0 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang-jwt/jwt/v4 v4.3.0 // indirect
	github.com/golang/protobuf v1.5.2 // indirect
	github.com/golang/snappy v0.0.4 // indirect
	github.com/google/gopacket v1.1.19 // indirect
	github.com/google/uuid v1.3.0 // indirect
	github.com/gorilla/websocket v1.4.2 // indirect
	github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect
	github.com/gorilla/websocket v1.5.0 // indirect
	github.com/graphql-go/graphql v0.7.9 // indirect
	github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e // indirect
	github.com/hashicorp/errwrap v1.0.0 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-bexpr v0.1.10 // indirect
	github.com/hashicorp/go-multierror v1.1.1 // indirect
	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
@@ -87,46 +95,49 @@ require (
	github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
	github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
	github.com/ipfs/bbloom v0.0.4 // indirect
	github.com/ipfs/go-bitswap v0.4.0 // indirect
	github.com/ipfs/go-blockservice v0.1.7 // indirect
	github.com/ipfs/go-cidutil v0.0.2 // indirect
	github.com/ipfs/go-datastore v0.4.6 // indirect
	github.com/ipfs/go-ds-measure v0.1.0 // indirect
	github.com/ipfs/go-fetcher v1.5.0 // indirect
	github.com/ipfs/go-filestore v1.0.0 // indirect
	github.com/ipfs/go-bitfield v1.0.0 // indirect
	github.com/ipfs/go-bitswap v0.8.0 // indirect
	github.com/ipfs/go-blockservice v0.4.0 // indirect
	github.com/ipfs/go-cidutil v0.1.0 // indirect
	github.com/ipfs/go-datastore v0.5.1 // indirect
	github.com/ipfs/go-delegated-routing v0.3.0 // indirect
	github.com/ipfs/go-ds-measure v0.2.0 // indirect
	github.com/ipfs/go-fetcher v1.6.1 // indirect
	github.com/ipfs/go-filestore v1.2.0 // indirect
	github.com/ipfs/go-fs-lock v0.0.7 // indirect
	github.com/ipfs/go-graphsync v0.8.0 // indirect
	github.com/ipfs/go-ipfs v0.10.0 // indirect
	github.com/ipfs/go-graphsync v0.13.1 // indirect
	github.com/ipfs/go-ipfs-chunker v0.0.5 // indirect
	github.com/ipfs/go-ipfs-config v0.16.0 // indirect
	github.com/ipfs/go-ipfs-delay v0.0.1 // indirect
	github.com/ipfs/go-ipfs-exchange-interface v0.0.1 // indirect
	github.com/ipfs/go-ipfs-exchange-offline v0.0.1 // indirect
	github.com/ipfs/go-ipfs-files v0.0.8 // indirect
	github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect
	github.com/ipfs/go-ipfs-exchange-offline v0.3.0 // indirect
	github.com/ipfs/go-ipfs-files v0.1.1 // indirect
	github.com/ipfs/go-ipfs-keystore v0.0.2 // indirect
	github.com/ipfs/go-ipfs-pinner v0.1.2 // indirect
	github.com/ipfs/go-ipfs-pinner v0.2.1 // indirect
	github.com/ipfs/go-ipfs-posinfo v0.0.1 // indirect
	github.com/ipfs/go-ipfs-pq v0.0.2 // indirect
	github.com/ipfs/go-ipfs-provider v0.6.1 // indirect
	github.com/ipfs/go-ipfs-routing v0.1.0 // indirect
	github.com/ipfs/go-ipfs-provider v0.7.1 // indirect
	github.com/ipfs/go-ipfs-routing v0.2.1 // indirect
	github.com/ipfs/go-ipfs-util v0.0.2 // indirect
	github.com/ipfs/go-ipld-cbor v0.0.5 // indirect
	github.com/ipfs/go-ipld-legacy v0.1.0 // indirect
	github.com/ipfs/go-ipld-format v0.4.0 // indirect
	github.com/ipfs/go-ipld-legacy v0.1.1 // indirect
	github.com/ipfs/go-ipns v0.1.2 // indirect
	github.com/ipfs/go-log v1.0.5 // indirect
	github.com/ipfs/go-log/v2 v2.3.0 // indirect
	github.com/ipfs/go-merkledag v0.4.0 // indirect
	github.com/ipfs/go-log/v2 v2.5.1 // indirect
	github.com/ipfs/go-merkledag v0.6.0 // indirect
	github.com/ipfs/go-metrics-interface v0.0.1 // indirect
	github.com/ipfs/go-mfs v0.1.2 // indirect
	github.com/ipfs/go-namesys v0.3.1 // indirect
	github.com/ipfs/go-path v0.1.2 // indirect
	github.com/ipfs/go-peertaskqueue v0.4.0 // indirect
	github.com/ipfs/go-unixfs v0.2.5 // indirect
	github.com/ipfs/go-unixfsnode v1.1.3 // indirect
	github.com/ipfs/go-mfs v0.2.1 // indirect
	github.com/ipfs/go-namesys v0.5.0 // indirect
	github.com/ipfs/go-path v0.3.0 // indirect
	github.com/ipfs/go-peertaskqueue v0.7.1 // indirect
	github.com/ipfs/go-unixfs v0.4.0 // indirect
	github.com/ipfs/go-unixfsnode v1.4.0 // indirect
	github.com/ipfs/go-verifcid v0.0.1 // indirect
	github.com/ipfs/interface-go-ipfs-core v0.5.1 // indirect
	github.com/ipld/go-codec-dagpb v1.3.0 // indirect
	github.com/ipld/go-ipld-prime v0.12.2 // indirect
	github.com/ipfs/interface-go-ipfs-core v0.7.0 // indirect
	github.com/ipfs/kubo v0.14.0 // indirect
	github.com/ipld/edelweiss v0.1.4 // indirect
	github.com/ipld/go-codec-dagpb v1.4.0 // indirect
	github.com/ipld/go-ipld-prime v0.17.0 // indirect
	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
	github.com/jackc/pgconn v1.12.1 // indirect
	github.com/jackc/pgio v1.0.0 // indirect
@@ -141,57 +152,39 @@ require (
	github.com/jbenet/goprocess v0.1.4 // indirect
	github.com/jinzhu/copier v0.2.4 // indirect
	github.com/jinzhu/inflection v1.0.0 // indirect
	github.com/klauspost/compress v1.11.7 // indirect
	github.com/klauspost/cpuid/v2 v2.0.9 // indirect
	github.com/jinzhu/now v1.1.4 // indirect
	github.com/klauspost/compress v1.15.1 // indirect
	github.com/klauspost/cpuid/v2 v2.0.12 // indirect
	github.com/koron/go-ssdp v0.0.2 // indirect
	github.com/libp2p/go-addr-util v0.1.0 // indirect
	github.com/libp2p/go-buffer-pool v0.0.2 // indirect
	github.com/libp2p/go-cidranger v1.1.0 // indirect
	github.com/libp2p/go-conn-security-multistream v0.2.1 // indirect
	github.com/libp2p/go-doh-resolver v0.3.1 // indirect
	github.com/libp2p/go-doh-resolver v0.4.0 // indirect
	github.com/libp2p/go-eventbus v0.2.1 // indirect
	github.com/libp2p/go-flow-metrics v0.0.3 // indirect
	github.com/libp2p/go-libp2p v0.15.0 // indirect
	github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 // indirect
	github.com/libp2p/go-libp2p-autonat v0.4.2 // indirect
	github.com/libp2p/go-libp2p-blankhost v0.2.0 // indirect
	github.com/libp2p/go-libp2p-circuit v0.4.0 // indirect
	github.com/libp2p/go-libp2p-connmgr v0.2.4 // indirect
	github.com/libp2p/go-libp2p-core v0.9.0 // indirect
	github.com/libp2p/go-libp2p-discovery v0.5.1 // indirect
	github.com/libp2p/go-libp2p-kad-dht v0.13.1 // indirect
	github.com/libp2p/go-libp2p v0.20.3 // indirect
	github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect
	github.com/libp2p/go-libp2p-core v0.16.1 // indirect
	github.com/libp2p/go-libp2p-discovery v0.7.0 // indirect
	github.com/libp2p/go-libp2p-kad-dht v0.16.0 // indirect
	github.com/libp2p/go-libp2p-kbucket v0.4.7 // indirect
	github.com/libp2p/go-libp2p-loggables v0.1.0 // indirect
	github.com/libp2p/go-libp2p-mplex v0.4.1 // indirect
	github.com/libp2p/go-libp2p-nat v0.0.6 // indirect
	github.com/libp2p/go-libp2p-noise v0.2.2 // indirect
	github.com/libp2p/go-libp2p-peerstore v0.2.8 // indirect
	github.com/libp2p/go-libp2p-pnet v0.2.0 // indirect
	github.com/libp2p/go-libp2p-pubsub v0.5.4 // indirect
	github.com/libp2p/go-libp2p-pubsub-router v0.4.0 // indirect
	github.com/libp2p/go-libp2p-quic-transport v0.12.0 // indirect
	github.com/libp2p/go-libp2p-peerstore v0.6.0 // indirect
	github.com/libp2p/go-libp2p-pubsub v0.6.1 // indirect
	github.com/libp2p/go-libp2p-pubsub-router v0.5.0 // indirect
	github.com/libp2p/go-libp2p-record v0.1.3 // indirect
	github.com/libp2p/go-libp2p-resource-manager v0.3.0 // indirect
	github.com/libp2p/go-libp2p-routing-helpers v0.2.3 // indirect
	github.com/libp2p/go-libp2p-swarm v0.5.3 // indirect
	github.com/libp2p/go-libp2p-tls v0.2.0 // indirect
	github.com/libp2p/go-libp2p-transport-upgrader v0.4.6 // indirect
	github.com/libp2p/go-libp2p-xor v0.0.0-20210714161855-5c005aca55db // indirect
	github.com/libp2p/go-libp2p-yamux v0.5.4 // indirect
	github.com/libp2p/go-maddr-filter v0.1.0 // indirect
	github.com/libp2p/go-mplex v0.3.0 // indirect
	github.com/libp2p/go-msgio v0.0.6 // indirect
	github.com/libp2p/go-nat v0.0.5 // indirect
	github.com/libp2p/go-netroute v0.1.6 // indirect
	github.com/libp2p/go-libp2p-swarm v0.11.0 // indirect
	github.com/libp2p/go-libp2p-xor v0.1.0 // indirect
	github.com/libp2p/go-mplex v0.7.0 // indirect
	github.com/libp2p/go-msgio v0.2.0 // indirect
	github.com/libp2p/go-nat v0.1.0 // indirect
	github.com/libp2p/go-netroute v0.2.0 // indirect
	github.com/libp2p/go-openssl v0.0.7 // indirect
	github.com/libp2p/go-reuseport v0.0.2 // indirect
	github.com/libp2p/go-reuseport-transport v0.0.5 // indirect
	github.com/libp2p/go-sockaddr v0.1.1 // indirect
	github.com/libp2p/go-stream-muxer-multistream v0.3.0 // indirect
	github.com/libp2p/go-tcp-transport v0.2.8 // indirect
	github.com/libp2p/go-ws-transport v0.5.0 // indirect
	github.com/libp2p/go-yamux/v2 v2.2.0 // indirect
	github.com/libp2p/zeroconf/v2 v2.0.0 // indirect
	github.com/lucas-clemente/quic-go v0.26.0 // indirect
	github.com/libp2p/go-reuseport v0.2.0 // indirect
	github.com/libp2p/go-yamux/v3 v3.1.2 // indirect
	github.com/libp2p/zeroconf/v2 v2.1.1 // indirect
	github.com/lucas-clemente/quic-go v0.27.1 // indirect
	github.com/magiconair/properties v1.8.6 // indirect
	github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
	github.com/marten-seemann/qtls-go1-17 v0.1.1 // indirect
@@ -201,28 +194,30 @@ require (
	github.com/mattn/go-colorable v0.1.12 // indirect
	github.com/mattn/go-isatty v0.0.14 // indirect
	github.com/mattn/go-runewidth v0.0.9 // indirect
	github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
	github.com/miekg/dns v1.1.43 // indirect
	github.com/miekg/dns v1.1.48 // indirect
	github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
	github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
	github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
	github.com/minio/sha256-simd v1.0.0 // indirect
	github.com/mitchellh/go-homedir v1.1.0 // indirect
	github.com/mitchellh/mapstructure v1.4.3 // indirect
	github.com/mitchellh/pointerstructure v1.2.0 // indirect
	github.com/mr-tron/base58 v1.2.0 // indirect
	github.com/multiformats/go-base32 v0.0.3 // indirect
	github.com/multiformats/go-base32 v0.0.4 // indirect
	github.com/multiformats/go-base36 v0.1.0 // indirect
	github.com/multiformats/go-multiaddr v0.4.0 // indirect
	github.com/multiformats/go-multiaddr v0.5.0 // indirect
	github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
	github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
	github.com/multiformats/go-multibase v0.0.3 // indirect
	github.com/multiformats/go-multicodec v0.3.0 // indirect
	github.com/multiformats/go-multistream v0.2.2 // indirect
	github.com/multiformats/go-multibase v0.1.0 // indirect
	github.com/multiformats/go-multicodec v0.5.0 // indirect
	github.com/multiformats/go-multistream v0.3.3 // indirect
	github.com/multiformats/go-varint v0.0.6 // indirect
	github.com/nxadm/tail v1.4.8 // indirect
	github.com/olekukonko/tablewriter v0.0.5 // indirect
	github.com/opencontainers/runtime-spec v1.0.2 // indirect
	github.com/opentracing/opentracing-go v1.2.0 // indirect
	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
	github.com/pelletier/go-toml v1.9.4 // indirect
	github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect
	github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
@@ -231,13 +226,17 @@ require (
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect
	github.com/prometheus/client_model v0.2.0 // indirect
	github.com/prometheus/common v0.30.0 // indirect
	github.com/prometheus/common v0.33.0 // indirect
	github.com/prometheus/procfs v0.7.3 // indirect
	github.com/prometheus/tsdb v0.7.1 // indirect
	github.com/prometheus/tsdb v0.10.0 // indirect
	github.com/raulk/clock v1.1.0 // indirect
	github.com/raulk/go-watchdog v1.2.0 // indirect
	github.com/rjeczalik/notify v0.9.1 // indirect
	github.com/rs/cors v1.7.0 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/segmentio/fasthash v1.0.3 // indirect
	github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect
	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
	github.com/smartystreets/assertions v1.0.1 // indirect
	github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
	github.com/spaolacci/murmur3 v1.1.0 // indirect
	github.com/spf13/afero v1.8.2 // indirect
@@ -246,46 +245,52 @@ require (
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
	github.com/stretchr/objx v0.2.0 // indirect
	github.com/stretchr/testify v1.7.1 // indirect
	github.com/stretchr/testify v1.7.2 // indirect
	github.com/subosito/gotenv v1.2.0 // indirect
	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
	github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect
	github.com/thoas/go-funk v0.9.2 // indirect
	github.com/tidwall/gjson v1.14.0 // indirect
	github.com/tidwall/match v1.1.1 // indirect
	github.com/tidwall/pretty v1.2.0 // indirect
	github.com/tklauser/go-sysconf v0.3.5 // indirect
	github.com/tklauser/numcpus v0.2.2 // indirect
	github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
	github.com/urfave/cli/v2 v2.10.2 // indirect
	github.com/valyala/fastjson v1.6.3 // indirect
	github.com/wI2L/jsondiff v0.2.0 // indirect
	github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect
	github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2 // indirect
	github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect
	github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
	github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9 // indirect
	github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 // indirect
	github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect
	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
	github.com/yusufpapurcu/wmi v1.2.2 // indirect
	go.opencensus.io v0.23.0 // indirect
	go.opentelemetry.io/otel v0.20.0 // indirect
	go.opentelemetry.io/otel/metric v0.20.0 // indirect
	go.opentelemetry.io/otel/trace v0.20.0 // indirect
	go.opentelemetry.io/otel v1.7.0 // indirect
	go.opentelemetry.io/otel/trace v1.7.0 // indirect
	go.uber.org/atomic v1.9.0 // indirect
	go.uber.org/dig v1.10.0 // indirect
	go.uber.org/fx v1.13.1 // indirect
	go.uber.org/multierr v1.7.0 // indirect
	go.uber.org/zap v1.19.0 // indirect
	go.uber.org/dig v1.14.0 // indirect
	go.uber.org/fx v1.16.0 // indirect
	go.uber.org/multierr v1.8.0 // indirect
	go.uber.org/zap v1.21.0 // indirect
	go4.org v0.0.0-20200411211856-f5505b9728dd // indirect
	golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 // indirect
	golang.org/x/net v0.0.0-20220412020605-290c469a71a5 // indirect
	golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect
	golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
	golang.org/x/net v0.0.0-20220607020251-c690dde0001d // indirect
	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
	golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
	golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
	golang.org/x/text v0.3.7 // indirect
	golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
	golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023 // indirect
	golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
	golang.org/x/tools v0.1.10 // indirect
	golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
	google.golang.org/protobuf v1.28.0 // indirect
	gopkg.in/ini.v1 v1.66.4 // indirect
	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
	gopkg.in/urfave/cli.v1 v1.20.0 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
	lukechampine.com/blake3 v1.1.6 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	lukechampine.com/blake3 v1.1.7 // indirect
)

replace github.com/ethereum/go-ethereum v1.10.18 => github.com/vulcanize/go-ethereum v1.10.18-statediff-3.2.2
replace github.com/ethereum/go-ethereum v1.10.26 => github.com/cerc-io/go-ethereum v1.10.26-statediff-4.2.2-alpha
2  main.go
@@ -18,7 +18,7 @@ package main
import (
	"github.com/sirupsen/logrus"

	"github.com/vulcanize/ipld-eth-server/v3/cmd"
	"github.com/cerc-io/ipld-eth-server/v4/cmd"
)

func main() {
@@ -22,8 +22,8 @@ import (

	"github.com/ethereum/go-ethereum/rpc"

	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
	"github.com/vulcanize/ipld-eth-server/v3/pkg/serve"
	"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
	"github.com/cerc-io/ipld-eth-server/v4/pkg/serve"
)

// Client is used to subscribe to the ipld-eth-server ipld data stream
54  pkg/debug/backend.go  Normal file
@@ -0,0 +1,54 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package debug

import (
	"context"
	"errors"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers"
	"github.com/ethereum/go-ethereum/rpc"

	"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
)

var _ tracers.Backend = &Backend{}

var (
	errMethodNotSupported = errors.New("backend method not supported")
)

// Backend implements tracers.Backend interface
type Backend struct {
	eth.Backend
}

// StateAtBlock retrieves the state database associated with a certain block
func (b *Backend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive, preferDisk bool) (*state.StateDB, error) {
	rpcBlockNumber := rpc.BlockNumber(block.NumberU64())
	statedb, _, err := b.StateAndHeaderByNumberOrHash(ctx, rpc.BlockNumberOrHashWithNumber(rpcBlockNumber))
	return statedb, err
}

// StateAtTransaction returns the execution environment of a certain transaction
func (b *Backend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) {
	return nil, vm.BlockContext{}, nil, errMethodNotSupported
}
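Editor's note: this new Backend satisfies go-ethereum's tracers.Backend interface and is what backs the "debug" namespace enabled in cmd/serve.go above. A minimal sketch of how it might be turned into RPC APIs is shown below; the NewAPIs helper is a hypothetical illustration and is not part of this commit, while tracers.APIs is the upstream go-ethereum constructor for the tracing APIs.

package debug

import (
	"github.com/ethereum/go-ethereum/eth/tracers"
	"github.com/ethereum/go-ethereum/rpc"

	"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
)

// NewAPIs (hypothetical) wraps the server's eth.Backend in the debug Backend
// and hands it to go-ethereum's tracer API constructor, yielding rpc.API
// entries that can be registered under the "debug" namespace.
func NewAPIs(ethBackend eth.Backend) []rpc.API {
	return tracers.APIs(&Backend{Backend: ethBackend})
}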
@@ -42,7 +42,12 @@ import (
	"github.com/ethereum/go-ethereum/statediff"
	"github.com/sirupsen/logrus"

	"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
	"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
)

const (
	defaultEVMTimeout       = 30 * time.Second
	defaultStateDiffTimeout = 240 * time.Second
)

// APIName is the namespace for the watcher's eth api
@@ -66,7 +71,10 @@ type PublicEthAPI struct {
}

// NewPublicEthAPI creates a new PublicEthAPI with the provided underlying Backend
func NewPublicEthAPI(b *Backend, client *rpc.Client, supportsStateDiff, forwardEthCalls, forwardGetStorageAt, proxyOnError bool) (*PublicEthAPI, error) {
func NewPublicEthAPI(b *Backend, client *rpc.Client, supportsStateDiff, forwardEthCalls, forwardGetStorageAt, proxyOnError bool) (*P
	if b == nil {
		return nil, errors.New("ipld-eth-server must be configured with an ethereum backend")
	}
	if forwardEthCalls && client == nil {
		return nil, errors.New("ipld-eth-server is configured to forward eth_calls to proxy node but no proxy node is configured")
	}
@@ -125,11 +133,10 @@ func (pea *PublicEthAPI) GetHeaderByHash(ctx context.Context, hash common.Hash)
	}

	if pea.proxyOnError {
		if header, err := pea.ethClient.HeaderByHash(ctx, hash); header != nil && err == nil {
		var result map[string]interface{}
		if err := pea.rpc.CallContext(ctx, &result, "eth_getHeaderByHash", hash); result != nil && err == nil {
			go pea.writeStateDiffFor(hash)
			if res, err := pea.rpcMarshalHeader(header); err != nil {
				return res
			}
			return result
		}
	}

@@ -161,6 +168,7 @@ func (pea *PublicEthAPI) BlockNumber() hexutil.Uint64 {
// * When fullTx is true all transactions in the block are returned, otherwise
// only the transaction hash is returned.
func (pea *PublicEthAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) {
	logrus.Debug("Received getBlockByNumber request for number ", number.Int64())
	block, err := pea.B.BlockByNumber(ctx, number)
	if block != nil && err == nil {
		return pea.rpcMarshalBlock(block, true, fullTx)
@@ -179,6 +187,7 @@ func (pea *PublicEthAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockN
// GetBlockByHash returns the requested block. When fullTx is true all transactions in the block are returned in full
// detail, otherwise only the transaction hash is returned.
func (pea *PublicEthAPI) GetBlockByHash(ctx context.Context, hash common.Hash, fullTx bool) (map[string]interface{}, error) {
	logrus.Debug("Received getBlockByHash request for hash ", hash.Hex())
	block, err := pea.B.BlockByHash(ctx, hash)
	if block != nil && err == nil {
		return pea.rpcMarshalBlock(block, true, fullTx)
@@ -195,21 +204,17 @@ func (pea *PublicEthAPI) GetBlockByHash(ctx context.Context, hash common.Hash, f
}

// ChainId is the EIP-155 replay-protection chain id for the current ethereum chain config.
func (pea *PublicEthAPI) ChainId() (*hexutil.Big, error) {
	block, err := pea.B.CurrentBlock()
	if err != nil {
func (pea *PublicEthAPI) ChainId() *hexutil.Big {
	if pea.B.Config.ChainConfig.ChainID == nil || pea.B.Config.ChainConfig.ChainID.Cmp(big.NewInt(0)) <= 0 {
		if pea.proxyOnError {
			if id, err := pea.ethClient.ChainID(context.Background()); err == nil {
				return (*hexutil.Big)(id), nil
				return (*hexutil.Big)(id)
			}
		}
		return nil, err
		return nil
	}

	if config := pea.B.Config.ChainConfig; config.IsEIP155(block.Number()) {
		return (*hexutil.Big)(config.ChainID), nil
	}
	return nil, fmt.Errorf("chain not synced beyond EIP-155 replay-protection fork block")
	return (*hexutil.Big)(pea.B.Config.ChainConfig.ChainID)
}

/*
@@ -846,6 +851,11 @@ func (pea *PublicEthAPI) localGetProof(ctx context.Context, address common.Addre
	}, state.Error()
}

// GetSlice returns a slice of state or storage nodes from a provided root to a provided path and past it to a certain depth
func (pea *PublicEthAPI) GetSlice(ctx context.Context, path string, depth int, root common.Hash, storage bool) (*GetSliceResponse, error) {
	return pea.B.GetSlice(path, depth, root, storage)
}

// revertError is an API error that encompassas an EVM revertal with JSON error
// code and a binary data blob.
type revertError struct {
@@ -941,7 +951,7 @@ func (pea *PublicEthAPI) Call(ctx context.Context, args CallArgs, blockNrOrHash
		return hex, err
	}

	result, err := DoCall(ctx, pea.B, args, blockNrOrHash, overrides, 5*time.Second, pea.B.Config.RPCGasCap.Uint64())
	result, err := DoCall(ctx, pea.B, args, blockNrOrHash, overrides, defaultEVMTimeout, pea.B.Config.RPCGasCap.Uint64())

	// If the result contains a revert reason, try to unpack and return it.
	if err == nil {
@@ -959,7 +969,12 @@ func (pea *PublicEthAPI) Call(ctx context.Context, args CallArgs, blockNrOrHash
			return hex, nil
		}
	}

	if result != nil {
		return result.Return(), err
	} else {
		return nil, err
	}
}

func DoCall(ctx context.Context, b *Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
@@ -1068,7 +1083,7 @@ func (pea *PublicEthAPI) writeStateDiffAt(height int64) {
		return
	}
	// we use a separate context than the one provided by the client
	ctx, cancel := context.WithTimeout(context.Background(), 240*time.Second)
	ctx, cancel := context.WithTimeout(context.Background(), defaultStateDiffTimeout)
	defer cancel()
	var data json.RawMessage
	params := statediff.Params{
@@ -1091,7 +1106,7 @@ func (pea *PublicEthAPI) writeStateDiffFor(blockHash common.Hash) {
		return
	}
	// we use a separate context than the one provided by the client
	ctx, cancel := context.WithTimeout(context.Background(), 240*time.Second)
	ctx, cancel := context.WithTimeout(context.Background(), defaultStateDiffTimeout)
	defer cancel()
	var data json.RawMessage
	params := statediff.Params{
@@ -1112,11 +1127,13 @@ func (pea *PublicEthAPI) writeStateDiffFor(blockHash common.Hash) {
func (pea *PublicEthAPI) rpcMarshalBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) {
	fields, err := RPCMarshalBlock(b, inclTx, fullTx)
	if err != nil {
		logrus.Errorf("error RPC marshalling block with hash %s: %s", b.Hash().String(), err)
		return nil, err
	}
	if inclTx {
		td, err := pea.B.GetTd(b.Hash())
		if err != nil {
			logrus.Errorf("error getting td for block with hash and number %s, %s: %s", b.Hash().String(), b.Number().String(), err)
			return nil, err
		}
		fields["totalDifficulty"] = (*hexutil.Big)(td)
@ -21,6 +21,9 @@ import (
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth/test_helpers"
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
@ -35,11 +38,6 @@ import (
|
||||
"github.com/jmoiron/sqlx"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers"
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
|
||||
ethServerShared "github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -203,8 +201,8 @@ var _ = Describe("API", func() {
|
||||
ChainConfig: chainConfig,
|
||||
VMConfig: vm.Config{},
|
||||
RPCGasCap: big.NewInt(10000000000), // Max gas capacity for a rpc call.
|
||||
GroupCacheConfig: ðServerShared.GroupCacheConfig{
|
||||
StateDB: ethServerShared.GroupConfig{
|
||||
GroupCacheConfig: &shared.GroupCacheConfig{
|
||||
StateDB: shared.GroupConfig{
|
||||
Name: "api_test",
|
||||
CacheSizeInMB: 8,
|
||||
CacheExpiryInMins: 60,
|
||||
@ -334,6 +332,18 @@ var _ = Describe("API", func() {
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(block["baseFee"].(*big.Int)).To(Equal(baseFee))
|
||||
})
|
||||
It("Retrieves a block by number with uncles in correct order", func() {
|
||||
block, err := api.GetBlockByNumber(ctx, londonBlockNum, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedUncles := []common.Hash{
|
||||
test_helpers.MockLondonUncles[0].Hash(),
|
||||
test_helpers.MockLondonUncles[1].Hash(),
|
||||
}
|
||||
Expect(block["uncles"]).To(Equal(expectedUncles))
|
||||
Expect(block["sha3Uncles"]).To(Equal(test_helpers.MockLondonBlock.UncleHash()))
|
||||
Expect(block["hash"]).To(Equal(test_helpers.MockLondonBlock.Hash()))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("eth_getBlockByHash", func() {
|
||||
@ -375,6 +385,18 @@ var _ = Describe("API", func() {
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(block["baseFee"].(*big.Int)).To(Equal(baseFee))
|
||||
})
|
||||
It("Retrieves a block by hash with uncles in correct order", func() {
|
||||
block, err := api.GetBlockByHash(ctx, test_helpers.MockLondonBlock.Hash(), false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedUncles := []common.Hash{
|
||||
test_helpers.MockLondonUncles[0].Hash(),
|
||||
test_helpers.MockLondonUncles[1].Hash(),
|
||||
}
|
||||
Expect(block["uncles"]).To(Equal(expectedUncles))
|
||||
Expect(block["sha3Uncles"]).To(Equal(test_helpers.MockLondonBlock.UncleHash()))
|
||||
Expect(block["hash"]).To(Equal(test_helpers.MockLondonBlock.Hash()))
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
|
@ -17,13 +17,17 @@
|
||||
package eth
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
validator "github.com/cerc-io/eth-ipfs-state-validator/v4/pkg"
|
||||
ipfsethdb "github.com/cerc-io/ipfs-ethdb/v4/postgres"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
@ -39,16 +43,14 @@ import (
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
||||
ethServerShared "github.com/ethereum/go-ethereum/statediff/indexer/shared"
|
||||
sdtrie "github.com/ethereum/go-ethereum/statediff/trie_helpers"
|
||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"github.com/jmoiron/sqlx"
|
||||
log "github.com/sirupsen/logrus"
|
||||
validator "github.com/vulcanize/eth-ipfs-state-validator/v3/pkg"
|
||||
ipfsethdb "github.com/vulcanize/ipfs-ethdb/v3/postgres"
|
||||
|
||||
ethServerShared "github.com/ethereum/go-ethereum/statediff/indexer/shared"
|
||||
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -56,34 +58,44 @@ var (
|
||||
errNegativeBlockNumber = errors.New("negative block number not supported")
|
||||
errHeaderHashNotFound = errors.New("header for hash not found")
|
||||
errHeaderNotFound = errors.New("header not found")
|
||||
errMultipleHeadersForHash = errors.New("more than one headers for the given hash")
|
||||
errTxHashNotFound = errors.New("transaction for hash not found")
|
||||
errTxHashInMultipleBlocks = errors.New("transaction for hash found in more than one canonical block")
|
||||
|
||||
// errMissingSignature is returned if a block's extra-data section doesn't seem
|
||||
// to contain a 65 byte secp256k1 signature.
|
||||
errMissingSignature = errors.New("extra-data 65 byte signature suffix missing")
|
||||
)
|
||||
|
||||
const (
|
||||
RetrieveCanonicalBlockHashByNumber = `SELECT block_hash FROM eth.header_cids
|
||||
INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
|
||||
WHERE block_hash = (SELECT canonical_header_hash($1))`
|
||||
RetrieveCanonicalBlockHashByNumber = `SELECT block_hash
|
||||
FROM canonical_header_hash($1) AS block_hash
|
||||
WHERE block_hash IS NOT NULL`
|
||||
RetrieveCanonicalHeaderByNumber = `SELECT cid, data FROM eth.header_cids
|
||||
INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
|
||||
INNER JOIN public.blocks ON (
|
||||
header_cids.mh_key = blocks.key
|
||||
AND header_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE block_hash = (SELECT canonical_header_hash($1))`
|
||||
RetrieveTD = `SELECT CAST(td as Text) FROM eth.header_cids
|
||||
WHERE header_cids.block_hash = $1`
|
||||
RetrieveRPCTransaction = `SELECT blocks.data, block_hash, block_number, index FROM public.blocks, eth.transaction_cids, eth.header_cids
|
||||
RetrieveRPCTransaction = `SELECT blocks.data, header_id, transaction_cids.block_number, index
|
||||
FROM public.blocks, eth.transaction_cids
|
||||
WHERE blocks.key = transaction_cids.mh_key
|
||||
AND transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.tx_hash = $1`
|
||||
AND blocks.block_number = transaction_cids.block_number
|
||||
AND transaction_cids.tx_hash = $1
|
||||
AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))`
|
||||
RetrieveCodeHashByLeafKeyAndBlockHash = `SELECT code_hash FROM eth.state_accounts, eth.state_cids, eth.header_cids
|
||||
WHERE state_accounts.header_id = state_cids.header_id AND state_accounts.state_path = state_cids.state_path
|
||||
WHERE state_accounts.header_id = state_cids.header_id
|
||||
AND state_accounts.state_path = state_cids.state_path
|
||||
AND state_accounts.block_number = state_cids.block_number
|
||||
AND state_cids.header_id = header_cids.block_hash
|
||||
AND state_cids.block_number = header_cids.block_number
|
||||
AND state_leaf_key = $1
|
||||
AND block_number <= (SELECT block_number
|
||||
AND header_cids.block_number <= (SELECT block_number
|
||||
FROM eth.header_cids
|
||||
WHERE block_hash = $2)
|
||||
AND header_cids.block_hash = (SELECT canonical_header_hash(block_number))
|
||||
ORDER BY block_number DESC
|
||||
AND header_cids.block_hash = (SELECT canonical_header_hash(header_cids.block_number))
|
||||
ORDER BY header_cids.block_number DESC
|
||||
LIMIT 1`
|
||||
RetrieveCodeByMhKey = `SELECT data FROM public.blocks WHERE key = $1`
|
||||
)
|
||||
@ -182,7 +194,23 @@ func (b *Backend) HeaderByNumber(ctx context.Context, blockNumber rpc.BlockNumbe
|
||||
|
||||
// HeaderByHash gets the header for the provided block hash
|
||||
func (b *Backend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
|
||||
_, headerRLP, err := b.IPLDRetriever.RetrieveHeaderByHash(hash)
|
||||
// Begin tx
|
||||
tx, err := b.DB.Beginx()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
_, headerRLP, err := b.IPLDRetriever.RetrieveHeaderByHash(tx, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -215,6 +243,10 @@ func (b *Backend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.Bl
|
||||
return nil, errors.New("invalid arguments; neither block nor hash specified")
|
||||
}
|
||||
|
||||
func (b *Backend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// GetTd gets the total difficulty at the given block hash
|
||||
func (b *Backend) GetTd(blockHash common.Hash) (*big.Int, error) {
|
||||
var tdStr string
|
||||
@ -229,6 +261,11 @@ func (b *Backend) GetTd(blockHash common.Hash) (*big.Int, error) {
|
||||
return td, nil
|
||||
}
|
||||
|
||||
// ChainConfig returns the active chain configuration.
|
||||
func (b *Backend) ChainConfig() *params.ChainConfig {
|
||||
return b.Config.ChainConfig
|
||||
}
|
||||
|
||||
// CurrentBlock returns the current block
|
||||
func (b *Backend) CurrentBlock() (*types.Block, error) {
|
||||
block, err := b.BlockByNumber(context.Background(), rpc.LatestBlockNumber)
|
||||
@ -267,7 +304,7 @@ func (b *Backend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.Blo
|
||||
return nil, errors.New("invalid arguments; neither block nor hash specified")
|
||||
}
|
||||
|
||||
// BlockByNumber returns the requested canonical block.
|
||||
// BlockByNumber returns the requested canonical block
|
||||
func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber) (*types.Block, error) {
|
||||
var err error
|
||||
number := blockNumber.Int64()
|
||||
@ -289,6 +326,7 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
|
||||
if number < 0 {
|
||||
return nil, errNegativeBlockNumber
|
||||
}
|
||||
|
||||
// Get the canonical hash
|
||||
canonicalHash, err := b.GetCanonicalHash(uint64(number))
|
||||
if err != nil {
|
||||
@ -297,112 +335,12 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// Retrieve all the CIDs for the block
|
||||
// TODO: optimize this by retrieving iplds directly rather than the cids first (this is a remnant from when we fetched iplds through the ipfs blockservice interface)
|
||||
headerCID, uncleCIDs, txCIDs, rctCIDs, err := b.Retriever.RetrieveBlockByHash(canonicalHash)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
|
||||
return b.BlockByHash(ctx, canonicalHash)
|
||||
}
|
||||
|
||||
// Begin tx
|
||||
tx, err := b.DB.Beginx()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
// Fetch and decode the header IPLD
|
||||
var headerIPLD models.IPLDModel
|
||||
headerIPLD, err = b.Fetcher.FetchHeader(tx, headerCID)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
var header types.Header
|
||||
err = rlp.DecodeBytes(headerIPLD.Data, &header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Fetch and decode the uncle IPLDs
|
||||
var uncleIPLDs []models.IPLDModel
|
||||
uncleIPLDs, err = b.Fetcher.FetchUncles(tx, uncleCIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var uncles []*types.Header
|
||||
for _, uncleIPLD := range uncleIPLDs {
|
||||
var uncle types.Header
|
||||
err = rlp.DecodeBytes(uncleIPLD.Data, &uncle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
uncles = append(uncles, &uncle)
|
||||
}
|
||||
// Fetch and decode the transaction IPLDs
|
||||
var txIPLDs []models.IPLDModel
|
||||
txIPLDs, err = b.Fetcher.FetchTrxs(tx, txCIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var transactions []*types.Transaction
|
||||
for _, txIPLD := range txIPLDs {
|
||||
var transaction types.Transaction
|
||||
err = transaction.UnmarshalBinary(txIPLD.Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transactions = append(transactions, &transaction)
|
||||
}
|
||||
// Fetch and decode the receipt IPLDs
|
||||
var rctIPLDs []models.IPLDModel
|
||||
rctIPLDs, err = b.Fetcher.FetchRcts(tx, rctCIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var receipts []*types.Receipt
|
||||
for _, rctIPLD := range rctIPLDs {
|
||||
var receipt types.Receipt
|
||||
nodeVal, err := DecodeLeafNode(rctIPLD.Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = receipt.UnmarshalBinary(nodeVal)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
receipts = append(receipts, &receipt)
|
||||
}
|
||||
// Compose everything together into a complete block
|
||||
return types.NewBlock(&header, transactions, uncles, receipts, new(trie.Trie)), err
|
||||
}
|
||||
|
||||
// BlockByHash returns the requested block. When fullTx is true all transactions in the block are returned in full
|
||||
// detail, otherwise only the transaction hash is returned.
|
||||
// BlockByHash returns the requested block
|
||||
func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
|
||||
// Retrieve all the CIDs for the block
|
||||
headerCID, uncleCIDs, txCIDs, rctCIDs, err := b.Retriever.RetrieveBlockByHash(hash)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Begin tx
|
||||
tx, err := b.DB.Beginx()
|
||||
if err != nil {
|
||||
@ -419,105 +357,156 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
|
||||
}
|
||||
}()
|
||||
|
||||
// Fetch and decode the header IPLD
|
||||
var headerIPLD models.IPLDModel
|
||||
headerIPLD, err = b.Fetcher.FetchHeader(tx, headerCID)
|
||||
// Fetch header
|
||||
header, err := b.GetHeaderByBlockHash(tx, hash)
|
||||
if err != nil {
|
||||
log.Error("error fetching header: ", err)
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
var header types.Header
|
||||
err = rlp.DecodeBytes(headerIPLD.Data, &header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Fetch and decode the uncle IPLDs
|
||||
var uncleIPLDs []models.IPLDModel
|
||||
uncleIPLDs, err = b.Fetcher.FetchUncles(tx, uncleCIDs)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
var uncles []*types.Header
|
||||
for _, uncleIPLD := range uncleIPLDs {
|
||||
var uncle types.Header
|
||||
err = rlp.DecodeBytes(uncleIPLD.Data, &uncle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
uncles = append(uncles, &uncle)
|
||||
}
|
||||
// Fetch and decode the transaction IPLDs
|
||||
var txIPLDs []models.IPLDModel
|
||||
txIPLDs, err = b.Fetcher.FetchTrxs(tx, txCIDs)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
var transactions []*types.Transaction
|
||||
for _, txIPLD := range txIPLDs {
|
||||
var transaction types.Transaction
|
||||
err = transaction.UnmarshalBinary(txIPLD.Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transactions = append(transactions, &transaction)
|
||||
}
|
||||
// Fetch and decode the receipt IPLDs
|
||||
var rctIPLDs []models.IPLDModel
|
||||
rctIPLDs, err = b.Fetcher.FetchRcts(tx, rctCIDs)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
var receipts []*types.Receipt
|
||||
for _, rctIPLD := range rctIPLDs {
|
||||
var receipt types.Receipt
|
||||
nodeVal, err := DecodeLeafNode(rctIPLD.Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = receipt.UnmarshalBinary(nodeVal)
|
||||
if err != nil {
|
||||
blockNumber := header.Number.Uint64()
|
||||
|
||||
// Fetch uncles
|
||||
uncles, err := b.GetUnclesByBlockHashAndNumber(tx, hash, blockNumber)
|
||||
if err != nil && err != sql.ErrNoRows {
|
||||
log.Error("error fetching uncles: ", err)
|
||||
return nil, err
|
||||
}
|
||||
receipts = append(receipts, &receipt)
|
||||
|
||||
// When num. of uncles = 2,
|
||||
// Check if calculated uncle hash matches the one in header
|
||||
// If not, re-order the two uncles
|
||||
// Assumption: Max num. of uncles in mainnet = 2
|
||||
if len(uncles) == 2 {
|
||||
uncleHash := types.CalcUncleHash(uncles)
|
||||
if uncleHash != header.UncleHash {
|
||||
uncles[0], uncles[1] = uncles[1], uncles[0]
|
||||
|
||||
uncleHash = types.CalcUncleHash(uncles)
|
||||
// Check if uncle hash matches after re-ordering
|
||||
if uncleHash != header.UncleHash {
|
||||
log.Error("uncle hash mismatch for block hash: ", hash.Hex())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch transactions
|
||||
transactions, err := b.GetTransactionsByBlockHashAndNumber(tx, hash, blockNumber)
|
||||
if err != nil && err != sql.ErrNoRows {
|
||||
log.Error("error fetching transactions: ", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Fetch receipts
|
||||
receipts, err := b.GetReceiptsByBlockHashAndNumber(tx, hash, blockNumber)
|
||||
if err != nil && err != sql.ErrNoRows {
|
||||
log.Error("error fetching receipts: ", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Compose everything together into a complete block
|
||||
return types.NewBlock(&header, transactions, uncles, receipts, new(trie.Trie)), err
|
||||
return types.NewBlock(header, transactions, uncles, receipts, new(trie.Trie)), err
|
||||
}
|
||||
|
||||
// GetTransaction retrieves a tx by hash
|
||||
// It also returns the blockhash, blocknumber, and tx index associated with the transaction
|
||||
func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
|
||||
var tempTxStruct struct {
|
||||
Data []byte `db:"data"`
|
||||
BlockHash string `db:"block_hash"`
|
||||
BlockNumber uint64 `db:"block_number"`
|
||||
Index uint64 `db:"index"`
|
||||
}
|
||||
if err := b.DB.Get(&tempTxStruct, RetrieveRPCTransaction, txHash.String()); err != nil {
|
||||
return nil, common.Hash{}, 0, 0, err
|
||||
}
|
||||
var transaction types.Transaction
|
||||
if err := transaction.UnmarshalBinary(tempTxStruct.Data); err != nil {
|
||||
return nil, common.Hash{}, 0, 0, err
|
||||
}
|
||||
return &transaction, common.HexToHash(tempTxStruct.BlockHash), tempTxStruct.BlockNumber, tempTxStruct.Index, nil
|
||||
// GetHeaderByBlockHash retrieves header for a provided block hash
|
||||
func (b *Backend) GetHeaderByBlockHash(tx *sqlx.Tx, hash common.Hash) (*types.Header, error) {
|
||||
_, headerRLP, err := b.IPLDRetriever.RetrieveHeaderByHash(tx, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// GetReceipts retrieves receipts for provided block hash
|
||||
func (b *Backend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
|
||||
_, receiptBytes, txs, err := b.IPLDRetriever.RetrieveReceiptsByBlockHash(hash)
|
||||
header := new(types.Header)
|
||||
return header, rlp.DecodeBytes(headerRLP, header)
|
||||
}
|
||||
|
||||
// GetUnclesByBlockHash retrieves uncles for a provided block hash
|
||||
func (b *Backend) GetUnclesByBlockHash(tx *sqlx.Tx, hash common.Hash) ([]*types.Header, error) {
|
||||
_, uncleBytes, err := b.IPLDRetriever.RetrieveUnclesByBlockHash(tx, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
uncles := make([]*types.Header, len(uncleBytes))
|
||||
for i, bytes := range uncleBytes {
|
||||
var uncle types.Header
|
||||
err = rlp.DecodeBytes(bytes, &uncle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
uncles[i] = &uncle
|
||||
}
|
||||
|
||||
return uncles, nil
|
||||
}
|
||||
|
||||
// GetUnclesByBlockHashAndNumber retrieves uncles for a provided block hash and number
|
||||
func (b *Backend) GetUnclesByBlockHashAndNumber(tx *sqlx.Tx, hash common.Hash, number uint64) ([]*types.Header, error) {
|
||||
_, uncleBytes, err := b.IPLDRetriever.RetrieveUncles(tx, hash, number)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
uncles := make([]*types.Header, len(uncleBytes))
|
||||
for i, bytes := range uncleBytes {
|
||||
var uncle types.Header
|
||||
err = rlp.DecodeBytes(bytes, &uncle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
uncles[i] = &uncle
|
||||
}
|
||||
|
||||
return uncles, nil
|
||||
}
|
||||
|
||||
// GetTransactionsByBlockHash retrieves transactions for a provided block hash
|
||||
func (b *Backend) GetTransactionsByBlockHash(tx *sqlx.Tx, hash common.Hash) (types.Transactions, error) {
|
||||
_, transactionBytes, err := b.IPLDRetriever.RetrieveTransactionsByBlockHash(tx, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
txs := make(types.Transactions, len(transactionBytes))
|
||||
for i, txBytes := range transactionBytes {
|
||||
var tx types.Transaction
|
||||
if err := tx.UnmarshalBinary(txBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
txs[i] = &tx
|
||||
}
|
||||
|
||||
return txs, nil
|
||||
}
|
||||
|
||||
// GetTransactionsByBlockHashAndNumber retrieves transactions for a provided block hash and number
|
||||
func (b *Backend) GetTransactionsByBlockHashAndNumber(tx *sqlx.Tx, hash common.Hash, number uint64) (types.Transactions, error) {
|
||||
_, transactionBytes, err := b.IPLDRetriever.RetrieveTransactions(tx, hash, number)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
txs := make(types.Transactions, len(transactionBytes))
|
||||
for i, txBytes := range transactionBytes {
|
||||
var tx types.Transaction
|
||||
if err := tx.UnmarshalBinary(txBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
txs[i] = &tx
|
||||
}
|
||||
|
||||
return txs, nil
|
||||
}
|
||||
|
||||
// GetReceiptsByBlockHash retrieves receipts for a provided block hash
|
||||
func (b *Backend) GetReceiptsByBlockHash(tx *sqlx.Tx, hash common.Hash) (types.Receipts, error) {
|
||||
_, receiptBytes, txs, err := b.IPLDRetriever.RetrieveReceiptsByBlockHash(tx, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -533,9 +522,99 @@ func (b *Backend) GetReceipts(ctx context.Context, hash common.Hash) (types.Rece
|
||||
return rcts, nil
|
||||
}
|
||||
|
||||
// GetReceiptsByBlockHashAndNumber retrieves receipts for a provided block hash and number
|
||||
func (b *Backend) GetReceiptsByBlockHashAndNumber(tx *sqlx.Tx, hash common.Hash, number uint64) (types.Receipts, error) {
|
||||
_, receiptBytes, txs, err := b.IPLDRetriever.RetrieveReceipts(tx, hash, number)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rcts := make(types.Receipts, len(receiptBytes))
|
||||
for i, rctBytes := range receiptBytes {
|
||||
rct := new(types.Receipt)
|
||||
if err := rct.UnmarshalBinary(rctBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rct.TxHash = txs[i]
|
||||
rcts[i] = rct
|
||||
}
|
||||
return rcts, nil
|
||||
}
|
||||
|
||||
// GetTransaction retrieves a tx by hash
|
||||
// It also returns the blockhash, blocknumber, and tx index associated with the transaction
|
||||
func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
|
||||
type txRes struct {
|
||||
Data []byte `db:"data"`
|
||||
HeaderID string `db:"header_id"`
|
||||
BlockNumber uint64 `db:"block_number"`
|
||||
Index uint64 `db:"index"`
|
||||
}
|
||||
var res = make([]txRes, 0)
|
||||
if err := b.DB.Select(&res, RetrieveRPCTransaction, txHash.String()); err != nil {
|
||||
return nil, common.Hash{}, 0, 0, err
|
||||
}
|
||||
|
||||
if len(res) == 0 {
|
||||
return nil, common.Hash{}, 0, 0, errTxHashNotFound
|
||||
} else if len(res) > 1 {
|
||||
// a transaction can only be part of one canonical block
|
||||
return nil, common.Hash{}, 0, 0, errTxHashInMultipleBlocks
|
||||
}
|
||||
|
||||
var transaction types.Transaction
|
||||
if err := transaction.UnmarshalBinary(res[0].Data); err != nil {
|
||||
return nil, common.Hash{}, 0, 0, err
|
||||
}
|
||||
|
||||
return &transaction, common.HexToHash(res[0].HeaderID), res[0].BlockNumber, res[0].Index, nil
|
||||
}
|
||||
|
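A hedged sketch of how a caller might branch on the two sentinel errors that the rewritten GetTransaction can now return. The helper name lookupTx is hypothetical, and it is written as if it lives alongside Backend in package eth (so it can see the unexported errors and the package's existing context, fmt, common, and logrus imports).

func lookupTx(ctx context.Context, b *Backend, txHash common.Hash) error {
	txn, blockHash, blockNum, index, err := b.GetTransaction(ctx, txHash)
	switch err {
	case nil:
		log.Debugf("tx %s mined in block %s (#%d) at index %d",
			txn.Hash().Hex(), blockHash.Hex(), blockNum, index)
		return nil
	case errTxHashNotFound:
		// no canonical block contains this transaction
		return fmt.Errorf("transaction %s not found in any canonical block", txHash.Hex())
	case errTxHashInMultipleBlocks:
		// data inconsistency: the hash resolved to more than one canonical block
		return fmt.Errorf("transaction %s appears in multiple canonical blocks", txHash.Hex())
	default:
		return err
	}
}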
||||
// GetReceipts retrieves receipts for provided block hash
|
||||
func (b *Backend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
|
||||
// Begin tx
|
||||
tx, err := b.DB.Beginx()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
headerCID, err := b.Retriever.RetrieveHeaderCIDByHash(tx, hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blockNumber, _ := strconv.ParseUint(string(headerCID.BlockNumber), 10, 64)
|
||||
|
||||
return b.GetReceiptsByBlockHashAndNumber(tx, hash, blockNumber)
|
||||
}
|
||||
|
||||
// GetLogs returns all the logs for the given block hash
|
||||
func (b *Backend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
|
||||
_, receiptBytes, txs, err := b.IPLDRetriever.RetrieveReceiptsByBlockHash(hash)
|
||||
func (b *Backend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
|
||||
// Begin tx
|
||||
tx, err := b.DB.Beginx()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
_, receiptBytes, txs, err := b.IPLDRetriever.RetrieveReceipts(tx, hash, number)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -811,6 +890,192 @@ func (b *Backend) GetStorageByHash(ctx context.Context, address common.Address,
|
||||
return storageRlp, err
|
||||
}
|
||||
|
||||
func (b *Backend) GetSlice(path string, depth int, root common.Hash, storage bool) (*GetSliceResponse, error) {
|
||||
response := new(GetSliceResponse)
|
||||
response.init(path, depth, root)
|
||||
|
||||
// Metadata fields
|
||||
metaData := metaDataFields{}
|
||||
|
||||
startTime := makeTimestamp()
|
||||
t, _ := b.StateDatabase.OpenTrie(root)
|
||||
metaData.trieLoadingTime = makeTimestamp() - startTime
|
||||
|
||||
// Convert the head hex path to a decoded byte path
|
||||
headPath := common.FromHex(path)
|
||||
|
||||
// Get Stem nodes
|
||||
err := b.getSliceStem(headPath, t, response, &metaData, storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get Head node
|
||||
err = b.getSliceHead(headPath, t, response, &metaData, storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if depth > 0 {
|
||||
// Get Slice nodes
|
||||
err = b.getSliceTrie(headPath, t, response, &metaData, depth, storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
response.populateMetaData(metaData)
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
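A hedged sketch of driving the backend method above directly for the state trie. The field accesses mirror the usage in getSliceStem, getSliceHead, and fillSliceNodeData (TrieNodes.Stem/Head/Slice and Leaves); the concrete GetSliceResponse definition is not shown in this change, so treat the shapes, the helper name, and the path/depth values as assumptions.

func dumpStateSlice(b *Backend, stateRoot common.Hash) error {
	// Nodes along hex path "06" of the state trie plus the subtrie two levels
	// below the head node; storage=false selects the state trie.
	resp, err := b.GetSlice("06", 2, stateRoot, false)
	if err != nil {
		return err
	}
	for nodeHash, rlpHex := range resp.TrieNodes.Slice {
		log.Debugf("slice node %s: %d hex chars of RLP", nodeHash, len(rlpHex))
	}
	for leafKey, account := range resp.Leaves {
		log.Debugf("contract leaf %s -> storage root %s, %d hex chars of code",
			leafKey, account.StorageRoot, len(account.EVMCode))
	}
	return nil
}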
||||
func (b *Backend) getSliceStem(headPath []byte, t state.Trie, response *GetSliceResponse, metaData *metaDataFields, storage bool) error {
|
||||
leavesFetchTime := int64(0)
|
||||
totalStemStartTime := makeTimestamp()
|
||||
|
||||
for i := 0; i < len(headPath); i++ {
|
||||
// Create path for each node along the stem
|
||||
nodePath := make([]byte, len(headPath[:i]))
|
||||
copy(nodePath, headPath[:i])
|
||||
|
||||
rawNode, _, err := t.(*trie.StateTrie).TryGetNode(trie.HexToCompact(nodePath))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Skip if node not found
|
||||
if rawNode == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
node, nodeElements, err := ResolveNode(nodePath, rawNode, b.StateDatabase.TrieDB())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
leafFetchTime, err := fillSliceNodeData(b.EthDB, response.TrieNodes.Stem, response.Leaves, node, nodeElements, storage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update metadata
|
||||
depthReached := len(node.Path) - len(headPath)
|
||||
if depthReached > metaData.maxDepth {
|
||||
metaData.maxDepth = depthReached
|
||||
}
|
||||
if node.NodeType == sdtypes.Leaf {
|
||||
metaData.leafCount++
|
||||
}
|
||||
leavesFetchTime += leafFetchTime
|
||||
}
|
||||
|
||||
// Update metadata time metrics
|
||||
totalStemTime := makeTimestamp() - totalStemStartTime
|
||||
metaData.sliceNodesFetchTime = totalStemTime - leavesFetchTime
|
||||
metaData.leavesFetchTime += leavesFetchTime
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Backend) getSliceHead(headPath []byte, t state.Trie, response *GetSliceResponse, metaData *metaDataFields, storage bool) error {
|
||||
totalHeadStartTime := makeTimestamp()
|
||||
|
||||
rawNode, _, err := t.(*trie.StateTrie).TryGetNode(trie.HexToCompact(headPath))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Skip if node not found
|
||||
if rawNode == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
node, nodeElements, err := ResolveNode(headPath, rawNode, b.StateDatabase.TrieDB())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
leafFetchTime, err := fillSliceNodeData(b.EthDB, response.TrieNodes.Head, response.Leaves, node, nodeElements, storage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update metadata
|
||||
depthReached := len(node.Path) - len(headPath)
|
||||
if depthReached > metaData.maxDepth {
|
||||
metaData.maxDepth = depthReached
|
||||
}
|
||||
if node.NodeType == sdtypes.Leaf {
|
||||
metaData.leafCount++
|
||||
}
|
||||
|
||||
// Update metadata time metrics
|
||||
totalHeadTime := makeTimestamp() - totalHeadStartTime
|
||||
metaData.stemNodesFetchTime = totalHeadTime - leafFetchTime
|
||||
metaData.leavesFetchTime += leafFetchTime
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Backend) getSliceTrie(headPath []byte, t state.Trie, response *GetSliceResponse, metaData *metaDataFields, depth int, storage bool) error {
|
||||
it, timeTaken := getIteratorAtPath(t, headPath)
|
||||
metaData.trieLoadingTime += timeTaken
|
||||
|
||||
leavesFetchTime := int64(0)
|
||||
totalSliceStartTime := makeTimestamp()
|
||||
|
||||
headPathLen := len(headPath)
|
||||
maxPathLen := headPathLen + depth
|
||||
descend := true
|
||||
for it.Next(descend) {
|
||||
pathLen := len(it.Path())
|
||||
|
||||
// End iteration on coming out of subtrie
|
||||
if pathLen <= headPathLen {
|
||||
break
|
||||
}
|
||||
|
||||
// Avoid descending further if max depth reached
|
||||
if pathLen >= maxPathLen {
|
||||
descend = false
|
||||
} else {
|
||||
descend = true
|
||||
}
|
||||
|
||||
// Skip value nodes
|
||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
||||
continue
|
||||
}
|
||||
|
||||
node, nodeElements, err := sdtrie.ResolveNode(it, b.StateDatabase.TrieDB())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
leafFetchTime, err := fillSliceNodeData(b.EthDB, response.TrieNodes.Slice, response.Leaves, node, nodeElements, storage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update metadata
|
||||
depthReached := len(node.Path) - len(headPath)
|
||||
if depthReached > metaData.maxDepth {
|
||||
metaData.maxDepth = depthReached
|
||||
}
|
||||
if node.NodeType == sdtypes.Leaf {
|
||||
metaData.leafCount++
|
||||
}
|
||||
leavesFetchTime += leafFetchTime
|
||||
}
|
||||
|
||||
// Update metadata time metrics
|
||||
totalSliceTime := makeTimestamp() - totalSliceStartTime
|
||||
metaData.sliceNodesFetchTime = totalSliceTime - leavesFetchTime
|
||||
metaData.leavesFetchTime += leavesFetchTime
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Engine satisfies the ChainContext interface
|
||||
func (b *Backend) Engine() consensus.Engine {
|
||||
// TODO: we need to support more than just ethash based engines
|
||||
@ -832,8 +1097,8 @@ func (b *Backend) ValidateTrie(stateRoot common.Hash) error {
|
||||
}
|
||||
|
||||
// RPCGasCap returns the configured gas cap for the rpc server
|
||||
func (b *Backend) RPCGasCap() *big.Int {
|
||||
return b.Config.RPCGasCap
|
||||
func (b *Backend) RPCGasCap() uint64 {
|
||||
return b.Config.RPCGasCap.Uint64()
|
||||
}
|
||||
|
||||
func (b *Backend) SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription {
|
||||
|
@ -17,20 +17,32 @@
|
||||
package eth
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
nodeiter "github.com/cerc-io/go-eth-state-node-iterator"
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
sdtrie "github.com/ethereum/go-ethereum/statediff/trie_helpers"
|
||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
)
|
||||
|
||||
var nullHashBytes = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")
|
||||
var emptyCodeHash = crypto.Keccak256([]byte{})
|
||||
|
||||
// RPCMarshalHeader converts the given header to the RPC output.
|
||||
// The upstream version of this function is internal to go-ethereum, so we have to make our own version here...
|
||||
func RPCMarshalHeader(head *types.Header) map[string]interface{} {
|
||||
@ -166,6 +178,11 @@ func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
|
||||
result.TransactionIndex = (*hexutil.Uint64)(&index)
|
||||
}
|
||||
switch tx.Type() {
|
||||
case types.LegacyTxType:
|
||||
// if a legacy transaction has an EIP-155 chain id, include it explicitly
|
||||
if id := tx.ChainId(); id.Sign() != 0 {
|
||||
result.ChainID = (*hexutil.Big)(id)
|
||||
}
|
||||
case types.AccessListTxType:
|
||||
al := tx.AccessList()
|
||||
result.Accesses = &al
|
||||
@ -295,3 +312,98 @@ func toBlockNumArg(number *big.Int) string {
|
||||
}
|
||||
return hexutil.EncodeBig(number)
|
||||
}
|
||||
|
||||
func getIteratorAtPath(t state.Trie, startKey []byte) (trie.NodeIterator, int64) {
|
||||
startTime := makeTimestamp()
|
||||
var it trie.NodeIterator
|
||||
|
||||
if len(startKey)%2 != 0 {
|
||||
// Zero-pad for odd-length keys, required by HexToKeyBytes()
|
||||
startKey = append(startKey, 0)
|
||||
it = t.NodeIterator(nodeiter.HexToKeyBytes(startKey))
|
||||
} else {
|
||||
it = t.NodeIterator(nodeiter.HexToKeyBytes(startKey))
|
||||
// Step to the required node (not required if original startKey was odd-length)
|
||||
it.Next(true)
|
||||
}
|
||||
|
||||
return it, makeTimestamp() - startTime
|
||||
}
|
||||
|
||||
func fillSliceNodeData(
|
||||
ethDB ethdb.KeyValueReader,
|
||||
nodesMap map[string]string,
|
||||
leavesMap map[string]GetSliceResponseAccount,
|
||||
node sdtypes.StateNode,
|
||||
nodeElements []interface{},
|
||||
storage bool,
|
||||
) (int64, error) {
|
||||
// Populate the nodes map
|
||||
nodeValHash := crypto.Keccak256Hash(node.NodeValue)
|
||||
nodesMap[common.Bytes2Hex(nodeValHash.Bytes())] = common.Bytes2Hex(node.NodeValue)
|
||||
|
||||
// Extract account data if it's a Leaf node
|
||||
leafStartTime := makeTimestamp()
|
||||
if node.NodeType == sdtypes.Leaf && !storage {
|
||||
stateLeafKey, storageRoot, code, err := extractContractAccountInfo(ethDB, node, nodeElements)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("GetSlice account lookup error: %s", err.Error())
|
||||
}
|
||||
|
||||
if len(code) > 0 {
|
||||
// Populate the leaves map
|
||||
leavesMap[stateLeafKey] = GetSliceResponseAccount{
|
||||
StorageRoot: storageRoot,
|
||||
EVMCode: common.Bytes2Hex(code),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return makeTimestamp() - leafStartTime, nil
|
||||
}
|
||||
|
||||
func extractContractAccountInfo(ethDB ethdb.KeyValueReader, node sdtypes.StateNode, nodeElements []interface{}) (string, string, []byte, error) {
|
||||
var account types.StateAccount
|
||||
if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
|
||||
return "", "", nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", node.Path, err)
|
||||
}
|
||||
|
||||
if bytes.Equal(account.CodeHash, emptyCodeHash) {
|
||||
return "", "", nil, nil
|
||||
}
|
||||
|
||||
// Extract state leaf key
|
||||
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
|
||||
valueNodePath := append(node.Path, partialPath...)
|
||||
encodedPath := trie.HexToCompact(valueNodePath)
|
||||
leafKey := encodedPath[1:]
|
||||
stateLeafKeyString := common.BytesToHash(leafKey).String()
|
||||
|
||||
storageRootString := account.Root.String()
|
||||
|
||||
// Extract codeHash and get code
|
||||
codeHash := common.BytesToHash(account.CodeHash)
|
||||
codeBytes := rawdb.ReadCode(ethDB, codeHash)
|
||||
|
||||
return stateLeafKeyString, storageRootString, codeBytes, nil
|
||||
}
|
||||
|
||||
func ResolveNode(path []byte, node []byte, trieDB *trie.Database) (sdtypes.StateNode, []interface{}, error) {
|
||||
nodePath := make([]byte, len(path))
|
||||
copy(nodePath, path)
|
||||
|
||||
var nodeElements []interface{}
|
||||
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
|
||||
return sdtypes.StateNode{}, nil, err
|
||||
}
|
||||
|
||||
ty, err := sdtrie.CheckKeyType(nodeElements)
|
||||
if err != nil {
|
||||
return sdtypes.StateNode{}, nil, err
|
||||
}
|
||||
return sdtypes.StateNode{
|
||||
NodeType: ty,
|
||||
Path: nodePath,
|
||||
NodeValue: node,
|
||||
}, nodeElements, nil
|
||||
}
|
||||
|
@ -19,6 +19,7 @@ package eth
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
@ -29,7 +30,7 @@ import (
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
|
||||
)
|
||||
|
||||
// Retriever interface for substituting mocks in tests
|
||||
@ -57,7 +58,7 @@ func (IPLDModelRecord) TableName() string {
|
||||
type HeaderCIDRecord struct {
|
||||
CID string `gorm:"column:cid"`
|
||||
BlockHash string `gorm:"primaryKey"`
|
||||
BlockNumber string
|
||||
BlockNumber string `gorm:"primaryKey"`
|
||||
ParentHash string
|
||||
Timestamp uint64
|
||||
StateRoot string
|
||||
@ -70,8 +71,8 @@ type HeaderCIDRecord struct {
|
||||
|
||||
// gorm doesn't check if foreign key exists in database.
|
||||
// It is required to eager load relations using preload.
|
||||
TransactionCIDs []TransactionCIDRecord `gorm:"foreignKey:HeaderID;references:BlockHash"`
|
||||
IPLD IPLDModelRecord `gorm:"foreignKey:MhKey;references:Key"`
|
||||
TransactionCIDs []TransactionCIDRecord `gorm:"foreignKey:HeaderID,BlockNumber;references:BlockHash,BlockNumber"`
|
||||
IPLD IPLDModelRecord `gorm:"foreignKey:MhKey,BlockNumber;references:Key,BlockNumber"`
|
||||
}
|
||||
|
||||
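A hedged sketch of eager-loading through the new composite (block hash, block number) associations declared in the struct tags above. loadHeaderWithTxs is a hypothetical helper and db an assumed, already-initialized *gorm.DB; neither is introduced by this change.

func loadHeaderWithTxs(db *gorm.DB, blockHash common.Hash) (HeaderCIDRecord, error) {
	var header HeaderCIDRecord
	// Preload and Joins follow the foreignKey/references pairs on the struct tags,
	// so both block_hash and block_number participate in the join conditions.
	err := db.Preload("TransactionCIDs").
		Joins("IPLD").
		First(&header, "header_cids.block_hash = ?", blockHash.String()).Error
	return header, err
}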
// TableName overrides the table name used by HeaderCIDRecord
|
||||
@ -82,12 +83,13 @@ func (HeaderCIDRecord) TableName() string {
|
||||
type TransactionCIDRecord struct {
|
||||
CID string `gorm:"column:cid"`
|
||||
TxHash string `gorm:"primaryKey"`
|
||||
BlockNumber string `gorm:"primaryKey"`
|
||||
HeaderID string `gorm:"column:header_id"`
|
||||
Index int64
|
||||
Src string
|
||||
Dst string
|
||||
MhKey string
|
||||
IPLD IPLDModelRecord `gorm:"foreignKey:MhKey;references:Key"`
|
||||
IPLD IPLDModelRecord `gorm:"foreignKey:MhKey,BlockNumber;references:Key,BlockNumber"`
|
||||
}
|
||||
|
||||
// TableName overrides the table name used by TransactionCIDRecord
|
||||
@ -189,7 +191,7 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
|
||||
}
|
||||
// Retrieve cached receipt CIDs
|
||||
if !filter.ReceiptFilter.Off {
|
||||
cw.Receipts, err = ecr.RetrieveRctCIDsByHeaderID(tx, filter.ReceiptFilter, header.BlockHash, trxHashes)
|
||||
cw.Receipts, err = ecr.RetrieveRctCIDs(tx, filter.ReceiptFilter, 0, header.BlockHash, trxHashes)
|
||||
if err != nil {
|
||||
log.Error("receipt cid retrieval error")
|
||||
return nil, true, err
|
||||
@ -231,8 +233,8 @@ func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]m
|
||||
log.Debug("retrieving header cids for block ", blockNumber)
|
||||
headers := make([]models.HeaderModel, 0)
|
||||
pgStr := `SELECT CAST(block_number as Text), block_hash, parent_hash, cid, mh_key, CAST(td as Text), node_id,
|
||||
CAST(reward as Text), state_root, uncle_root, tx_root, receipt_root, bloom, timestamp, times_validated,
|
||||
coinbase FROM eth.header_cids
|
||||
CAST(reward as Text), state_root, uncle_root, tx_root, receipt_root, bloom, timestamp, times_validated, coinbase
|
||||
FROM eth.header_cids
|
||||
WHERE block_number = $1`
|
||||
return headers, tx.Select(&headers, pgStr, blockNumber)
|
||||
}
|
||||
@ -241,7 +243,8 @@ func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]m
|
||||
func (ecr *CIDRetriever) RetrieveUncleCIDsByHeaderID(tx *sqlx.Tx, headerID string) ([]models.UncleModel, error) {
|
||||
log.Debug("retrieving uncle cids for block id ", headerID)
|
||||
headers := make([]models.UncleModel, 0)
|
||||
pgStr := `SELECT header_id,block_hash,parent_hash,cid,mh_key, CAST(reward as text) FROM eth.uncle_cids
|
||||
pgStr := `SELECT CAST(block_number as Text), header_id, block_hash, parent_hash, cid, mh_key, CAST(reward as text)
|
||||
FROM eth.uncle_cids
|
||||
WHERE header_id = $1`
|
||||
return headers, tx.Select(&headers, pgStr, headerID)
|
||||
}
|
||||
@ -253,9 +256,14 @@ func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID
|
||||
args := make([]interface{}, 0, 3)
|
||||
results := make([]models.TxModel, 0)
|
||||
id := 1
|
||||
pgStr := fmt.Sprintf(`SELECT transaction_cids.tx_hash, transaction_cids.header_id,transaction_cids.cid, transaction_cids.mh_key,
|
||||
transaction_cids.dst, transaction_cids.src, transaction_cids.index, transaction_cids.tx_data
|
||||
FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
|
||||
pgStr := fmt.Sprintf(`SELECT CAST(transaction_cids.block_number as Text), transaction_cids.tx_hash,
|
||||
transaction_cids.header_id, transaction_cids.cid, transaction_cids.mh_key, transaction_cids.dst,
|
||||
transaction_cids.src, transaction_cids.index, transaction_cids.tx_data, transaction_cids.tx_type
|
||||
FROM eth.transaction_cids
|
||||
INNER JOIN eth.header_cids ON (
|
||||
transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
)
|
||||
WHERE header_cids.block_hash = $%d`, id)
|
||||
args = append(args, headerID)
|
||||
id++
|
||||
@ -350,45 +358,33 @@ func receiptFilterConditions(id *int, pgStr string, args []interface{}, rctFilte
|
||||
return pgStr, args
|
||||
}
|
||||
|
||||
// RetrieveRctCIDsByHeaderID retrieves and returns all of the rct cids at the provided header ID that conform to the provided
|
||||
// filter parameters and correspond to the provided tx ids
|
||||
func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter ReceiptFilter, headerID string, trxHashes []string) ([]models.ReceiptModel, error) {
|
||||
log.Debug("retrieving receipt cids for header id ", headerID)
|
||||
args := make([]interface{}, 0, 4)
|
||||
pgStr := `SELECT receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key,
|
||||
receipt_cids.contract, receipt_cids.contract_hash
|
||||
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
||||
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND transaction_cids.header_id = header_cids.block_hash
|
||||
AND header_cids.block_hash = $1`
|
||||
id := 2
|
||||
args = append(args, headerID)
|
||||
|
||||
pgStr, args = receiptFilterConditions(&id, pgStr, args, rctFilter, trxHashes)
|
||||
|
||||
pgStr += ` ORDER BY transaction_cids.index`
|
||||
receiptCIDs := make([]models.ReceiptModel, 0)
|
||||
return receiptCIDs, tx.Select(&receiptCIDs, pgStr, args...)
|
||||
}
|
||||
|
||||
// RetrieveFilteredGQLLogs retrieves and returns all the log cIDs provided blockHash that conform to the provided
|
||||
// RetrieveFilteredGQLLogs retrieves and returns all the log CIDs for the provided blockHash that conform to the provided
|
||||
// filter parameters.
|
||||
func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockHash *common.Hash) ([]LogResult, error) {
|
||||
log.Debug("retrieving log cids for receipt ids")
|
||||
func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockHash *common.Hash, blockNumber *big.Int) ([]LogResult, error) {
|
||||
log.Debug("retrieving log cids for receipt ids with block hash", blockHash.String())
|
||||
args := make([]interface{}, 0, 4)
|
||||
id := 1
|
||||
pgStr := `SELECT eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
|
||||
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
|
||||
eth.log_cids.log_data, eth.transaction_cids.tx_hash, data, eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status
|
||||
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids, public.blocks
|
||||
pgStr := `SELECT CAST(eth.log_cids.block_number as Text), eth.log_cids.header_id as block_hash,
|
||||
eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id, eth.log_cids.address,
|
||||
eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3, eth.log_cids.log_data,
|
||||
data, eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status, eth.receipt_cids.tx_id AS tx_hash
|
||||
FROM eth.log_cids, eth.receipt_cids, public.blocks
|
||||
WHERE eth.log_cids.rct_id = receipt_cids.tx_id
|
||||
AND receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND transaction_cids.header_id = header_cids.block_hash
|
||||
AND log_cids.leaf_mh_key = blocks.key AND header_cids.block_hash = $1`
|
||||
AND eth.log_cids.header_id = receipt_cids.header_id
|
||||
AND eth.log_cids.block_number = receipt_cids.block_number
|
||||
AND log_cids.leaf_mh_key = blocks.key
|
||||
AND log_cids.block_number = blocks.block_number
|
||||
AND receipt_cids.header_id = $1`
|
||||
|
||||
args = append(args, blockHash.String())
|
||||
id++
|
||||
|
||||
if blockNumber != nil {
|
||||
pgStr += ` AND receipt_cids.block_number = $2`
|
||||
id++
|
||||
args = append(args, blockNumber.Int64())
|
||||
}
|
||||
|
||||
pgStr, args = logFilterCondition(&id, pgStr, args, rctFilter)
|
||||
pgStr += ` ORDER BY log_cids.index`
|
||||
|
||||
@ -401,19 +397,24 @@ func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptF
|
||||
return logCIDs, nil
|
||||
}
|
||||
|
||||
// RetrieveFilteredLog retrieves and returns all the log cIDs provided blockHeight or blockHash that conform to the provided
|
||||
// RetrieveFilteredLog retrieves and returns all the log CIDs for the provided blockHeight or blockHash that conform to the provided
|
||||
// filter parameters.
|
||||
func (ecr *CIDRetriever) RetrieveFilteredLog(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash) ([]LogResult, error) {
|
||||
log.Debug("retrieving log cids for receipt ids")
|
||||
args := make([]interface{}, 0, 4)
|
||||
pgStr := `SELECT eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
|
||||
pgStr := `SELECT CAST(eth.log_cids.block_number as Text), eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
|
||||
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
|
||||
eth.log_cids.log_data, eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index,
|
||||
header_cids.block_hash, CAST(header_cids.block_number as Text)
|
||||
eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status, header_cids.block_hash
|
||||
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
||||
WHERE eth.log_cids.rct_id = receipt_cids.tx_id
|
||||
AND eth.log_cids.header_id = eth.receipt_cids.header_id
|
||||
AND eth.log_cids.block_number = eth.receipt_cids.block_number
|
||||
AND receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND transaction_cids.header_id = header_cids.block_hash`
|
||||
AND receipt_cids.header_id = transaction_cids.header_id
|
||||
AND receipt_cids.block_number = transaction_cids.block_number
|
||||
AND transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number`
|
||||
id := 1
|
||||
if blockNumber > 0 {
|
||||
pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
|
||||
@ -440,22 +441,26 @@ func (ecr *CIDRetriever) RetrieveFilteredLog(tx *sqlx.Tx, rctFilter ReceiptFilte
|
||||
|
||||
// RetrieveRctCIDs retrieves and returns all of the rct cids at the provided blockheight or block hash that conform to the provided
|
||||
// filter parameters and correspond to the provided tx ids
|
||||
func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, txHashes []string) ([]models.ReceiptModel, error) {
|
||||
func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash string, txHashes []string) ([]models.ReceiptModel, error) {
|
||||
log.Debug("retrieving receipt cids for block ", blockNumber)
|
||||
args := make([]interface{}, 0, 5)
|
||||
pgStr := `SELECT receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key, receipt_cids.tx_id
|
||||
pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.header_id, receipt_cids.tx_id,
|
||||
receipt_cids.leaf_cid, receipt_cids.leaf_mh_key, receipt_cids.contract, receipt_cids.contract_hash
|
||||
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
||||
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND transaction_cids.header_id = header_cids.block_hash`
|
||||
AND receipt_cids.header_id = transaction_cids.header_id
|
||||
AND receipt_cids.block_number = transaction_cids.block_number
|
||||
AND transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number`
|
||||
id := 1
|
||||
if blockNumber > 0 {
|
||||
pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
|
||||
args = append(args, blockNumber)
|
||||
id++
|
||||
}
|
||||
if blockHash != nil {
|
||||
if blockHash != "" {
|
||||
pgStr += fmt.Sprintf(` AND header_cids.block_hash = $%d`, id)
|
||||
args = append(args, blockHash.String())
|
||||
args = append(args, blockHash)
|
||||
id++
|
||||
}
|
||||
|
||||
@ -479,9 +484,13 @@ func hasTopics(topics [][]string) bool {
|
||||
func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, headerID string) ([]models.StateNodeModel, error) {
|
||||
log.Debug("retrieving state cids for header id ", headerID)
|
||||
args := make([]interface{}, 0, 2)
|
||||
pgStr := `SELECT state_cids.header_id,
|
||||
pgStr := `SELECT CAST(state_cids.block_number as Text), state_cids.header_id,
|
||||
state_cids.state_leaf_key, state_cids.node_type, state_cids.cid, state_cids.mh_key, state_cids.state_path
|
||||
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
|
||||
FROM eth.state_cids
|
||||
INNER JOIN eth.header_cids ON (
|
||||
state_cids.header_id = header_cids.block_hash
|
||||
AND state_cids.block_number = header_cids.block_number
|
||||
)
|
||||
WHERE header_cids.block_hash = $1`
|
||||
args = append(args, headerID)
|
||||
addrLen := len(stateFilter.Addresses)
|
||||
@ -504,11 +513,15 @@ func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter,
|
||||
func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, headerID string) ([]models.StorageNodeWithStateKeyModel, error) {
|
||||
log.Debug("retrieving storage cids for header id ", headerID)
|
||||
args := make([]interface{}, 0, 3)
|
||||
pgStr := `SELECT storage_cids.header_id, storage_cids.storage_leaf_key, storage_cids.node_type,
|
||||
storage_cids.cid, storage_cids.mh_key, storage_cids.storage_path, storage_cids.state_path, state_cids.state_leaf_key
|
||||
pgStr := `SELECT CAST(storage_cids.block_number as Text), storage_cids.header_id, storage_cids.storage_leaf_key,
|
||||
storage_cids.node_type, storage_cids.cid, storage_cids.mh_key, storage_cids.storage_path, storage_cids.state_path,
|
||||
state_cids.state_leaf_key
|
||||
FROM eth.storage_cids, eth.state_cids, eth.header_cids
|
||||
WHERE storage_cids.header_id = state_cids.header_id AND storage_cids.state_path = state_cids.state_path
|
||||
WHERE storage_cids.header_id = state_cids.header_id
|
||||
AND storage_cids.state_path = state_cids.state_path
|
||||
AND storage_cids.block_number = state_cids.block_number
|
||||
AND state_cids.header_id = header_cids.block_hash
|
||||
AND state_cids.block_number = header_cids.block_number
|
||||
AND header_cids.block_hash = $1`
|
||||
args = append(args, headerID)
|
||||
id := 2
|
||||
@ -559,6 +572,10 @@ func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (models.Head
|
||||
log.Error("header cid retrieval error")
|
||||
return models.HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
blockNumber, err := strconv.ParseInt(headerCID.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return models.HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
var uncleCIDs []models.UncleModel
|
||||
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID.BlockHash)
|
||||
if err != nil {
|
||||
@ -566,7 +583,7 @@ func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (models.Head
|
||||
return models.HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
var txCIDs []models.TxModel
|
||||
txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID.BlockHash)
|
||||
txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID.BlockHash, blockNumber)
|
||||
if err != nil {
|
||||
log.Error("tx cid retrieval error")
|
||||
return models.HeaderModel{}, nil, nil, nil, err
|
||||
@ -576,7 +593,7 @@ func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (models.Head
|
||||
txHashes[i] = txCID.TxHash
|
||||
}
|
||||
var rctCIDs []models.ReceiptModel
|
||||
rctCIDs, err = ecr.RetrieveReceiptCIDsByTxIDs(tx, txHashes)
|
||||
rctCIDs, err = ecr.RetrieveReceiptCIDsByByHeaderIDAndTxIDs(tx, headerCID.BlockHash, txHashes, blockNumber)
|
||||
if err != nil {
|
||||
log.Error("rct cid retrieval error")
|
||||
}
|
||||
@ -619,7 +636,7 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (models.Header
|
||||
return models.HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
var txCIDs []models.TxModel
|
||||
txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].BlockHash)
|
||||
txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].BlockHash, blockNumber)
|
||||
if err != nil {
|
||||
log.Error("tx cid retrieval error")
|
||||
return models.HeaderModel{}, nil, nil, nil, err
|
||||
@ -629,7 +646,7 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (models.Header
|
||||
txHashes[i] = txCID.TxHash
|
||||
}
|
||||
var rctCIDs []models.ReceiptModel
|
||||
rctCIDs, err = ecr.RetrieveReceiptCIDsByTxIDs(tx, txHashes)
|
||||
rctCIDs, err = ecr.RetrieveReceiptCIDsByByHeaderIDAndTxIDs(tx, headerCID[0].BlockHash, txHashes, blockNumber)
|
||||
if err != nil {
|
||||
log.Error("rct cid retrieval error")
|
||||
}
|
||||
@ -647,26 +664,32 @@ func (ecr *CIDRetriever) RetrieveHeaderCIDByHash(tx *sqlx.Tx, blockHash common.H
|
||||
}
|
||||
|
||||
// RetrieveTxCIDsByHeaderID retrieves all tx CIDs for the given header id
|
||||
func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID string) ([]models.TxModel, error) {
|
||||
func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID string, blockNumber int64) ([]models.TxModel, error) {
|
||||
log.Debug("retrieving tx cids for block id ", headerID)
|
||||
pgStr := `SELECT * FROM eth.transaction_cids
|
||||
WHERE header_id = $1
|
||||
pgStr := `SELECT CAST(block_number as Text), header_id, index, tx_hash, cid, mh_key,
|
||||
dst, src, tx_data, tx_type, value
|
||||
FROM eth.transaction_cids
|
||||
WHERE header_id = $1 AND block_number = $2
|
||||
ORDER BY index`
|
||||
var txCIDs []models.TxModel
|
||||
return txCIDs, tx.Select(&txCIDs, pgStr, headerID)
|
||||
return txCIDs, tx.Select(&txCIDs, pgStr, headerID, blockNumber)
|
||||
}
|
||||
|
||||
// RetrieveReceiptCIDsByTxIDs retrieves receipt CIDs by their associated tx IDs
|
||||
func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txHashes []string) ([]models.ReceiptModel, error) {
|
||||
// RetrieveReceiptCIDsByByHeaderIDAndTxIDs retrieves receipt CIDs by their associated tx IDs for the given header id
|
||||
func (ecr *CIDRetriever) RetrieveReceiptCIDsByByHeaderIDAndTxIDs(tx *sqlx.Tx, headerID string, txHashes []string, blockNumber int64) ([]models.ReceiptModel, error) {
|
||||
log.Debugf("retrieving receipt cids for tx hashes %v", txHashes)
|
||||
pgStr := `SELECT receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key,
|
||||
receipt_cids.contract, receipt_cids.contract_hash
|
||||
pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.header_id, receipt_cids.tx_id, receipt_cids.leaf_cid,
|
||||
receipt_cids.leaf_mh_key, receipt_cids.contract, receipt_cids.contract_hash
|
||||
FROM eth.receipt_cids, eth.transaction_cids
|
||||
WHERE tx_id = ANY($1)
|
||||
WHERE tx_id = ANY($2)
|
||||
AND receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND receipt_cids.header_id = transaction_cids.header_id
|
||||
AND receipt_cids.block_number = transaction_cids.block_number
|
||||
AND transaction_cids.header_id = $1
|
||||
AND transaction_cids.block_number = $3
|
||||
ORDER BY transaction_cids.index`
|
||||
var rctCIDs []models.ReceiptModel
|
||||
return rctCIDs, tx.Select(&rctCIDs, pgStr, pq.Array(txHashes))
|
||||
return rctCIDs, tx.Select(&rctCIDs, pgStr, headerID, pq.Array(txHashes), blockNumber)
|
||||
}
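The tx and receipt retrieval helpers above are now scoped by both the header ID and the block number. Below is a minimal caller sketch that wires them together the same way RetrieveBlockByHash and RetrieveBlockByNumber do earlier in this diff; the helper name and the surrounding transaction handling are illustrative, not part of this change.

// Illustrative only: fetch tx and receipt CIDs for one block using the
// header-ID + block-number scoped signatures introduced above.
// The function name is hypothetical; only the two method calls come from this diff.
func retrieveBlockCIDs(tx *sqlx.Tx, ecr *CIDRetriever, headerID string, blockNumber int64) ([]models.TxModel, []models.ReceiptModel, error) {
	txCIDs, err := ecr.RetrieveTxCIDsByHeaderID(tx, headerID, blockNumber)
	if err != nil {
		return nil, nil, err
	}
	txHashes := make([]string, len(txCIDs))
	for i, txCID := range txCIDs {
		txHashes[i] = txCID.TxHash
	}
	rctCIDs, err := ecr.RetrieveReceiptCIDsByByHeaderIDAndTxIDs(tx, headerID, txHashes, blockNumber)
	if err != nil {
		return nil, nil, err
	}
	return txCIDs, rctCIDs, nil
}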
|
||||
|
||||
// RetrieveHeaderAndTxCIDsByBlockNumber retrieves header CIDs and their associated tx CIDs by block number
|
||||
@ -678,8 +701,8 @@ func (ecr *CIDRetriever) RetrieveHeaderAndTxCIDsByBlockNumber(blockNumber int64)
|
||||
// https://github.com/go-gorm/gorm/issues/4083#issuecomment-778883283
|
||||
// Will use join for TransactionCIDs once preload for 1:N is supported.
|
||||
err := ecr.gormDB.Preload("TransactionCIDs", func(tx *gorm.DB) *gorm.DB {
|
||||
return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id")
|
||||
}).Joins("IPLD").Find(&headerCIDs, "block_number = ?", blockNumber).Error
|
||||
return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id", "block_number")
|
||||
}).Joins("IPLD").Find(&headerCIDs, "header_cids.block_number = ?", blockNumber).Error
|
||||
|
||||
if err != nil {
|
||||
log.Error("header cid retrieval error")
|
||||
@ -689,37 +712,60 @@ func (ecr *CIDRetriever) RetrieveHeaderAndTxCIDsByBlockNumber(blockNumber int64)
|
||||
return headerCIDs, nil
|
||||
}
|
||||
|
||||
// RetrieveHeaderAndTxCIDsByBlockHash retrieves header CID and their associated tx CIDs by block hash
|
||||
func (ecr *CIDRetriever) RetrieveHeaderAndTxCIDsByBlockHash(blockHash common.Hash) (HeaderCIDRecord, error) {
|
||||
// RetrieveHeaderAndTxCIDsByBlockHash retrieves header CID and their associated tx CIDs by block hash (and optionally block number)
|
||||
func (ecr *CIDRetriever) RetrieveHeaderAndTxCIDsByBlockHash(blockHash common.Hash, blockNumber *big.Int) (HeaderCIDRecord, error) {
|
||||
log.Debug("retrieving header cid and tx cids for block hash ", blockHash.String())
|
||||
|
||||
var headerCID HeaderCIDRecord
|
||||
var headerCIDs []HeaderCIDRecord
|
||||
|
||||
conditions := map[string]interface{}{"block_hash": blockHash.String()}
|
||||
if blockNumber != nil {
|
||||
conditions["header_cids.block_number"] = blockNumber.Int64()
|
||||
}
|
||||
|
||||
// https://github.com/go-gorm/gorm/issues/4083#issuecomment-778883283
|
||||
// Will use join for TransactionCIDs once preload for 1:N is supported.
|
||||
err := ecr.gormDB.Preload("TransactionCIDs", func(tx *gorm.DB) *gorm.DB {
|
||||
return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id")
|
||||
}).Joins("IPLD").First(&headerCID, "block_hash = ?", blockHash.String()).Error
|
||||
return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id", "block_number")
|
||||
}).Joins("IPLD").Find(&headerCIDs, conditions).Error
|
||||
|
||||
if err != nil {
|
||||
log.Error("header cid retrieval error")
|
||||
return headerCID, err
|
||||
return HeaderCIDRecord{}, err
|
||||
}
|
||||
|
||||
return headerCID, nil
|
||||
if len(headerCIDs) == 0 {
|
||||
return HeaderCIDRecord{}, errHeaderHashNotFound
|
||||
} else if len(headerCIDs) > 1 {
|
||||
return HeaderCIDRecord{}, errMultipleHeadersForHash
|
||||
}
|
||||
|
||||
// RetrieveTxCIDByHash returns the tx for the given tx hash
|
||||
func (ecr *CIDRetriever) RetrieveTxCIDByHash(txHash string) (TransactionCIDRecord, error) {
|
||||
return headerCIDs[0], nil
|
||||
}
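RetrieveHeaderAndTxCIDsByBlockHash now takes an optional *big.Int block number (nil matches on hash alone) and reports failures through the errHeaderHashNotFound and errMultipleHeadersForHash sentinels. A minimal usage sketch under the assumption that those sentinels are plain error values and that the usual imports (errors, fmt, math/big, go-ethereum common) are available; the wrapper function itself is hypothetical.

// Illustrative only: resolve a header record with or without pinning the height.
// Only the method call and the two sentinel errors come from the change above.
func lookupHeader(ecr *CIDRetriever, hash common.Hash, height *big.Int) (HeaderCIDRecord, error) {
	// height == nil matches on block_hash alone; a non-nil height also pins header_cids.block_number.
	record, err := ecr.RetrieveHeaderAndTxCIDsByBlockHash(hash, height)
	switch {
	case errors.Is(err, errHeaderHashNotFound):
		return HeaderCIDRecord{}, fmt.Errorf("no indexed header for %s: %w", hash.Hex(), err)
	case errors.Is(err, errMultipleHeadersForHash):
		return HeaderCIDRecord{}, fmt.Errorf("ambiguous header hash %s: %w", hash.Hex(), err)
	default:
		return record, err
	}
}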
|
||||
|
||||
// RetrieveTxCIDByHash returns the tx for the given tx hash (and optionally block number)
|
||||
func (ecr *CIDRetriever) RetrieveTxCIDByHash(txHash string, blockNumber *big.Int) (TransactionCIDRecord, error) {
|
||||
log.Debug("retrieving tx cid for tx hash ", txHash)
|
||||
|
||||
var txCID TransactionCIDRecord
|
||||
var txCIDs []TransactionCIDRecord
|
||||
|
||||
err := ecr.gormDB.Joins("IPLD").First(&txCID, "tx_hash = ?", txHash).Error
|
||||
var err error
|
||||
if blockNumber != nil {
|
||||
err = ecr.gormDB.Joins("IPLD").Find(&txCIDs, "tx_hash = ? AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number)) AND transaction_cids.block_number = ?", txHash, blockNumber.Int64()).Error
|
||||
} else {
|
||||
err = ecr.gormDB.Joins("IPLD").Find(&txCIDs, "tx_hash = ? AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))", txHash).Error
|
||||
}
|
||||
if err != nil {
|
||||
log.Error("header cid retrieval error")
|
||||
return txCID, err
|
||||
log.Error("tx retrieval error")
|
||||
return TransactionCIDRecord{}, err
|
||||
}
|
||||
|
||||
return txCID, nil
|
||||
if len(txCIDs) == 0 {
|
||||
return TransactionCIDRecord{}, errTxHashNotFound
|
||||
} else if len(txCIDs) > 1 {
|
||||
// a transaction can be part of only one canonical block
|
||||
return TransactionCIDRecord{}, errTxHashInMultipleBlocks
|
||||
}
|
||||
|
||||
return txCIDs[0], nil
|
||||
}
|
||||
|
@ -19,6 +19,9 @@ package eth_test
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth/test_helpers"
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
@ -28,10 +31,6 @@ import (
|
||||
"github.com/jmoiron/sqlx"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers"
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -249,6 +248,7 @@ var _ = Describe("Retriever", func() {
|
||||
AND header_cids.block_number = $1
|
||||
ORDER BY transaction_cids.index`
|
||||
err := db.Select(&expectedRctCIDsAndLeafNodes, pgStr, test_helpers.BlockNumber.Uint64())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
cids, empty, err := retriever.Retrieve(openFilter, 1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(empty).ToNot(BeTrue())
|
||||
@ -300,6 +300,7 @@ var _ = Describe("Retriever", func() {
|
||||
AND header_cids.block_number = $1
|
||||
ORDER BY transaction_cids.index`
|
||||
err := db.Select(&expectedRctCIDsAndLeafNodes, pgStr, test_helpers.BlockNumber.Uint64())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
cids1, empty, err := retriever.Retrieve(rctAddressFilter, 1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(empty).ToNot(BeTrue())
|
||||
@ -412,6 +413,7 @@ var _ = Describe("Retriever", func() {
|
||||
Expect(len(cids7[0].StorageNodes)).To(Equal(0))
|
||||
Expect(len(cids7[0].StateNodes)).To(Equal(1))
|
||||
Expect(cids7[0].StateNodes[0]).To(Equal(models.StateNodeModel{
|
||||
BlockNumber: "1",
|
||||
HeaderID: cids7[0].StateNodes[0].HeaderID,
|
||||
NodeType: 2,
|
||||
StateKey: common.BytesToHash(test_helpers.AccountLeafKey).Hex(),
|
||||
|
@ -19,10 +19,14 @@ package eth_test
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth/test_helpers"
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
@ -36,15 +40,44 @@ import (
|
||||
"github.com/jmoiron/sqlx"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers"
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
|
||||
ethServerShared "github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
|
||||
)
|
||||
|
||||
var (
|
||||
parsedABI abi.ABI
|
||||
|
||||
block1StateRoot = common.HexToHash("0xa1f614839ebdd58677df2c9d66a3e0acc9462acc49fad6006d0b6e5d2b98ed21")
|
||||
rootDataHashBlock1 = "a1f614839ebdd58677df2c9d66a3e0acc9462acc49fad6006d0b6e5d2b98ed21"
|
||||
rootDataBlock1 = "f871a0577652b625b77bdb5bf77bc43f3125cad7464d679d1575565277d3611b8053e780808080a0fe889f10e5db8f2c2bf355928152a17f6e3bb99a9241ac6d84c77e6264509c798080808080808080a011db0cda34a896dabeb6839bb06a38f49514cfa486435984eb013b7df9ee85c58080"
|
||||
|
||||
block5StateRoot = common.HexToHash("0x572ef3b6b3d5164ed9d83341073f13af4d60a3aab38989b6c03917544f186a43")
|
||||
rootDataHashBlock5 = "572ef3b6b3d5164ed9d83341073f13af4d60a3aab38989b6c03917544f186a43"
|
||||
rootDataBlock5 = "f8b1a0408dd81f6cd5c614f91ecd9faa01d5feba936e0314ba04f99c74069ba819e0f280808080a0b356351d60bc9894cf1f1d6cb68c815f0131d50f1da83c4023a09ec855cfff91a0180d554b171f6acf8295e376266df2311f68975d74c02753b85707d308f703e48080808080a0422c7cc4fa407603f0879a0ecaa809682ce98dbef30551a34bcce09fa3ac995180a02d264f591aa3fa9df3cbeea190a4fd8d5483ddfb1b85603b2a006d179f79ba358080"
|
||||
|
||||
account1DataHash = "180d554b171f6acf8295e376266df2311f68975d74c02753b85707d308f703e4"
|
||||
account1Data = "f869a03114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45b846f8440180a04bd45c41d863f1bcf5da53364387fcdd64f77924d388a4df47e64132273fb4c0a0ba79854f3dbf6505fdbb085888e25fae8fa97288c5ce8fcd39aa589290d9a659"
|
||||
account1StateLeafKey = "0x6114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45"
|
||||
account1Code = "608060405234801561001057600080fd5b50600436106100415760003560e01c806343d726d61461004657806365f3c31a1461005057806373d4a13a1461007e575b600080fd5b61004e61009c565b005b61007c6004803603602081101561006657600080fd5b810190808035906020019092919050505061017b565b005b610086610185565b6040518082815260200191505060405180910390f35b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610141576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602281526020018061018c6022913960400191505060405180910390fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b8060018190555050565b6001548156fe4f6e6c79206f776e65722063616e2063616c6c20746869732066756e6374696f6e2ea265627a7a723158205ba91466129f45285f53176d805117208c231ec6343d7896790e6fc4165b802b64736f6c63430005110032"
|
||||
account2DataHash = "2d264f591aa3fa9df3cbeea190a4fd8d5483ddfb1b85603b2a006d179f79ba35"
|
||||
account2Data = "f871a03926db69aaced518e9b9f0f434a473e7174109c943548bb8f23be41ca76d9ad2b84ef84c02881bc16d674ec82710a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
account3DataHash = "408dd81f6cd5c614f91ecd9faa01d5feba936e0314ba04f99c74069ba819e0f2"
|
||||
account3Data = "f86da030bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2ab84af848058405f5b608a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
account4DataHash = "422c7cc4fa407603f0879a0ecaa809682ce98dbef30551a34bcce09fa3ac9951"
|
||||
account4Data = "f871a03957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45b84ef84c80883782dace9d9003e8a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
account5DataHash = "b356351d60bc9894cf1f1d6cb68c815f0131d50f1da83c4023a09ec855cfff91"
|
||||
account5Data = "f871a03380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312ab84ef84c80883782dace9d900000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
|
||||
contractStorageRootBlock5 = common.HexToHash("0x4bd45c41d863f1bcf5da53364387fcdd64f77924d388a4df47e64132273fb4c0")
|
||||
storageRootDataHashBlock5 = "4bd45c41d863f1bcf5da53364387fcdd64f77924d388a4df47e64132273fb4c0"
|
||||
storageRootDataBlock5 = "f838a120290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5639594703c4b2bd70c169f5717101caee543299fc946c7"
|
||||
|
||||
contractStorageRootBlock4 = common.HexToHash("0x64ad893aa7937d05983daa8b7d221acdf1c116433f29dcd1ea69f16fa96fce68")
|
||||
storageRootDataHashBlock4 = "64ad893aa7937d05983daa8b7d221acdf1c116433f29dcd1ea69f16fa96fce68"
|
||||
storageRootDataBlock4 = "f8518080a08e8ada45207a7d2f19dd6f0ee4955cec64fa5ebef29568b5c449a4c4dd361d558080808080808080a07b58866e3801680bea90c82a80eb08889ececef107b8b504ae1d1a1e1e17b7af8080808080"
|
||||
|
||||
storageNode1DataHash = "7b58866e3801680bea90c82a80eb08889ececef107b8b504ae1d1a1e1e17b7af"
|
||||
storageNode1Data = "e2a0310e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf609"
|
||||
storageNode2DataHash = "8e8ada45207a7d2f19dd6f0ee4955cec64fa5ebef29568b5c449a4c4dd361d55"
|
||||
storageNode2Data = "f7a0390decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5639594703c4b2bd70c169f5717101caee543299fc946c7"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -82,8 +115,8 @@ var _ = Describe("eth state reading tests", func() {
|
||||
ChainConfig: chainConfig,
|
||||
VMConfig: vm.Config{},
|
||||
RPCGasCap: big.NewInt(10000000000), // Max gas capacity for a rpc call.
|
||||
GroupCacheConfig: ðServerShared.GroupCacheConfig{
|
||||
StateDB: ethServerShared.GroupConfig{
|
||||
GroupCacheConfig: &shared.GroupCacheConfig{
|
||||
StateDB: shared.GroupConfig{
|
||||
Name: "eth_state_test",
|
||||
CacheSizeInMB: 8,
|
||||
CacheExpiryInMins: 60,
|
||||
@ -530,4 +563,263 @@ var _ = Describe("eth state reading tests", func() {
|
||||
Expect(header).To(Equal(expectedCanonicalHeader))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("eth_getSlice", func() {
|
||||
It("Retrieves the state slice for root path", func() {
|
||||
path := "0x"
|
||||
depth := 3
|
||||
sliceResponse, err := api.GetSlice(ctx, path, depth, block5StateRoot, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedResponse := eth.GetSliceResponse{
|
||||
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, block5StateRoot.String()),
|
||||
MetaData: eth.GetSliceResponseMetadata{
|
||||
NodeStats: map[string]string{
|
||||
"00-stem-and-head-nodes": "1",
|
||||
"01-max-depth": "1",
|
||||
"02-total-trie-nodes": "6",
|
||||
"03-leaves": "5",
|
||||
"04-smart-contracts": "1",
|
||||
},
|
||||
},
|
||||
TrieNodes: eth.GetSliceResponseTrieNodes{
|
||||
Stem: map[string]string{},
|
||||
Head: map[string]string{
|
||||
rootDataHashBlock5: rootDataBlock5,
|
||||
},
|
||||
Slice: map[string]string{
|
||||
account1DataHash: account1Data,
|
||||
account2DataHash: account2Data,
|
||||
account3DataHash: account3Data,
|
||||
account4DataHash: account4Data,
|
||||
account5DataHash: account5Data,
|
||||
},
|
||||
},
|
||||
Leaves: map[string]eth.GetSliceResponseAccount{
|
||||
account1StateLeafKey: {
|
||||
StorageRoot: contractStorageRootBlock5.Hex(),
|
||||
EVMCode: account1Code,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
eth.CheckGetSliceResponse(*sliceResponse, expectedResponse)
|
||||
})
|
||||
It("Retrieves the state slice for root path with 0 depth", func() {
|
||||
path := "0x"
|
||||
depth := 0
|
||||
sliceResponse, err := api.GetSlice(ctx, path, depth, block5StateRoot, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedResponse := eth.GetSliceResponse{
|
||||
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, block5StateRoot.String()),
|
||||
MetaData: eth.GetSliceResponseMetadata{
|
||||
NodeStats: map[string]string{
|
||||
"00-stem-and-head-nodes": "1",
|
||||
"01-max-depth": "0",
|
||||
"02-total-trie-nodes": "1",
|
||||
"03-leaves": "0",
|
||||
"04-smart-contracts": "0",
|
||||
},
|
||||
},
|
||||
TrieNodes: eth.GetSliceResponseTrieNodes{
|
||||
Stem: map[string]string{},
|
||||
Head: map[string]string{
|
||||
rootDataHashBlock5: rootDataBlock5,
|
||||
},
|
||||
Slice: map[string]string{},
|
||||
},
|
||||
Leaves: map[string]eth.GetSliceResponseAccount{},
|
||||
}
|
||||
|
||||
eth.CheckGetSliceResponse(*sliceResponse, expectedResponse)
|
||||
})
|
||||
It("Retrieves the state slice for a path to an account", func() {
|
||||
path := "0x06"
|
||||
depth := 2
|
||||
sliceResponse, err := api.GetSlice(ctx, path, depth, block5StateRoot, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedResponse := eth.GetSliceResponse{
|
||||
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, block5StateRoot.String()),
|
||||
MetaData: eth.GetSliceResponseMetadata{
|
||||
NodeStats: map[string]string{
|
||||
"00-stem-and-head-nodes": "2",
|
||||
"01-max-depth": "0",
|
||||
"02-total-trie-nodes": "2",
|
||||
"03-leaves": "1",
|
||||
"04-smart-contracts": "1",
|
||||
},
|
||||
},
|
||||
TrieNodes: eth.GetSliceResponseTrieNodes{
|
||||
Stem: map[string]string{
|
||||
rootDataHashBlock5: rootDataBlock5,
|
||||
},
|
||||
Head: map[string]string{
|
||||
account1DataHash: account1Data,
|
||||
},
|
||||
Slice: map[string]string{},
|
||||
},
|
||||
Leaves: map[string]eth.GetSliceResponseAccount{
|
||||
account1StateLeafKey: {
|
||||
StorageRoot: contractStorageRootBlock5.Hex(),
|
||||
EVMCode: account1Code,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
eth.CheckGetSliceResponse(*sliceResponse, expectedResponse)
|
||||
})
|
||||
It("Retrieves the state slice for a path to a non-existing account", func() {
|
||||
path := "0x06"
|
||||
depth := 2
|
||||
sliceResponse, err := api.GetSlice(ctx, path, depth, block1StateRoot, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedResponse := eth.GetSliceResponse{
|
||||
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, block1StateRoot.String()),
|
||||
MetaData: eth.GetSliceResponseMetadata{
|
||||
NodeStats: map[string]string{
|
||||
"00-stem-and-head-nodes": "1",
|
||||
"01-max-depth": "0",
|
||||
"02-total-trie-nodes": "1",
|
||||
"03-leaves": "0",
|
||||
"04-smart-contracts": "0",
|
||||
},
|
||||
},
|
||||
TrieNodes: eth.GetSliceResponseTrieNodes{
|
||||
Stem: map[string]string{
|
||||
rootDataHashBlock1: rootDataBlock1,
|
||||
},
|
||||
Head: map[string]string{},
|
||||
Slice: map[string]string{},
|
||||
},
|
||||
Leaves: map[string]eth.GetSliceResponseAccount{},
|
||||
}
|
||||
|
||||
eth.CheckGetSliceResponse(*sliceResponse, expectedResponse)
|
||||
})
|
||||
|
||||
It("Retrieves the storage slice for root path", func() {
|
||||
path := "0x"
|
||||
depth := 2
|
||||
sliceResponse, err := api.GetSlice(ctx, path, depth, contractStorageRootBlock4, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedResponse := eth.GetSliceResponse{
|
||||
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, contractStorageRootBlock4.String()),
|
||||
MetaData: eth.GetSliceResponseMetadata{
|
||||
NodeStats: map[string]string{
|
||||
"00-stem-and-head-nodes": "1",
|
||||
"01-max-depth": "1",
|
||||
"02-total-trie-nodes": "3",
|
||||
"03-leaves": "2",
|
||||
"04-smart-contracts": "0",
|
||||
},
|
||||
},
|
||||
TrieNodes: eth.GetSliceResponseTrieNodes{
|
||||
Stem: map[string]string{},
|
||||
Head: map[string]string{
|
||||
storageRootDataHashBlock4: storageRootDataBlock4,
|
||||
},
|
||||
Slice: map[string]string{
|
||||
storageNode1DataHash: storageNode1Data,
|
||||
storageNode2DataHash: storageNode2Data,
|
||||
},
|
||||
},
|
||||
Leaves: map[string]eth.GetSliceResponseAccount{},
|
||||
}
|
||||
|
||||
eth.CheckGetSliceResponse(*sliceResponse, expectedResponse)
|
||||
})
|
||||
It("Retrieves the storage slice for root path with 0 depth", func() {
|
||||
path := "0x"
|
||||
depth := 0
|
||||
sliceResponse, err := api.GetSlice(ctx, path, depth, contractStorageRootBlock4, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedResponse := eth.GetSliceResponse{
|
||||
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, contractStorageRootBlock4.String()),
|
||||
MetaData: eth.GetSliceResponseMetadata{
|
||||
NodeStats: map[string]string{
|
||||
"00-stem-and-head-nodes": "1",
|
||||
"01-max-depth": "0",
|
||||
"02-total-trie-nodes": "1",
|
||||
"03-leaves": "0",
|
||||
"04-smart-contracts": "0",
|
||||
},
|
||||
},
|
||||
TrieNodes: eth.GetSliceResponseTrieNodes{
|
||||
Stem: map[string]string{},
|
||||
Head: map[string]string{
|
||||
storageRootDataHashBlock4: storageRootDataBlock4,
|
||||
},
|
||||
Slice: map[string]string{},
|
||||
},
|
||||
Leaves: map[string]eth.GetSliceResponseAccount{},
|
||||
}
|
||||
|
||||
eth.CheckGetSliceResponse(*sliceResponse, expectedResponse)
|
||||
})
|
||||
It("Retrieves the storage slice for root path with deleted nodes", func() {
|
||||
path := "0x"
|
||||
depth := 2
|
||||
sliceResponse, err := api.GetSlice(ctx, path, depth, contractStorageRootBlock5, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedResponse := eth.GetSliceResponse{
|
||||
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, contractStorageRootBlock5.String()),
|
||||
MetaData: eth.GetSliceResponseMetadata{
|
||||
NodeStats: map[string]string{
|
||||
"00-stem-and-head-nodes": "1",
|
||||
"01-max-depth": "0",
|
||||
"02-total-trie-nodes": "1",
|
||||
"03-leaves": "1",
|
||||
"04-smart-contracts": "0",
|
||||
},
|
||||
},
|
||||
TrieNodes: eth.GetSliceResponseTrieNodes{
|
||||
Stem: map[string]string{},
|
||||
Head: map[string]string{
|
||||
storageRootDataHashBlock5: storageRootDataBlock5,
|
||||
},
|
||||
Slice: map[string]string{},
|
||||
},
|
||||
Leaves: map[string]eth.GetSliceResponseAccount{},
|
||||
}
|
||||
|
||||
eth.CheckGetSliceResponse(*sliceResponse, expectedResponse)
|
||||
})
|
||||
It("Retrieves the storage slice for a path to a storage node", func() {
|
||||
path := "0x0b"
|
||||
depth := 2
|
||||
sliceResponse, err := api.GetSlice(ctx, path, depth, contractStorageRootBlock4, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedResponse := eth.GetSliceResponse{
|
||||
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, contractStorageRootBlock4.String()),
|
||||
MetaData: eth.GetSliceResponseMetadata{
|
||||
NodeStats: map[string]string{
|
||||
"00-stem-and-head-nodes": "2",
|
||||
"01-max-depth": "0",
|
||||
"02-total-trie-nodes": "2",
|
||||
"03-leaves": "1",
|
||||
"04-smart-contracts": "0",
|
||||
},
|
||||
},
|
||||
TrieNodes: eth.GetSliceResponseTrieNodes{
|
||||
Stem: map[string]string{
|
||||
storageRootDataHashBlock4: storageRootDataBlock4,
|
||||
},
|
||||
Head: map[string]string{
|
||||
storageNode1DataHash: storageNode1Data,
|
||||
},
|
||||
Slice: map[string]string{},
|
||||
},
|
||||
Leaves: map[string]eth.GetSliceResponseAccount{},
|
||||
}
|
||||
|
||||
eth.CheckGetSliceResponse(*sliceResponse, expectedResponse)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
@ -82,6 +82,7 @@ func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IP
|
||||
return err
|
||||
}
|
||||
response.Header = models.IPLDModel{
|
||||
BlockNumber: payload.Block.Number().String(),
|
||||
Data: headerRLP,
|
||||
Key: cid.String(),
|
||||
}
|
||||
@ -97,6 +98,7 @@ func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IP
|
||||
return err
|
||||
}
|
||||
response.Uncles[i] = models.IPLDModel{
|
||||
BlockNumber: uncle.Number.String(),
|
||||
Data: uncleRlp,
|
||||
Key: cid.String(),
|
||||
}
|
||||
@ -183,6 +185,7 @@ func (s *ResponseFilterer) filerReceipts(receiptFilter ReceiptFilter, response *
|
||||
// TODO: Verify this filter logic.
|
||||
if checkReceipts(receipt, receiptFilter.Topics, topics, receiptFilter.LogAddresses, contracts, trxHashes) {
|
||||
response.Receipts = append(response.Receipts, models.IPLDModel{
|
||||
BlockNumber: payload.Block.Number().String(),
|
||||
Data: rctIPLDData[idx],
|
||||
Key: rctLeafCID[idx].String(),
|
||||
})
|
||||
@ -205,7 +208,7 @@ func checkReceipts(rct *types.Receipt, wantedTopics, actualTopics [][]string, wa
|
||||
}
|
||||
// If there are no wanted contract addresses, we keep all receipts that match the topic filter
|
||||
if len(wantedAddresses) == 0 {
|
||||
if match := filterMatch(wantedTopics, actualTopics); match == true {
|
||||
if match := filterMatch(wantedTopics, actualTopics); match {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@ -215,7 +218,7 @@ func checkReceipts(rct *types.Receipt, wantedTopics, actualTopics [][]string, wa
|
||||
for _, actualAddr := range actualAddresses {
|
||||
if wantedAddr == actualAddr {
|
||||
// we keep the receipt if it matches on the topic filter
|
||||
if match := filterMatch(wantedTopics, actualTopics); match == true {
|
||||
if match := filterMatch(wantedTopics, actualTopics); match {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@ -237,10 +240,7 @@ func filterMatch(wantedTopics, actualTopics [][]string) bool {
|
||||
matches++
|
||||
}
|
||||
}
|
||||
if matches == 4 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return matches == 4
|
||||
}
|
||||
|
||||
// returns 1 if the two slices have a string in common, 0 if they do not
|
||||
@ -282,6 +282,7 @@ func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storag
|
||||
StateLeafKey: common.BytesToHash(stateNode.LeafKey),
|
||||
Path: stateNode.Path,
|
||||
IPLD: models.IPLDModel{
|
||||
BlockNumber: payload.Block.Number().String(),
|
||||
Data: stateNode.NodeValue,
|
||||
Key: cid.String(),
|
||||
},
|
||||
@ -300,6 +301,7 @@ func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storag
|
||||
StateLeafKey: common.BytesToHash(stateNode.LeafKey),
|
||||
StorageLeafKey: common.BytesToHash(storageNode.LeafKey),
|
||||
IPLD: models.IPLDModel{
|
||||
BlockNumber: payload.Block.Number().String(),
|
||||
Data: storageNode.NodeValue,
|
||||
Key: cid.String(),
|
||||
},
|
||||
|
@ -25,9 +25,9 @@ import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers"
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth/test_helpers"
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -61,12 +61,14 @@ var _ = Describe("Filterer", func() {
|
||||
Expect(stateNode.Type).To(Equal(sdtypes.Leaf))
|
||||
if bytes.Equal(stateNode.StateLeafKey.Bytes(), test_helpers.AccountLeafKey) {
|
||||
Expect(stateNode.IPLD).To(Equal(models.IPLDModel{
|
||||
BlockNumber: test_helpers.BlockNumber.String(),
|
||||
Data: test_helpers.State2IPLD.RawData(),
|
||||
Key: test_helpers.State2IPLD.Cid().String(),
|
||||
}))
|
||||
}
|
||||
if bytes.Equal(stateNode.StateLeafKey.Bytes(), test_helpers.ContractLeafKey) {
|
||||
Expect(stateNode.IPLD).To(Equal(models.IPLDModel{
|
||||
BlockNumber: test_helpers.BlockNumber.String(),
|
||||
Data: test_helpers.State1IPLD.RawData(),
|
||||
Key: test_helpers.State1IPLD.Cid().String(),
|
||||
}))
|
||||
@ -87,6 +89,7 @@ var _ = Describe("Filterer", func() {
|
||||
Expect(len(iplds1.StateNodes)).To(Equal(0))
|
||||
Expect(len(iplds1.Receipts)).To(Equal(1))
|
||||
Expect(iplds1.Receipts[0]).To(Equal(models.IPLDModel{
|
||||
BlockNumber: test_helpers.BlockNumber.String(),
|
||||
Data: test_helpers.Rct1IPLD,
|
||||
Key: test_helpers.Rct1CID.String(),
|
||||
}))
|
||||
@ -102,6 +105,7 @@ var _ = Describe("Filterer", func() {
|
||||
Expect(len(iplds2.StateNodes)).To(Equal(0))
|
||||
Expect(len(iplds2.Receipts)).To(Equal(1))
|
||||
Expect(iplds2.Receipts[0]).To(Equal(models.IPLDModel{
|
||||
BlockNumber: test_helpers.BlockNumber.String(),
|
||||
Data: test_helpers.Rct1IPLD,
|
||||
Key: test_helpers.Rct1CID.String(),
|
||||
}))
|
||||
@ -117,6 +121,7 @@ var _ = Describe("Filterer", func() {
|
||||
Expect(len(iplds3.StateNodes)).To(Equal(0))
|
||||
Expect(len(iplds3.Receipts)).To(Equal(1))
|
||||
Expect(iplds3.Receipts[0]).To(Equal(models.IPLDModel{
|
||||
BlockNumber: test_helpers.BlockNumber.String(),
|
||||
Data: test_helpers.Rct1IPLD,
|
||||
Key: test_helpers.Rct1CID.String(),
|
||||
}))
|
||||
@ -132,6 +137,7 @@ var _ = Describe("Filterer", func() {
|
||||
Expect(len(iplds4.StateNodes)).To(Equal(0))
|
||||
Expect(len(iplds4.Receipts)).To(Equal(1))
|
||||
Expect(iplds4.Receipts[0]).To(Equal(models.IPLDModel{
|
||||
BlockNumber: test_helpers.BlockNumber.String(),
|
||||
Data: test_helpers.Rct2IPLD,
|
||||
Key: test_helpers.Rct2CID.String(),
|
||||
}))
|
||||
@ -165,6 +171,7 @@ var _ = Describe("Filterer", func() {
|
||||
Expect(len(iplds6.StateNodes)).To(Equal(0))
|
||||
Expect(len(iplds6.Receipts)).To(Equal(1))
|
||||
Expect(iplds4.Receipts[0]).To(Equal(models.IPLDModel{
|
||||
BlockNumber: test_helpers.BlockNumber.String(),
|
||||
Data: test_helpers.Rct2IPLD,
|
||||
Key: test_helpers.Rct2CID.String(),
|
||||
}))
|
||||
@ -181,6 +188,7 @@ var _ = Describe("Filterer", func() {
|
||||
Expect(len(iplds7.StateNodes)).To(Equal(1))
|
||||
Expect(iplds7.StateNodes[0].StateLeafKey.Bytes()).To(Equal(test_helpers.AccountLeafKey))
|
||||
Expect(iplds7.StateNodes[0].IPLD).To(Equal(models.IPLDModel{
|
||||
BlockNumber: test_helpers.BlockNumber.String(),
|
||||
Data: test_helpers.State2IPLD.RawData(),
|
||||
Key: test_helpers.State2IPLD.Cid().String(),
|
||||
}))
|
||||
|
@ -17,6 +17,8 @@
|
||||
package eth
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
||||
)
|
||||
|
||||
@ -34,3 +36,8 @@ func ResolveToNodeType(nodeType int) sdtypes.NodeType {
|
||||
return sdtypes.Unknown
|
||||
}
|
||||
}
|
||||
|
||||
// Timestamp in milliseconds
|
||||
func makeTimestamp() int64 {
|
||||
return time.Now().UnixNano() / int64(time.Millisecond)
|
||||
}
|
||||
|
@ -20,12 +20,13 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
||||
"github.com/jmoiron/sqlx"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
|
||||
)
|
||||
|
||||
// Fetcher interface for substituting mocks in tests
|
||||
@ -102,11 +103,17 @@ func (f *IPLDFetcher) Fetch(cids CIDWrapper) (*IPLDs, error) {
|
||||
// FetchHeader fetches header
|
||||
func (f *IPLDFetcher) FetchHeader(tx *sqlx.Tx, c models.HeaderModel) (models.IPLDModel, error) {
|
||||
log.Debug("fetching header ipld")
|
||||
headerBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
|
||||
blockNumber, err := strconv.ParseUint(c.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return models.IPLDModel{}, err
|
||||
}
|
||||
|
||||
headerBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, c.MhKey, blockNumber)
|
||||
if err != nil {
|
||||
return models.IPLDModel{}, err
|
||||
}
|
||||
return models.IPLDModel{
|
||||
BlockNumber: c.BlockNumber,
|
||||
Data: headerBytes,
|
||||
Key: c.CID,
|
||||
}, nil
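Each fetch method in this file now follows the same parse-then-fetch pattern: the block number stored as text on the CID model is parsed and passed alongside the multihash key, so the lookup in public.blocks can use the (key, block_number) pair. A condensed sketch of that pattern, factored into a hypothetical helper that is not part of this diff:

// Illustrative only: the pattern repeated in FetchHeader, FetchUncles, FetchTrxs,
// FetchRcts, FetchState and FetchStorage below. Parse the model's text block
// number, then fetch the IPLD block by (mh_key, block_number).
func fetchByMhKey(tx *sqlx.Tx, mhKey, blockNumberStr string) ([]byte, error) {
	blockNumber, err := strconv.ParseUint(blockNumberStr, 10, 64)
	if err != nil {
		return nil, err
	}
	return shared.FetchIPLDByMhKeyAndBlockNumber(tx, mhKey, blockNumber)
}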
|
||||
@ -117,11 +124,16 @@ func (f *IPLDFetcher) FetchUncles(tx *sqlx.Tx, cids []models.UncleModel) ([]mode
|
||||
log.Debug("fetching uncle iplds")
|
||||
uncleIPLDs := make([]models.IPLDModel, len(cids))
|
||||
for i, c := range cids {
|
||||
uncleBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
|
||||
blockNumber, err := strconv.ParseUint(c.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
uncleBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, c.MhKey, blockNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
uncleIPLDs[i] = models.IPLDModel{
|
||||
BlockNumber: c.BlockNumber,
|
||||
Data: uncleBytes,
|
||||
Key: c.CID,
|
||||
}
|
||||
@ -134,11 +146,16 @@ func (f *IPLDFetcher) FetchTrxs(tx *sqlx.Tx, cids []models.TxModel) ([]models.IP
|
||||
log.Debug("fetching transaction iplds")
|
||||
trxIPLDs := make([]models.IPLDModel, len(cids))
|
||||
for i, c := range cids {
|
||||
txBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
|
||||
blockNumber, err := strconv.ParseUint(c.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, c.MhKey, blockNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
trxIPLDs[i] = models.IPLDModel{
|
||||
BlockNumber: c.BlockNumber,
|
||||
Data: txBytes,
|
||||
Key: c.CID,
|
||||
}
|
||||
@ -151,12 +168,17 @@ func (f *IPLDFetcher) FetchRcts(tx *sqlx.Tx, cids []models.ReceiptModel) ([]mode
|
||||
log.Debug("fetching receipt iplds")
|
||||
rctIPLDs := make([]models.IPLDModel, len(cids))
|
||||
for i, c := range cids {
|
||||
rctBytes, err := shared.FetchIPLDByMhKey(tx, c.LeafMhKey)
|
||||
blockNumber, err := strconv.ParseUint(c.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rctBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, c.LeafMhKey, blockNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//nodeVal, err := DecodeLeafNode(rctBytes)
|
||||
rctIPLDs[i] = models.IPLDModel{
|
||||
BlockNumber: c.BlockNumber,
|
||||
Data: rctBytes,
|
||||
Key: c.LeafCID,
|
||||
}
|
||||
@ -172,12 +194,17 @@ func (f *IPLDFetcher) FetchState(tx *sqlx.Tx, cids []models.StateNodeModel) ([]S
|
||||
if stateNode.CID == "" {
|
||||
continue
|
||||
}
|
||||
stateBytes, err := shared.FetchIPLDByMhKey(tx, stateNode.MhKey)
|
||||
blockNumber, err := strconv.ParseUint(stateNode.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stateBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, stateNode.MhKey, blockNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stateNodes = append(stateNodes, StateNode{
|
||||
IPLD: models.IPLDModel{
|
||||
BlockNumber: stateNode.BlockNumber,
|
||||
Data: stateBytes,
|
||||
Key: stateNode.CID,
|
||||
},
|
||||
@ -197,12 +224,17 @@ func (f *IPLDFetcher) FetchStorage(tx *sqlx.Tx, cids []models.StorageNodeWithSta
|
||||
if storageNode.CID == "" || storageNode.StateKey == "" {
|
||||
continue
|
||||
}
|
||||
storageBytes, err := shared.FetchIPLDByMhKey(tx, storageNode.MhKey)
|
||||
blockNumber, err := strconv.ParseUint(storageNode.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
storageBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, storageNode.MhKey, blockNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
storageNodes = append(storageNodes, StorageNode{
|
||||
IPLD: models.IPLDModel{
|
||||
BlockNumber: storageNode.BlockNumber,
|
||||
Data: storageBytes,
|
||||
Key: storageNode.CID,
|
||||
},
|
||||
|
@ -23,9 +23,9 @@ import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers"
|
||||
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth/test_helpers"
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
|
||||
)
|
||||
|
||||
var _ = Describe("IPLDFetcher", func() {
|
||||
|
@ -18,7 +18,9 @@ package eth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
|
||||
"github.com/ethereum/go-ethereum/statediff/trie_helpers"
|
||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
||||
"github.com/jmoiron/sqlx"
|
||||
@ -31,132 +33,233 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// node type removed value.
|
||||
// https://github.com/vulcanize/go-ethereum/blob/271f4d01e7e2767ffd8e0cd469bf545be96f2a84/statediff/indexer/helpers.go#L34
|
||||
removedNode = 3
|
||||
|
||||
RetrieveHeadersByHashesPgStr = `SELECT cid, data
|
||||
FROM eth.header_cids
|
||||
INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
|
||||
INNER JOIN public.blocks ON (
|
||||
header_cids.mh_key = blocks.key
|
||||
AND header_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE block_hash = ANY($1::VARCHAR(66)[])`
|
||||
RetrieveHeadersByBlockNumberPgStr = `SELECT cid, data
|
||||
FROM eth.header_cids
|
||||
INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
|
||||
WHERE block_number = $1`
|
||||
INNER JOIN public.blocks ON (
|
||||
header_cids.mh_key = blocks.key
|
||||
AND header_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE header_cids.block_number = $1`
|
||||
RetrieveHeaderByHashPgStr = `SELECT cid, data
|
||||
FROM eth.header_cids
|
||||
INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
|
||||
INNER JOIN public.blocks ON (
|
||||
header_cids.mh_key = blocks.key
|
||||
AND header_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE block_hash = $1`
|
||||
RetrieveUnclesByHashesPgStr = `SELECT cid, data
|
||||
FROM eth.uncle_cids
|
||||
INNER JOIN public.blocks ON (uncle_cids.mh_key = blocks.key)
|
||||
INNER JOIN public.blocks ON (
|
||||
uncle_cids.mh_key = blocks.key
|
||||
AND uncle_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE block_hash = ANY($1::VARCHAR(66)[])`
|
||||
RetrieveUnclesPgStr = `SELECT uncle_cids.cid, data
|
||||
FROM eth.uncle_cids
|
||||
INNER JOIN eth.header_cids ON (
|
||||
uncle_cids.header_id = header_cids.block_hash
|
||||
AND uncle_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
uncle_cids.mh_key = blocks.key
|
||||
AND uncle_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE header_cids.block_hash = $1
|
||||
AND header_cids.block_number = $2
|
||||
ORDER BY uncle_cids.parent_hash`
|
||||
RetrieveUnclesByBlockHashPgStr = `SELECT uncle_cids.cid, data
|
||||
FROM eth.uncle_cids
|
||||
INNER JOIN eth.header_cids ON (uncle_cids.header_id = header_cids.block_hash)
|
||||
INNER JOIN public.blocks ON (uncle_cids.mh_key = blocks.key)
|
||||
WHERE block_hash = $1`
|
||||
INNER JOIN eth.header_cids ON (
|
||||
uncle_cids.header_id = header_cids.block_hash
|
||||
AND uncle_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
uncle_cids.mh_key = blocks.key
|
||||
AND uncle_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE header_cids.block_hash = $1
|
||||
ORDER BY uncle_cids.parent_hash`
|
||||
RetrieveUnclesByBlockNumberPgStr = `SELECT uncle_cids.cid, data
|
||||
FROM eth.uncle_cids
|
||||
INNER JOIN eth.header_cids ON (uncle_cids.header_id = header_cids.block_hash)
|
||||
INNER JOIN public.blocks ON (uncle_cids.mh_key = blocks.key)
|
||||
WHERE block_number = $1`
|
||||
INNER JOIN eth.header_cids ON (
|
||||
uncle_cids.header_id = header_cids.block_hash
|
||||
AND uncle_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
uncle_cids.mh_key = blocks.key
|
||||
AND uncle_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE header_cids.block_number = $1`
|
||||
RetrieveUncleByHashPgStr = `SELECT cid, data
|
||||
FROM eth.uncle_cids
|
||||
INNER JOIN public.blocks ON (uncle_cids.mh_key = blocks.key)
|
||||
INNER JOIN public.blocks ON (
|
||||
uncle_cids.mh_key = blocks.key
|
||||
AND uncle_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE block_hash = $1`
|
||||
RetrieveTransactionsByHashesPgStr = `SELECT cid, data
|
||||
RetrieveTransactionsByHashesPgStr = `SELECT DISTINCT ON (tx_hash) cid, data
|
||||
FROM eth.transaction_cids
|
||||
INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key)
|
||||
INNER JOIN public.blocks ON (
|
||||
transaction_cids.mh_key = blocks.key
|
||||
AND transaction_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE tx_hash = ANY($1::VARCHAR(66)[])`
|
||||
RetrieveTransactionsPgStr = `SELECT transaction_cids.cid, data
|
||||
FROM eth.transaction_cids
|
||||
INNER JOIN eth.header_cids ON (
|
||||
transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
transaction_cids.mh_key = blocks.key
|
||||
AND transaction_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE block_hash = $1
|
||||
AND header_cids.block_number = $2
|
||||
ORDER BY eth.transaction_cids.index ASC`
|
||||
RetrieveTransactionsByBlockHashPgStr = `SELECT transaction_cids.cid, data
|
||||
FROM eth.transaction_cids
|
||||
INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
|
||||
INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key)
|
||||
INNER JOIN eth.header_cids ON (
|
||||
transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
transaction_cids.mh_key = blocks.key
|
||||
AND transaction_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE block_hash = $1
|
||||
ORDER BY eth.transaction_cids.index ASC`
|
||||
RetrieveTransactionsByBlockNumberPgStr = `SELECT transaction_cids.cid, data
|
||||
FROM eth.transaction_cids
|
||||
INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
|
||||
INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key)
|
||||
WHERE block_number = $1
|
||||
INNER JOIN eth.header_cids ON (
|
||||
transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
transaction_cids.mh_key = blocks.key
|
||||
AND transaction_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE header_cids.block_number = $1
|
||||
AND block_hash = (SELECT canonical_header_hash(header_cids.block_number))
|
||||
ORDER BY eth.transaction_cids.index ASC`
|
||||
RetrieveTransactionByHashPgStr = `SELECT cid, data
|
||||
RetrieveTransactionByHashPgStr = `SELECT DISTINCT ON (tx_hash) cid, data
|
||||
FROM eth.transaction_cids
|
||||
INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key)
|
||||
INNER JOIN public.blocks ON (
|
||||
transaction_cids.mh_key = blocks.key
|
||||
AND transaction_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE tx_hash = $1`
|
||||
RetrieveReceiptsByTxHashesPgStr = `SELECT receipt_cids.leaf_cid, data
|
||||
FROM eth.receipt_cids
|
||||
INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash)
|
||||
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key)
|
||||
WHERE tx_hash = ANY($1::VARCHAR(66)[])`
|
||||
INNER JOIN eth.transaction_cids ON (
|
||||
receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND receipt_cids.header_id = transaction_cids.header_id
|
||||
AND receipt_cids.block_number = transaction_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
receipt_cids.leaf_mh_key = blocks.key
|
||||
AND receipt_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE tx_hash = ANY($1::VARCHAR(66)[])
|
||||
AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))`
|
||||
RetrieveReceiptsPgStr = `SELECT receipt_cids.leaf_cid, data, eth.transaction_cids.tx_hash
|
||||
FROM eth.receipt_cids
|
||||
INNER JOIN eth.transaction_cids ON (
|
||||
receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND receipt_cids.header_id = transaction_cids.header_id
|
||||
AND receipt_cids.block_number = transaction_cids.block_number
|
||||
)
|
||||
INNER JOIN eth.header_cids ON (
|
||||
transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
receipt_cids.leaf_mh_key = blocks.key
|
||||
AND receipt_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE block_hash = $1
|
||||
AND header_cids.block_number = $2
|
||||
ORDER BY eth.transaction_cids.index ASC`
|
||||
RetrieveReceiptsByBlockHashPgStr = `SELECT receipt_cids.leaf_cid, data, eth.transaction_cids.tx_hash
|
||||
FROM eth.receipt_cids
|
||||
INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash)
|
||||
INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
|
||||
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key)
|
||||
INNER JOIN eth.transaction_cids ON (
|
||||
receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND receipt_cids.header_id = transaction_cids.header_id
|
||||
AND receipt_cids.block_number = transaction_cids.block_number
|
||||
)
|
||||
INNER JOIN eth.header_cids ON (
|
||||
transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
receipt_cids.leaf_mh_key = blocks.key
|
||||
AND receipt_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE block_hash = $1
|
||||
ORDER BY eth.transaction_cids.index ASC`
|
||||
RetrieveReceiptsByBlockNumberPgStr = `SELECT receipt_cids.leaf_cid, data
|
||||
FROM eth.receipt_cids
|
||||
INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash)
|
||||
INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
|
||||
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key)
|
||||
WHERE block_number = $1
|
||||
INNER JOIN eth.transaction_cids ON (
|
||||
receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND receipt_cids.header_id = transaction_cids.header_id
|
||||
AND receipt_cids.block_number = transaction_cids.block_number
|
||||
)
|
||||
INNER JOIN eth.header_cids ON (
|
||||
transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
receipt_cids.leaf_mh_key = blocks.key
|
||||
AND receipt_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE header_cids.block_number = $1
|
||||
AND block_hash = (SELECT canonical_header_hash(header_cids.block_number))
|
||||
ORDER BY eth.transaction_cids.index ASC`
|
||||
RetrieveReceiptByTxHashPgStr = `SELECT receipt_cids.leaf_cid, data
|
||||
FROM eth.receipt_cids
|
||||
INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash)
|
||||
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key)
|
||||
WHERE tx_hash = $1`
|
||||
RetrieveAccountByLeafKeyAndBlockHashPgStr = `SELECT state_cids.cid, data, state_cids.node_type
|
||||
INNER JOIN eth.transaction_cids ON (
|
||||
receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND receipt_cids.header_id = transaction_cids.header_id
|
||||
AND receipt_cids.block_number = transaction_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
receipt_cids.leaf_mh_key = blocks.key
|
||||
AND receipt_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE tx_hash = $1
|
||||
AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))`
|
||||
RetrieveAccountByLeafKeyAndBlockHashPgStr = `SELECT state_cids.cid, state_cids.mh_key, state_cids.block_number, state_cids.node_type
|
||||
FROM eth.state_cids
|
||||
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
|
||||
INNER JOIN public.blocks ON (state_cids.mh_key = blocks.key)
|
||||
INNER JOIN eth.header_cids ON (
|
||||
state_cids.header_id = header_cids.block_hash
|
||||
AND state_cids.block_number = header_cids.block_number
|
||||
)
|
||||
WHERE state_leaf_key = $1
|
||||
				AND header_cids.block_number <= (SELECT block_number
												FROM eth.header_cids
												WHERE block_hash = $2)
				AND header_cids.block_hash = (SELECT canonical_header_hash(header_cids.block_number))
				ORDER BY header_cids.block_number DESC
				LIMIT 1`
|
||||
RetrieveAccountByLeafKeyAndBlockNumberPgStr = `SELECT state_cids.cid, data, state_cids.node_type
|
||||
RetrieveAccountByLeafKeyAndBlockNumberPgStr = `SELECT state_cids.cid, state_cids.mh_key, state_cids.node_type
|
||||
FROM eth.state_cids
|
||||
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
|
||||
INNER JOIN public.blocks ON (state_cids.mh_key = blocks.key)
|
||||
WHERE state_leaf_key = $1
|
||||
AND block_number <= $2
|
||||
ORDER BY block_number DESC
|
||||
LIMIT 1`
|
||||
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
|
||||
FROM eth.storage_cids
|
||||
INNER JOIN eth.state_cids ON (
|
||||
storage_cids.header_id = state_cids.header_id
|
||||
AND storage_cids.state_path = state_cids.state_path
|
||||
INNER JOIN eth.header_cids ON (
|
||||
state_cids.header_id = header_cids.block_hash
|
||||
AND state_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
|
||||
INNER JOIN public.blocks ON (storage_cids.mh_key = blocks.key)
|
||||
WHERE state_leaf_key = $1
|
||||
AND storage_leaf_key = $2
|
||||
AND block_number <= $3
|
||||
ORDER BY block_number DESC
|
||||
LIMIT 1`
|
||||
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
|
||||
FROM eth.storage_cids
|
||||
INNER JOIN eth.state_cids ON (
|
||||
storage_cids.header_id = state_cids.header_id
|
||||
AND storage_cids.state_path = state_cids.state_path
|
||||
)
|
||||
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
|
||||
INNER JOIN public.blocks ON (storage_cids.mh_key = blocks.key)
|
||||
WHERE state_leaf_key = $1
|
||||
AND storage_leaf_key = $2
|
||||
AND block_number <= (SELECT block_number
|
||||
FROM eth.header_cids
|
||||
WHERE block_hash = $3)
|
||||
AND header_cids.block_hash = (SELECT canonical_header_hash(block_number))
|
||||
ORDER BY block_number DESC
|
||||
AND header_cids.block_number <= $2
|
||||
ORDER BY header_cids.block_number DESC
|
||||
LIMIT 1`
|
||||
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr = `SELECT cid, mh_key, block_number, node_type, state_leaf_removed FROM get_storage_at_by_number($1, $2, $3)`
|
||||
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `SELECT cid, mh_key, block_number, node_type, state_leaf_removed FROM get_storage_at_by_hash($1, $2, $3)`
|
||||
)
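These query strings are executed with sqlx, either against the pool or inside an open transaction. A minimal sketch of how one of them might be used; the wrapper function and variable names are illustrative, only the constant and the tx.Select call pattern come from this file.

// Illustrative only: select canonical transactions for a block number using the
// constant defined above. ipldResult is the row struct already used by this retriever.
func transactionsAtHeight(tx *sqlx.Tx, number uint64) ([]ipldResult, error) {
	txResults := make([]ipldResult, 0)
	if err := tx.Select(&txResults, RetrieveTransactionsByBlockNumberPgStr, number); err != nil {
		return nil, err
	}
	return txResults, nil
}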
|
||||
|
||||
var EmptyNodeValue = make([]byte, common.HashLength)
|
||||
@ -219,9 +322,9 @@ func (r *IPLDRetriever) RetrieveHeadersByBlockNumber(number uint64) ([]string, [
|
||||
}
|
||||
|
||||
// RetrieveHeaderByHash returns the cid and rlp bytes for the header corresponding to the provided block hash
|
||||
func (r *IPLDRetriever) RetrieveHeaderByHash(hash common.Hash) (string, []byte, error) {
|
||||
func (r *IPLDRetriever) RetrieveHeaderByHash(tx *sqlx.Tx, hash common.Hash) (string, []byte, error) {
|
||||
headerResult := new(ipldResult)
|
||||
return headerResult.CID, headerResult.Data, r.db.Get(headerResult, RetrieveHeaderByHashPgStr, hash.Hex())
|
||||
return headerResult.CID, headerResult.Data, tx.Get(headerResult, RetrieveHeaderByHashPgStr, hash.Hex())
|
||||
}
|
||||
|
||||
// RetrieveUnclesByHashes returns the cids and rlp bytes for the uncles corresponding to the provided uncle hashes
|
||||
@ -243,10 +346,25 @@ func (r *IPLDRetriever) RetrieveUnclesByHashes(hashes []common.Hash) ([]string,
|
||||
return cids, uncles, nil
|
||||
}
|
||||
|
||||
// RetrieveUnclesByBlockHash returns the cids and rlp bytes for the uncles corresponding to the provided block hash (of non-omner root block)
|
||||
func (r *IPLDRetriever) RetrieveUnclesByBlockHash(hash common.Hash) ([]string, [][]byte, error) {
|
||||
// RetrieveUncles returns the cids and rlp bytes for the uncles corresponding to the provided block hash, number (of non-omner root block)
|
||||
func (r *IPLDRetriever) RetrieveUncles(tx *sqlx.Tx, hash common.Hash, number uint64) ([]string, [][]byte, error) {
|
||||
uncleResults := make([]ipldResult, 0)
|
||||
if err := r.db.Select(&uncleResults, RetrieveUnclesByBlockHashPgStr, hash.Hex()); err != nil {
|
||||
if err := tx.Select(&uncleResults, RetrieveUnclesPgStr, hash.Hex(), number); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(uncleResults))
|
||||
uncles := make([][]byte, len(uncleResults))
|
||||
for i, res := range uncleResults {
|
||||
cids[i] = res.CID
|
||||
uncles[i] = res.Data
|
||||
}
|
||||
return cids, uncles, nil
|
||||
}

// RetrieveUnclesByBlockHash returns the cids and rlp bytes for the uncles corresponding to the provided block hash (of non-omner root block)
func (r *IPLDRetriever) RetrieveUnclesByBlockHash(tx *sqlx.Tx, hash common.Hash) ([]string, [][]byte, error) {
uncleResults := make([]ipldResult, 0)
if err := tx.Select(&uncleResults, RetrieveUnclesByBlockHashPgStr, hash.Hex()); err != nil {
return nil, nil, err
}
cids := make([]string, len(uncleResults))
@@ -298,10 +416,25 @@ func (r *IPLDRetriever) RetrieveTransactionsByHashes(hashes []common.Hash) ([]st
return cids, txs, nil
}

// RetrieveTransactionsByBlockHash returns the cids and rlp bytes for the transactions corresponding to the provided block hash
func (r *IPLDRetriever) RetrieveTransactionsByBlockHash(hash common.Hash) ([]string, [][]byte, error) {
// RetrieveTransactions returns the cids and rlp bytes for the transactions corresponding to the provided block hash, number
func (r *IPLDRetriever) RetrieveTransactions(tx *sqlx.Tx, hash common.Hash, number uint64) ([]string, [][]byte, error) {
txResults := make([]ipldResult, 0)
if err := r.db.Select(&txResults, RetrieveTransactionsByBlockHashPgStr, hash.Hex()); err != nil {
if err := tx.Select(&txResults, RetrieveTransactionsPgStr, hash.Hex(), number); err != nil {
return nil, nil, err
}
cids := make([]string, len(txResults))
txs := make([][]byte, len(txResults))
for i, res := range txResults {
cids[i] = res.CID
txs[i] = res.Data
}
return cids, txs, nil
}

// RetrieveTransactionsByBlockHash returns the cids and rlp bytes for the transactions corresponding to the provided block hash
func (r *IPLDRetriever) RetrieveTransactionsByBlockHash(tx *sqlx.Tx, hash common.Hash) ([]string, [][]byte, error) {
txResults := make([]ipldResult, 0)
if err := tx.Select(&txResults, RetrieveTransactionsByBlockHashPgStr, hash.Hex()); err != nil {
return nil, nil, err
}
cids := make([]string, len(txResults))
@@ -374,11 +507,35 @@ func (r *IPLDRetriever) RetrieveReceiptsByTxHashes(hashes []common.Hash) ([]stri
return cids, rcts, nil
}

// RetrieveReceipts returns the cids and rlp bytes for the receipts corresponding to the provided block hash, number.
// cid returned corresponds to the leaf node data which contains the receipt.
func (r *IPLDRetriever) RetrieveReceipts(tx *sqlx.Tx, hash common.Hash, number uint64) ([]string, [][]byte, []common.Hash, error) {
rctResults := make([]rctIpldResult, 0)
if err := tx.Select(&rctResults, RetrieveReceiptsPgStr, hash.Hex(), number); err != nil {
return nil, nil, nil, err
}
cids := make([]string, len(rctResults))
rcts := make([][]byte, len(rctResults))
txs := make([]common.Hash, len(rctResults))

for i, res := range rctResults {
cids[i] = res.LeafCID
nodeVal, err := DecodeLeafNode(res.Data)
if err != nil {
return nil, nil, nil, err
}
rcts[i] = nodeVal
txs[i] = common.HexToHash(res.TxHash)
}

return cids, rcts, txs, nil
}

// RetrieveReceiptsByBlockHash returns the cids and rlp bytes for the receipts corresponding to the provided block hash.
// cid returned corresponds to the leaf node data which contains the receipt.
func (r *IPLDRetriever) RetrieveReceiptsByBlockHash(hash common.Hash) ([]string, [][]byte, []common.Hash, error) {
func (r *IPLDRetriever) RetrieveReceiptsByBlockHash(tx *sqlx.Tx, hash common.Hash) ([]string, [][]byte, []common.Hash, error) {
rctResults := make([]rctIpldResult, 0)
if err := r.db.Select(&rctResults, RetrieveReceiptsByBlockHashPgStr, hash.Hex()); err != nil {
if err := tx.Select(&rctResults, RetrieveReceiptsByBlockHashPgStr, hash.Hex()); err != nil {
return nil, nil, nil, err
}
cids := make([]string, len(rctResults))
@@ -435,6 +592,8 @@ func (r *IPLDRetriever) RetrieveReceiptByHash(hash common.Hash) (string, []byte,

type nodeInfo struct {
CID string `db:"cid"`
MhKey string `db:"mh_key"`
BlockNumber string `db:"block_number"`
Data []byte `db:"data"`
NodeType int `db:"node_type"`
StateLeafRemoved bool `db:"state_leaf_removed"`
@@ -449,10 +608,19 @@ func (r *IPLDRetriever) RetrieveAccountByAddressAndBlockHash(address common.Addr
return "", nil, err
}

if accountResult.NodeType == removedNode {
if accountResult.NodeType == sdtypes.Removed.Int() {
return "", EmptyNodeValue, nil
}

blockNumber, err := strconv.ParseUint(accountResult.BlockNumber, 10, 64)
if err != nil {
return "", nil, err
}
accountResult.Data, err = shared.FetchIPLD(r.db, accountResult.MhKey, blockNumber)
if err != nil {
return "", nil, err
}

var i []interface{}
if err := rlp.DecodeBytes(accountResult.Data, &i); err != nil {
return "", nil, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
@@ -472,10 +640,16 @@ func (r *IPLDRetriever) RetrieveAccountByAddressAndBlockNumber(address common.Ad
return "", nil, err
}

if accountResult.NodeType == removedNode {
if accountResult.NodeType == sdtypes.Removed.Int() {
return "", EmptyNodeValue, nil
}

var err error
accountResult.Data, err = shared.FetchIPLD(r.db, accountResult.MhKey, number)
if err != nil {
return "", nil, err
}

var i []interface{}
if err := rlp.DecodeBytes(accountResult.Data, &i); err != nil {
return "", nil, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
@@ -496,9 +670,19 @@ func (r *IPLDRetriever) RetrieveStorageAtByAddressAndStorageSlotAndBlockHash(add
}
log.Debugf("getStorageAt: state_leaf_key=%s, storage_hex=%s, storage_leaf_key=%s, block_hash=%s",
stateLeafKey.Hex(), key.Hex(), storageHash.Hex(), hash.Hex())
if storageResult.StateLeafRemoved || storageResult.NodeType == removedNode {
if storageResult.StateLeafRemoved || storageResult.NodeType == sdtypes.Removed.Int() {
return "", EmptyNodeValue, EmptyNodeValue, nil
}

blockNumber, err := strconv.ParseUint(storageResult.BlockNumber, 10, 64)
if err != nil {
return "", nil, nil, err
}
storageResult.Data, err = shared.FetchIPLD(r.db, storageResult.MhKey, blockNumber)
if err != nil {
return "", nil, nil, err
}

var i []interface{}
if err := rlp.DecodeBytes(storageResult.Data, &i); err != nil {
err = fmt.Errorf("error decoding storage leaf node rlp: %s", err.Error())
@@ -519,9 +703,16 @@ func (r *IPLDRetriever) RetrieveStorageAtByAddressAndStorageKeyAndBlockNumber(ad
return "", nil, err
}

if storageResult.StateLeafRemoved || storageResult.NodeType == removedNode {
if storageResult.StateLeafRemoved || storageResult.NodeType == sdtypes.Removed.Int() {
return "", EmptyNodeValue, nil
}

var err error
storageResult.Data, err = shared.FetchIPLD(r.db, storageResult.MhKey, number)
if err != nil {
return "", nil, err
}

var i []interface{}
if err := rlp.DecodeBytes(storageResult.Data, &i); err != nil {
return "", nil, fmt.Errorf("error decoding storage leaf node rlp: %s", err.Error())
@@ -18,6 +18,7 @@ package eth

import (
"github.com/ethereum/go-ethereum/statediff/indexer/models"
. "github.com/onsi/gomega"
)

// TxModelsContainsCID used to check if a list of TxModels contains a specific cid string
@@ -39,3 +40,10 @@ func ReceiptModelsContainsCID(rcts []models.ReceiptModel, cid string) bool {
}
return false
}

func CheckGetSliceResponse(sliceResponse GetSliceResponse, expectedResponse GetSliceResponse) {
Expect(sliceResponse.SliceID).To(Equal(expectedResponse.SliceID))
Expect(sliceResponse.MetaData.NodeStats).To(Equal(expectedResponse.MetaData.NodeStats))
Expect(sliceResponse.TrieNodes).To(Equal(expectedResponse.TrieNodes))
Expect(sliceResponse.Leaves).To(Equal(expectedResponse.Leaves))
}
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/statediff/test_helpers"
)

// Test variables
@@ -35,7 +36,7 @@ var (
TestBankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
TestBankAddress = crypto.PubkeyToAddress(TestBankKey.PublicKey) //0x71562b71999873DB5b286dF957af199Ec94617F7
TestBankFunds = big.NewInt(100000000)
Genesis = core.GenesisBlockForTesting(Testdb, TestBankAddress, TestBankFunds)
Genesis = test_helpers.GenesisBlockForTesting(Testdb, TestBankAddress, TestBankFunds)

Account1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
Account2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
@@ -38,7 +38,7 @@ import (
"github.com/multiformats/go-multihash"
log "github.com/sirupsen/logrus"

"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
)

// Test variables
@@ -231,6 +231,7 @@ var (
}
MockTrxMetaPostPublsh = []models.TxModel{
{
BlockNumber: "1",
CID: Trx1CID.String(), // This is empty until we go to publish to ipfs
MhKey: Trx1MhKey,
Src: SenderAddr.Hex(),
@@ -240,6 +241,7 @@ var (
Data: []byte{},
},
{
BlockNumber: "1",
CID: Trx2CID.String(),
MhKey: Trx2MhKey,
Src: SenderAddr.Hex(),
@@ -249,6 +251,7 @@ var (
Data: []byte{},
},
{
BlockNumber: "1",
CID: Trx3CID.String(),
MhKey: Trx3MhKey,
Src: SenderAddr.Hex(),
@@ -258,6 +261,7 @@ var (
Data: MockContractByteCode,
},
{
BlockNumber: "1",
CID: Trx4CID.String(),
MhKey: Trx4MhKey,
Src: SenderAddr.Hex(),
@@ -296,24 +300,32 @@ var (

MockRctMetaPostPublish = []models.ReceiptModel{
{
BlockNumber: "1",
HeaderID: MockBlock.Hash().String(),
LeafCID: Rct1CID.String(),
LeafMhKey: Rct1MhKey,
Contract: "",
ContractHash: "",
},
{
BlockNumber: "1",
HeaderID: MockBlock.Hash().String(),
LeafCID: Rct2CID.String(),
LeafMhKey: Rct2MhKey,
Contract: "",
ContractHash: "",
},
{
BlockNumber: "1",
HeaderID: MockBlock.Hash().String(),
LeafCID: Rct3CID.String(),
LeafMhKey: Rct3MhKey,
Contract: ContractAddress.String(),
ContractHash: ContractHash,
},
{
BlockNumber: "1",
HeaderID: MockBlock.Hash().String(),
LeafCID: Rct4CID.String(),
LeafMhKey: Rct4MhKey,
Contract: "",
@@ -391,6 +403,7 @@ var (
}
MockStateMetaPostPublish = []models.StateNodeModel{
{
BlockNumber: "1",
CID: State1CID.String(),
MhKey: State1MhKey,
Path: []byte{'\x06'},
@@ -398,6 +411,7 @@ var (
StateKey: common.BytesToHash(ContractLeafKey).Hex(),
},
{
BlockNumber: "1",
CID: State2CID.String(),
MhKey: State2MhKey,
Path: []byte{'\x0c'},
@@ -461,6 +475,7 @@ var (
StateNodes: MockStateMetaPostPublish,
StorageNodes: []models.StorageNodeWithStateKeyModel{
{
BlockNumber: "1",
Path: []byte{},
CID: StorageCID.String(),
MhKey: StorageMhKey,
@@ -483,41 +498,50 @@ var (
MockIPLDs = eth.IPLDs{
BlockNumber: new(big.Int).Set(BlockNumber),
Header: models.IPLDModel{
BlockNumber: BlockNumber.String(),
Data: HeaderIPLD.RawData(),
Key: HeaderIPLD.Cid().String(),
},
Transactions: []models.IPLDModel{
{
BlockNumber: BlockNumber.String(),
Data: Trx1IPLD.RawData(),
Key: Trx1IPLD.Cid().String(),
},
{
BlockNumber: BlockNumber.String(),
Data: Trx2IPLD.RawData(),
Key: Trx2IPLD.Cid().String(),
},
{
BlockNumber: BlockNumber.String(),
Data: Trx3IPLD.RawData(),
Key: Trx3IPLD.Cid().String(),
},
{
BlockNumber: BlockNumber.String(),
Data: Trx4IPLD.RawData(),
Key: Trx4IPLD.Cid().String(),
},
},
Receipts: []models.IPLDModel{
{
BlockNumber: BlockNumber.String(),
Data: Rct1IPLD,
Key: Rct1CID.String(),
},
{
BlockNumber: BlockNumber.String(),
Data: Rct2IPLD,
Key: Rct2CID.String(),
},
{
BlockNumber: BlockNumber.String(),
Data: Rct3IPLD,
Key: Rct3CID.String(),
},
{
BlockNumber: BlockNumber.String(),
Data: Rct4IPLD,
Key: Rct4CID.String(),
},
@@ -527,6 +551,7 @@ var (
StateLeafKey: common.BytesToHash(ContractLeafKey),
Type: sdtypes.Leaf,
IPLD: models.IPLDModel{
BlockNumber: BlockNumber.String(),
Data: State1IPLD.RawData(),
Key: State1IPLD.Cid().String(),
},
@@ -536,6 +561,7 @@ var (
StateLeafKey: common.BytesToHash(AccountLeafKey),
Type: sdtypes.Leaf,
IPLD: models.IPLDModel{
BlockNumber: BlockNumber.String(),
Data: State2IPLD.RawData(),
Key: State2IPLD.Cid().String(),
},
@@ -548,6 +574,7 @@ var (
StorageLeafKey: common.BytesToHash(StorageLeafKey),
Type: sdtypes.Leaf,
IPLD: models.IPLDModel{
BlockNumber: BlockNumber.String(),
Data: StorageIPLD.RawData(),
Key: StorageIPLD.Cid().String(),
},
@@ -567,7 +594,29 @@ var (
}

MockLondonTransactions, MockLondonReceipts, _ = createDynamicTransactionsAndReceipts(LondonBlockNum)
MockLondonBlock = createNewBlock(&MockLondonHeader, MockLondonTransactions, nil, MockLondonReceipts, new(trie.Trie))
MockLondonUncles = []*types.Header{
{
Time: 1,
Number: new(big.Int).Add(BlockNumber, big.NewInt(1)),
ParentHash: common.HexToHash("0x2"),
Root: common.HexToHash("0x1"),
TxHash: common.HexToHash("0x1"),
ReceiptHash: common.HexToHash("0x1"),
Difficulty: big.NewInt(500001),
Extra: []byte{},
},
{
Time: 2,
Number: new(big.Int).Add(BlockNumber, big.NewInt(1)),
ParentHash: common.HexToHash("0x1"),
Root: common.HexToHash("0x2"),
TxHash: common.HexToHash("0x2"),
ReceiptHash: common.HexToHash("0x2"),
Difficulty: big.NewInt(500002),
Extra: []byte{},
},
}
MockLondonBlock = createNewBlock(&MockLondonHeader, MockLondonTransactions, MockLondonUncles, MockLondonReceipts, new(trie.Trie))
)

func createNewBlock(header *types.Header, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt, hasher types.TrieHasher) *types.Block {
@@ -18,7 +18,9 @@ package eth

import (
"errors"
"fmt"
"math/big"
"strconv"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -264,3 +266,63 @@ type LogResult struct {
TxnIndex int64 `db:"txn_index"`
TxHash string `db:"tx_hash"`
}

// GetSliceResponse holds response for the eth_getSlice method
type GetSliceResponse struct {
SliceID string `json:"sliceId"`
MetaData GetSliceResponseMetadata `json:"metadata"`
TrieNodes GetSliceResponseTrieNodes `json:"trieNodes"`
Leaves map[string]GetSliceResponseAccount `json:"leaves"` // key: Keccak256Hash(address) in hex (leafKey)
}

func (sr *GetSliceResponse) init(path string, depth int, root common.Hash) {
sr.SliceID = fmt.Sprintf("%s-%d-%s", path, depth, root.String())
sr.MetaData = GetSliceResponseMetadata{
NodeStats: make(map[string]string, 0),
TimeStats: make(map[string]string, 0),
}
sr.Leaves = make(map[string]GetSliceResponseAccount)
sr.TrieNodes = GetSliceResponseTrieNodes{
Stem: make(map[string]string),
Head: make(map[string]string),
Slice: make(map[string]string),
}
}

func (sr *GetSliceResponse) populateMetaData(metaData metaDataFields) {
sr.MetaData.NodeStats["00-stem-and-head-nodes"] = strconv.Itoa(len(sr.TrieNodes.Stem) + len(sr.TrieNodes.Head))
sr.MetaData.NodeStats["01-max-depth"] = strconv.Itoa(metaData.maxDepth)
sr.MetaData.NodeStats["02-total-trie-nodes"] = strconv.Itoa(len(sr.TrieNodes.Stem) + len(sr.TrieNodes.Head) + len(sr.TrieNodes.Slice))
sr.MetaData.NodeStats["03-leaves"] = strconv.Itoa(metaData.leafCount)
sr.MetaData.NodeStats["04-smart-contracts"] = strconv.Itoa(len(sr.Leaves))

sr.MetaData.TimeStats["00-trie-loading"] = strconv.FormatInt(metaData.trieLoadingTime, 10)
sr.MetaData.TimeStats["01-fetch-stem-keys"] = strconv.FormatInt(metaData.stemNodesFetchTime, 10)
sr.MetaData.TimeStats["02-fetch-slice-keys"] = strconv.FormatInt(metaData.sliceNodesFetchTime, 10)
sr.MetaData.TimeStats["03-fetch-leaves-info"] = strconv.FormatInt(metaData.leavesFetchTime, 10)
}

type GetSliceResponseMetadata struct {
TimeStats map[string]string `json:"timeStats"` // stem, state, storage (one by one)
NodeStats map[string]string `json:"nodeStats"` // total, leaves, smart contracts
}

type GetSliceResponseTrieNodes struct {
Stem map[string]string `json:"stem"` // key: Keccak256Hash(data) in hex, value: trie node data in hex
Head map[string]string `json:"head"`
Slice map[string]string `json:"sliceNodes"`
}

type GetSliceResponseAccount struct {
StorageRoot string `json:"storageRoot"`
EVMCode string `json:"evmCode"`
}

type metaDataFields struct {
maxDepth int
leafCount int
trieLoadingTime int64
stemNodesFetchTime int64
sliceNodesFetchTime int64
leavesFetchTime int64
}
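
For reference only (not part of this commit), a small sketch of the JSON shape the exported GetSliceResponse types marshal to; the import path follows the diff, and every value below is a made-up placeholder:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
)

func main() {
	// Placeholder values only; SliceID follows the "<path>-<depth>-<root>" pattern used by init().
	resp := eth.GetSliceResponse{
		SliceID: "06-2-0x0000000000000000000000000000000000000000000000000000000000000000",
		MetaData: eth.GetSliceResponseMetadata{
			NodeStats: map[string]string{"03-leaves": "1"},
			TimeStats: map[string]string{"00-trie-loading": "12"},
		},
		TrieNodes: eth.GetSliceResponseTrieNodes{
			Stem:  map[string]string{},
			Head:  map[string]string{},
			Slice: map[string]string{},
		},
		Leaves: map[string]eth.GetSliceResponseAccount{
			"placeholder-leaf-key": {StorageRoot: "placeholder", EVMCode: "placeholder"},
		},
	}

	out, err := json.MarshalIndent(resp, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```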
@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"strings"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -91,10 +92,16 @@ func NewClient(endpoint string) *Client {
return &Client{client: client}
}

func (c *Client) GetLogs(ctx context.Context, hash common.Hash, address *common.Address) ([]LogResponse, error) {
func (c *Client) GetLogs(ctx context.Context, hash common.Hash, addresses []common.Address) ([]LogResponse, error) {
params := fmt.Sprintf(`blockHash: "%s"`, hash.String())
if address != nil {
params += fmt.Sprintf(`, contract: "%s"`, address.String())

if addresses != nil {
addressStrings := make([]string, len(addresses))
for i, address := range addresses {
addressStrings[i] = fmt.Sprintf(`"%s"`, address.String())
}

params += fmt.Sprintf(`, addresses: [%s]`, strings.Join(addressStrings, ","))
}

getLogsQuery := fmt.Sprintf(`query{
@@ -23,6 +23,7 @@ import (
"database/sql"
"errors"
"fmt"
"strings"
"time"

"github.com/ethereum/go-ethereum/common"
@@ -34,8 +35,8 @@ import (
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"

"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
)

var (
@@ -776,8 +777,8 @@ func (b *Block) Logs(ctx context.Context, args struct{ Filter BlockFilterCriteri
hash = header.Hash()
}
// Construct the range filter
filter := filters.NewBlockFilter(b.backend, hash, addresses, topics)

filterSys := filters.NewFilterSystem(b.backend, filters.Config{})
filter := filterSys.NewBlockFilter(hash, addresses, topics)
// Run the filter and return all the logs
return runFilter(ctx, b.backend, filter)
}
@@ -837,7 +838,7 @@ func (b *Block) Call(ctx context.Context, args struct {
return nil, err
}
}
result, err := eth.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, 5*time.Second, b.backend.RPCGasCap().Uint64())
result, err := eth.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, 5*time.Second, b.backend.RPCGasCap())
if err != nil {
return nil, err
}
@@ -979,7 +980,8 @@ func (r *Resolver) Logs(ctx context.Context, args struct{ Filter FilterCriteria
topics = *args.Filter.Topics
}
// Construct the range filter
filter := filters.NewRangeFilter(filters.Backend(r.backend), begin, end, addresses, topics)
filterSys := filters.NewFilterSystem(r.backend, filters.Config{})
filter := filterSys.NewRangeFilter(begin, end, addresses, topics)
return runFilter(ctx, r.backend, filter)
}

@@ -1035,12 +1037,16 @@ func (r *Resolver) GetStorageAt(ctx context.Context, args struct {

func (r *Resolver) GetLogs(ctx context.Context, args struct {
BlockHash common.Hash
Contract *common.Address
BlockNumber *BigInt
Addresses *[]common.Address
}) (*[]*Log, error) {

var filter eth.ReceiptFilter
if args.Contract != nil {
filter.LogAddresses = []string{args.Contract.String()}

if args.Addresses != nil {
filter.LogAddresses = make([]string, len(*args.Addresses))
for i, address := range *args.Addresses {
filter.LogAddresses[i] = address.String()
}
}

// Begin tx
@@ -1049,7 +1055,7 @@ func (r *Resolver) GetLogs(ctx context.Context, args struct {
return nil, err
}

filteredLogs, err := r.backend.Retriever.RetrieveFilteredGQLLogs(tx, filter, &args.BlockHash)
filteredLogs, err := r.backend.Retriever.RetrieveFilteredGQLLogs(tx, filter, &args.BlockHash, args.BlockNumber.ToInt())
if err != nil {
return nil, err
}
@@ -1266,12 +1272,14 @@ func (r *Resolver) AllEthHeaderCids(ctx context.Context, args struct {
var headerCIDs []eth.HeaderCIDRecord
var err error
if args.Condition.BlockHash != nil {
headerCID, err := r.backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(common.HexToHash(*args.Condition.BlockHash))
headerCID, err := r.backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(common.HexToHash(*args.Condition.BlockHash), args.Condition.BlockNumber.ToInt())
if err != nil {
if !strings.Contains(err.Error(), "not found") {
return nil, err
}

} else {
headerCIDs = append(headerCIDs, headerCID)
}
} else if args.Condition.BlockNumber != nil {
headerCIDs, err = r.backend.Retriever.RetrieveHeaderAndTxCIDsByBlockNumber(args.Condition.BlockNumber.ToInt().Int64())
if err != nil {
@@ -1346,8 +1354,11 @@ func (r *Resolver) AllEthHeaderCids(ctx context.Context, args struct {

func (r *Resolver) EthTransactionCidByTxHash(ctx context.Context, args struct {
TxHash string
BlockNumber *BigInt
}) (*EthTransactionCID, error) {
txCID, err := r.backend.Retriever.RetrieveTxCIDByHash(args.TxHash)
// Need not check args.BlockNumber for nil as .ToInt() uses a pointer receiver and returns nil if BlockNumber is nil
// https://stackoverflow.com/questions/42238624/calling-a-method-on-a-nil-struct-pointer-doesnt-panic-why-not
txCID, err := r.backend.Retriever.RetrieveTxCIDByHash(args.TxHash, args.BlockNumber.ToInt())

if err != nil {
return nil, err
@@ -36,11 +36,11 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers"
"github.com/vulcanize/ipld-eth-server/v3/pkg/graphql"
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
ethServerShared "github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth/test_helpers"
"github.com/cerc-io/ipld-eth-server/v4/pkg/graphql"
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
ethServerShared "github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
)

var _ = Describe("GraphQL", func() {
@@ -174,7 +174,7 @@ var _ = Describe("GraphQL", func() {

Describe("eth_getLogs", func() {
It("Retrieves logs that matches the provided blockHash and contract address", func() {
logs, err := client.GetLogs(ctx, blockHash, &contractAddress)
logs, err := client.GetLogs(ctx, blockHash, []common.Address{contractAddress})
Expect(err).ToNot(HaveOccurred())

expectedLogs := []graphql.LogResponse{
@@ -191,7 +191,7 @@ var _ = Describe("GraphQL", func() {
})

It("Retrieves logs for the failed receipt status that matches the provided blockHash and another contract address", func() {
logs, err := client.GetLogs(ctx, blockHash, &test_helpers.AnotherAddress2)
logs, err := client.GetLogs(ctx, blockHash, []common.Address{test_helpers.AnotherAddress2})
Expect(err).ToNot(HaveOccurred())

expectedLogs := []graphql.LogResponse{
@@ -207,6 +207,30 @@ var _ = Describe("GraphQL", func() {
Expect(logs).To(Equal(expectedLogs))
})

It("Retrieves logs that matches the provided blockHash and multiple contract addresses", func() {
logs, err := client.GetLogs(ctx, blockHash, []common.Address{contractAddress, test_helpers.AnotherAddress2})
Expect(err).ToNot(HaveOccurred())

expectedLogs := []graphql.LogResponse{
{
Topics: test_helpers.MockLog1.Topics,
Data: hexutil.Bytes(test_helpers.MockLog1.Data),
Transaction: graphql.TransactionResponse{Hash: test_helpers.MockTransactions[0].Hash()},
ReceiptCID: test_helpers.Rct1CID.String(),
Status: int32(test_helpers.MockReceipts[0].Status),
},
{
Topics: test_helpers.MockLog6.Topics,
Data: hexutil.Bytes(test_helpers.MockLog6.Data),
Transaction: graphql.TransactionResponse{Hash: test_helpers.MockTransactions[3].Hash()},
ReceiptCID: test_helpers.Rct4CID.String(),
Status: int32(test_helpers.MockReceipts[3].Status),
},
}

Expect(logs).To(Equal(expectedLogs))
})

It("Retrieves all the logs for the receipt that matches the provided blockHash and nil contract address", func() {
logs, err := client.GetLogs(ctx, blockHash, nil)
Expect(err).ToNot(HaveOccurred())
@@ -214,7 +238,7 @@ var _ = Describe("GraphQL", func() {
})

It("Retrieves logs with random hash", func() {
logs, err := client.GetLogs(ctx, randomHash, &contractAddress)
logs, err := client.GetLogs(ctx, randomHash, []common.Address{contractAddress})
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(0))
})
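
As an aside (not part of this commit), a minimal sketch of calling the updated GraphQL client from outside the test suite; the endpoint URL, block hash, and address are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/common"

	"github.com/cerc-io/ipld-eth-server/v4/pkg/graphql"
)

func main() {
	// Assumed endpoint; point this at wherever the ipld-eth-server GraphQL API is exposed.
	client := graphql.NewClient("http://127.0.0.1:8083/graphql")

	blockHash := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") // placeholder
	addrs := []common.Address{
		common.HexToAddress("0x0000000000000000000000000000000000000000"), // placeholder; pass nil to fetch logs for all contracts
	}

	logs, err := client.GetLogs(context.Background(), blockHash, addrs)
	if err != nil {
		panic(err)
	}
	fmt.Printf("got %d logs\n", len(logs))
}
```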
@@ -272,7 +296,7 @@ var _ = Describe("GraphQL", func() {
allEthHeaderCIDsResp, err := client.AllEthHeaderCIDs(ctx, graphql.EthHeaderCIDCondition{BlockHash: &blockHash})
Expect(err).ToNot(HaveOccurred())

headerCID, err := backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(blocks[1].Hash())
headerCID, err := backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(blocks[1].Hash(), nil)
Expect(err).ToNot(HaveOccurred())

Expect(len(allEthHeaderCIDsResp.Nodes)).To(Equal(1))
@@ -287,7 +311,7 @@ var _ = Describe("GraphQL", func() {
ethTransactionCIDResp, err := client.EthTransactionCIDByTxHash(ctx, txHash)
Expect(err).ToNot(HaveOccurred())

txCID, err := backend.Retriever.RetrieveTxCIDByHash(txHash)
txCID, err := backend.Retriever.RetrieveTxCIDByHash(txHash, nil)
Expect(err).ToNot(HaveOccurred())

compareEthTxCID(*ethTransactionCIDResp, txCID)
@@ -343,12 +343,12 @@ const schema string = `
getStorageAt(blockHash: Bytes32!, contract: Address!, slot: Bytes32!): StorageResult

# Get contract logs by block hash and contract address.
getLogs(blockHash: Bytes32!, contract: Address): [Log!]
getLogs(blockHash: Bytes32!, blockNumber: BigInt, addresses: [Address!]): [Log!]

# PostGraphile alternative to get headers with transactions using block number or block hash.
allEthHeaderCids(condition: EthHeaderCidCondition): EthHeaderCidsConnection

# PostGraphile alternative to get transactions using transaction hash.
ethTransactionCidByTxHash(txHash: String!): EthTransactionCid
ethTransactionCidByTxHash(txHash: String!, blockNumber: BigInt): EthTransactionCid
}
`
@@ -29,7 +29,7 @@ import (
"github.com/graph-gophers/graphql-go/relay"
"github.com/sirupsen/logrus"

"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
)

// Service encapsulates a GraphQL service.
@@ -20,7 +20,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

"github.com/vulcanize/ipld-eth-server/v3/pkg/net"
"github.com/cerc-io/ipld-eth-server/v4/pkg/net"
)

var _ = Describe("API", func() {
@@ -23,17 +23,19 @@ import (
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
log "github.com/sirupsen/logrus"

"github.com/cerc-io/ipld-eth-server/v4/pkg/prom"
)

// StartHTTPEndpoint starts the HTTP RPC endpoint, configured with cors/vhosts/modules.
func StartHTTPEndpoint(endpoint string, apis []rpc.API, modules []string, cors []string, vhosts []string, timeouts rpc.HTTPTimeouts) (*rpc.Server, error) {

srv := rpc.NewServer()
err := node.RegisterApis(apis, modules, srv, false)
err := node.RegisterApis(apis, modules, srv)
if err != nil {
utils.Fatalf("Could not register HTTP API: %w", err)
}
handler := node.NewHTTPHandlerStack(srv, cors, vhosts, nil)
handler := prom.HTTPMiddleware(node.NewHTTPHandlerStack(srv, cors, vhosts, nil))

// start http server
_, addr, err := node.StartHTTPEndpoint(endpoint, rpc.DefaultHTTPTimeouts, handler)
@@ -25,7 +25,8 @@ import (
"github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/rpc"
log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-eth-server/v3/pkg/prom"

"github.com/cerc-io/ipld-eth-server/v4/pkg/prom"
)

var (
@@ -24,11 +24,11 @@ import (
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"

"github.com/vulcanize/ipld-eth-server/v3/pkg/prom"
"github.com/cerc-io/ipld-eth-server/v4/pkg/prom"
)

// StartWSEndpoint starts a websocket endpoint.
func StartWSEndpoint(endpoint string, apis []rpc.API, modules []string, wsOrigins []string, exposeAll bool) (net.Listener, *rpc.Server, error) {
func StartWSEndpoint(endpoint string, apis []rpc.API, modules []string, wsOrigins []string) (net.Listener, *rpc.Server, error) {
// All APIs registered, start the HTTP listener
var (
listener net.Listener
@@ -37,7 +37,7 @@ func StartWSEndpoint(endpoint string, apis []rpc.API, modules []string, wsOrigin

// Register all the APIs exposed by the services
handler := rpc.NewServer()
err = node.RegisterApis(apis, modules, handler, exposeAll)
err = node.RegisterApis(apis, modules, handler)
if err != nil {
utils.Fatalf("Could not register WS API: %w", err)
}
@@ -23,7 +23,7 @@ import (
"github.com/ethereum/go-ethereum/statediff/types"
log "github.com/sirupsen/logrus"

"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
)

// APIName is the namespace used for the state diffing service API
@@ -32,8 +32,8 @@ import (
"github.com/jmoiron/sqlx"
"github.com/spf13/viper"

"github.com/vulcanize/ipld-eth-server/v3/pkg/prom"
ethServerShared "github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
"github.com/cerc-io/ipld-eth-server/v4/pkg/prom"
ethServerShared "github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
)

// Env variables
@@ -223,6 +223,8 @@ func NewConfig() (*Config, error) {
if rpcGasCap, ok := new(big.Int).SetString(rpcGasCapStr, 10); ok {
c.RPCGasCap = rpcGasCap
}
} else {
c.RPCGasCap = big.NewInt(0)
}
chainConfigPath := viper.GetString("ethereum.chainConfig")
if chainConfigPath != "" {
@@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/tracers"
ethnode "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp"
@@ -31,8 +32,9 @@ import (
"github.com/jmoiron/sqlx"
log "github.com/sirupsen/logrus"

"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
"github.com/vulcanize/ipld-eth-server/v3/pkg/net"
"github.com/cerc-io/ipld-eth-server/v4/pkg/debug"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/net"
)

const (
@@ -146,12 +148,18 @@ func (sap *Service) APIs() []rpc.API {
if err != nil {
log.Fatalf("unable to create public eth api: %v", err)
}
return append(apis, rpc.API{

debugTracerAPI := tracers.APIs(&debug.Backend{Backend: *sap.backend})[0]

return append(apis,
rpc.API{
Namespace: eth.APIName,
Version: eth.APIVersion,
Service: ethAPI,
Public: true,
})
},
debugTracerAPI,
)
}

// Serve listens for incoming converter data off the screenAndServePayload from the Sync process
@@ -18,11 +18,9 @@ package shared

import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ipfs/go-cid"
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
node "github.com/ipfs/go-ipld-format"
"github.com/jmoiron/sqlx"
"github.com/sirupsen/logrus"
)
@@ -50,31 +48,18 @@ func Rollback(tx *sqlx.Tx) {
}
}

// PublishIPLD is used to insert an ipld into Postgres blockstore with the provided tx
func PublishIPLD(tx *sqlx.Tx, i node.Node) error {
dbKey := dshelp.MultihashToDsKey(i.Cid().Hash())
prefixedKey := blockstore.BlockPrefix.String() + dbKey.String()
raw := i.RawData()
_, err := tx.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`, prefixedKey, raw)
return err
// FetchIPLDByMhKeyAndBlockNumber is used to retrieve an ipld from Postgres blockstore with the provided tx, mhkey string and blockNumber
func FetchIPLDByMhKeyAndBlockNumber(tx *sqlx.Tx, mhKey string, blockNumber uint64) ([]byte, error) {
pgStr := `SELECT data FROM public.blocks WHERE key = $1 AND block_number = $2`
var block []byte
return block, tx.Get(&block, pgStr, mhKey, blockNumber)
}

// FetchIPLD is used to retrieve an ipld from Postgres blockstore with the provided tx and cid string
func FetchIPLD(tx *sqlx.Tx, cid string) ([]byte, error) {
mhKey, err := MultihashKeyFromCIDString(cid)
if err != nil {
return nil, err
}
pgStr := `SELECT data FROM public.blocks WHERE key = $1`
// FetchIPLD is used to retrieve an IPLD from Postgres mhkey and blockNumber
func FetchIPLD(db *sqlx.DB, mhKey string, blockNumber uint64) ([]byte, error) {
pgStr := `SELECT data FROM public.blocks WHERE key = $1 AND block_number = $2`
var block []byte
return block, tx.Get(&block, pgStr, mhKey)
}

// FetchIPLDByMhKey is used to retrieve an ipld from Postgres blockstore with the provided tx and mhkey string
func FetchIPLDByMhKey(tx *sqlx.Tx, mhKey string) ([]byte, error) {
pgStr := `SELECT data FROM public.blocks WHERE key = $1`
var block []byte
return block, tx.Get(&block, pgStr, mhKey)
return block, db.Get(&block, pgStr, mhKey, blockNumber)
}

// MultihashKeyFromCID converts a cid into a blockstore-prefixed multihash db key string
@@ -92,15 +77,3 @@ func MultihashKeyFromCIDString(c string) (string, error) {
dbKey := dshelp.MultihashToDsKey(dc.Hash())
return blockstore.BlockPrefix.String() + dbKey.String(), nil
}

// PublishRaw derives a cid from raw bytes and provided codec and multihash type, and writes it to the db tx
func PublishRaw(tx *sqlx.Tx, codec, mh uint64, raw []byte) (string, error) {
c, err := ipld.RawdataToCid(codec, raw, mh)
if err != nil {
return "", err
}
dbKey := dshelp.MultihashToDsKey(c.Hash())
prefixedKey := blockstore.BlockPrefix.String() + dbKey.String()
_, err = tx.Exec(`INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING`, prefixedKey, raw)
return c.String(), err
}
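
Illustrative sketch (not part of this commit) of the new db- and block-number-keyed lookup that replaces the tx/CID-based FetchIPLD; the Postgres driver, DSN, CID string, and block number are all assumptions:

```go
package main

import (
	"fmt"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // assumed Postgres driver; the project may use a different one

	"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
)

func main() {
	// Assumed DSN, loosely based on the test credentials used elsewhere in this repo.
	db, err := sqlx.Connect("postgres",
		"postgres://vdbm:password@127.0.0.1:8077/vulcanize_testing?sslmode=disable")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Convert a CID string (placeholder) to the blockstore-prefixed multihash key.
	mhKey, err := shared.MultihashKeyFromCIDString("placeholder-cid")
	if err != nil {
		panic(err)
	}

	// New signature: keyed by *sqlx.DB, mhKey, and block number.
	data, err := shared.FetchIPLD(db, mhKey, 1)
	if err != nil {
		panic(err)
	}
	fmt.Printf("fetched %d bytes\n", len(data))
}
```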
@@ -58,8 +58,12 @@ func SetupDB() *sqlx.DB {
func TearDownDB(db *sqlx.DB) {
tx, err := db.Beginx()
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM nodes`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.header_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.uncle_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.transaction_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.receipt_cids`)
@@ -68,6 +72,10 @@ func TearDownDB(db *sqlx.DB) {
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.storage_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.state_accounts`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.access_list_elements`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM blocks`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.log_cids`)
@@ -1,8 +1,16 @@
# Clear up existing docker images and volume.
#!/bin/bash

# Remove any existing containers / volumes
docker-compose down --remove-orphans --volumes

docker-compose -f docker-compose.yml up -d ipld-eth-db
sleep 10
# Spin up DB and run migrations
docker-compose up -d migrations ipld-eth-db
sleep 30

# Run unit tests
go clean -testcache
PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing make test

# Clean up
docker-compose down --remove-orphans --volumes
rm -rf out/
@@ -2,18 +2,24 @@

## Setup

- Clone [stack-orchestrator](https://github.com/vulcanize/stack-orchestrator) and [go-ethereum](https://github.com/vulcanize/go-ethereum) repositories.
- Clone [stack-orchestrator](https://github.com/vulcanize/stack-orchestrator), [ipld-eth-db](https://github.com/vulcanize/ipld-eth-db) [go-ethereum](https://github.com/vulcanize/go-ethereum) repositories.

- Checkout [v3 release](https://github.com/vulcanize/go-ethereum/releases/tag/v1.10.17-statediff-3.2.1) in go-ethereum repo.
- Checkout [v4 release](https://github.com/vulcanize/ipld-eth-db/releases/tag/v4.2.1-alpha) in ipld-eth-db repo.
```bash
# In ipld-eth-db repo.
git checkout v4.2.1-alpha
```

- Checkout [v4 release](https://github.com/vulcanize/go-ethereum/releases/tag/v1.10.23-statediff-4.2.0-alpha) in go-ethereum repo.
```bash
# In go-ethereum repo.
git checkout v1.10.17-statediff-3.2.1
git checkout v1.10.23-statediff-4.2.0-alpha
```

- Checkout working commit in stack-orchestrator repo.
```bash
# In stack-orchestrator repo.
git checkout fcbc74451c5494664fe21f765e89c9c6565c07cb
git checkout f2fd766f5400fcb9eb47b50675d2e3b1f2753702
```

## Run
@@ -27,16 +33,34 @@

- Run integration tests:

- Update (Replace existing content) config file [config.sh](https://github.com/vulcanize/stack-orchestrator/blob/main/config.sh) in stack-orchestrator repo:
- In stack-orchestrator repo, create config file:

```bash
cd helper-scripts

./create-config.sh
```

A `config.sh` will be created in the root directory.

- Update/Edit the generated config file with:

```bash
#!/bin/bash

# Path to ipld-eth-server repo.
vulcanize_ipld_eth_db=~/ipld-eth-db/

# Path to go-ethereum repo.
vulcanize_go_ethereum=~/go-ethereum/

# Path to ipld-eth-server repo.
vulcanize_ipld_eth_server=~/ipld-eth-server/

# Path to test contract.
vulcanize_test_contract=~/ipld-eth-server/test/contract

genesis_file_path='start-up-files/go-ethereum/genesis.json'
db_write=true
eth_forward_eth_calls=false
eth_proxy_on_error=false
@@ -44,26 +68,30 @@
```

- Run stack-orchestrator:

```bash
# In stack-orchestrator root directory.
cd helper-scripts

./wrapper.sh \
-e docker \
-d ../docker/latest/docker-compose-db.yml \
-d ../docker/local/docker-compose-db-sharding.yml \
-d ../docker/local/docker-compose-go-ethereum.yml \
-d ../docker/local/docker-compose-ipld-eth-server.yml \
-d ../docker/local/docker-compose-contract.yml \
-v remove \
-p ../config.sh
```

- Run test:

```bash
# In ipld-eth-server root directory.
./scripts/run_integration_test.sh
```

- Update `config.sh` file:
- Update stack-orchestrator `config.sh` file:

```bash
#!/bin/bash

@@ -73,6 +101,10 @@
# Path to ipld-eth-server repo.
vulcanize_ipld_eth_server=~/ipld-eth-server/

# Path to test contract.
vulcanize_test_contract=~/ipld-eth-server/test/contract

genesis_file_path='start-up-files/go-ethereum/genesis.json'
db_write=false
eth_forward_eth_calls=true
eth_proxy_on_error=false
@@ -13,8 +13,8 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
integration "github.com/vulcanize/ipld-eth-server/v3/test"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
integration "github.com/cerc-io/ipld-eth-server/v4/test"
)

var _ = Describe("Integration test", func() {
@@ -431,7 +431,7 @@ var _ = Describe("Integration test", func() {
Expect(err).ToNot(HaveOccurred())

_, err = ipldClient.ChainID(ctx)
Expect(err).To(HaveOccurred())
Expect(err).ToNot(HaveOccurred())
})
})
})
@@ -16,8 +16,8 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
integration "github.com/vulcanize/ipld-eth-server/v3/test"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
integration "github.com/cerc-io/ipld-eth-server/v4/test"
)

const nonExistingBlockHash = "0x111111111111111111111111111111111111111111111111111111111111111"
@@ -10,11 +10,11 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff/types"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
integration "github.com/vulcanize/ipld-eth-server/v3/test"

integration "github.com/cerc-io/ipld-eth-server/v4/test"
)

var (
@@ -226,8 +226,8 @@ var _ = Describe("WatchAddress integration test", func() {

Context("one contract being watched", func() {
It("indexes state only for the watched contract", func() {
operation := types.Add
args := []types.WatchAddressArg{
operation := sdtypes.Add
args := []sdtypes.WatchAddressArg{
{
Address: contract1.Address,
CreatedAt: uint64(contract1.BlockNumber),
@@ -308,8 +308,8 @@ var _ = Describe("WatchAddress integration test", func() {

Context("contract added to a non-empty watch-list", func() {
It("indexes state only for the watched contracts", func() {
operation := types.Add
args := []types.WatchAddressArg{
operation := sdtypes.Add
args := []sdtypes.WatchAddressArg{
{
Address: contract2.Address,
CreatedAt: uint64(contract2.BlockNumber),
@@ -392,8 +392,8 @@ var _ = Describe("WatchAddress integration test", func() {

Context("contract removed from the watch-list", func() {
It("indexes state only for the watched contract", func() {
operation := types.Remove
args := []types.WatchAddressArg{
operation := sdtypes.Remove
args := []sdtypes.WatchAddressArg{
{
Address: contract1.Address,
CreatedAt: uint64(contract1.BlockNumber),
@@ -474,8 +474,8 @@ var _ = Describe("WatchAddress integration test", func() {

Context("list of watched addresses set", func() {
It("indexes state only for the watched contracts", func() {
operation := types.Set
args := []types.WatchAddressArg{
operation := sdtypes.Set
args := []sdtypes.WatchAddressArg{
{
Address: contract1.Address,
CreatedAt: uint64(contract1.BlockNumber),
@@ -562,8 +562,8 @@ var _ = Describe("WatchAddress integration test", func() {

Context("list of watched addresses cleared", func() {
It("indexes state for all the contracts", func() {
operation := types.Clear
args := []types.WatchAddressArg{}
operation := sdtypes.Clear
args := []sdtypes.WatchAddressArg{}
ipldErr := ipldRPCClient.Call(nil, ipldMethod, operation, args)
Expect(ipldErr).ToNot(HaveOccurred())

@@ -656,7 +656,7 @@ var _ = Describe("WatchAddress integration test", func() {
})

It("returns an error on args of invalid type", func() {
operation := types.Add
operation := sdtypes.Add
args := []string{"WrongArg"}

gethErr := gethRPCClient.Call(nil, gethMethod, operation, args)
@@ -33,7 +33,7 @@ func init() {
func setTestConfig() {
vip := viper.New()
vip.SetConfigName("testing")
vip.AddConfigPath("$GOPATH/src/github.com/vulcanize/ipld-eth-server/environments/")
vip.AddConfigPath("$GOPATH/src/github.com/cerc-io/ipld-eth-server/environments/")
if err := vip.ReadInConfig(); err != nil {
logrus.Fatal(err)
}
@@ -19,10 +19,10 @@ package version
import "fmt"

const (
Major = 2 // Major version component of the current release
Minor = 0 // Minor version component of the current release
Major = 4 // Major version component of the current release
Minor = 2 // Minor version component of the current release
Patch = 0 // Patch version component of the current release
Meta = "" // Version metadata to append to the version string
Meta = "alpha" // Version metadata to append to the version string
)

// Version holds the textual version string.