Compare commits


10 Commits

Author SHA1 Message Date
Michael Shaw 70b6f62548 targeted tag was incomplete 2022-09-19 14:41:19 -04:00
Michael Shaw e7fb82475b first try at git.vdb.to as conatiner repository 2022-09-19 13:20:30 -04:00
Michael Shaw d2be539fd1 switching back to github for running unit test 2022-09-19 00:11:54 -04:00
Michael Shaw 7c77b491ed more cerc-io migrations to utilize new git.vdb.to gitea instance 2022-09-18 22:11:25 -04:00
Michael Shaw 31407b5e79 another test lost Describe imports... not caught in go build -a??? 2022-09-16 15:25:25 -04:00
Michael Shaw 9cc02cae23 another test lost Describe imports 2022-09-16 15:07:59 -04:00
Michael Shaw 7b1dab40d5 cleaning up more vulcanize refs in ci/cd 2022-09-16 11:03:28 -04:00
Michael Shaw 89c321d8cf Describe imports got removed 2022-09-14 16:12:08 -04:00
Michael Shaw 1dd5b9ab5c cerc refactor updates for dependencies 2022-09-14 15:50:24 -04:00
Michael Shaw 44ccf2bd0b cerc refactor waiting on unpublished dependencies 2022-09-14 01:20:16 -04:00
113 changed files with 23264 additions and 8931 deletions


@ -1,16 +1,16 @@
.git
.travis.yml
.idea
bin
.gitignore
.private_blockchain_password
.travis.yml
integration_test
LICENSE
postgraphile
.private_blockchain_password
README.md
integration
test
scripts
Supfile
test_config
.travis.yml
vulcanizedb.log
Dockerfile
**/node_modules

79
.github/workflows/on-pr-publish.yaml vendored Normal file

@ -0,0 +1,79 @@
name: Test, Build, and/or Publish
on:
release:
types: [published]
pull_request:
jobs:
pre_job:
# continue-on-error: true # Uncomment once integration is finished
runs-on: ubuntu-latest
# Map a step output to a job output
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- id: skip_check
uses: fkirc/skip-duplicate-actions@v4
with:
# All of these options are optional, so you can remove them if you are happy with the defaults
concurrent_skipping: "never"
skip_after_successful_duplicate: "true"
do_not_skip: '["workflow_dispatch", "schedule"]'
run-tests:
uses: ./.github/workflows/tests.yaml
if: ${{ needs.pre_job.outputs.should_skip != 'true' }}
needs: pre_job
secrets:
BUILD_HOSTNAME: ${{ secrets.BUILD_HOSTNAME }}
BUILD_USERNAME: ${{ secrets.BUILD_USERNAME }}
BUILD_KEY: ${{ secrets.BUILD_KEY }}
with:
STACK_ORCHESTRATOR_REF: "f2fd766f5400fcb9eb47b50675d2e3b1f2753702"
GO_ETHEREUM_REF: "be1757b9fd884cb20c8d7faac8fa81fc49bb7216"
IPLD_ETH_DB_REF: "be345e0733d2c025e4082c5154e441317ae94cf7"
build:
name: Run docker build
runs-on: ubuntu-latest
needs: run-tests
if: |
always() &&
(needs.run-tests.result == 'success' || needs.run-tests.result == 'skipped') &&
github.event_name == 'release'
steps:
- uses: actions/checkout@v2
- name: Get the version
id: vars
run: echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
- name: Run docker build
run: make docker-build
- name: Tag docker image
run: docker tag cerc-io/ipld-eth-server git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
- name: Docker Login
run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://git.vdb.to -u cerccicd --password-stdin
- name: Docker Push
run: docker push git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
push_to_registries:
name: Push Docker image to Docker Hub
runs-on: ubuntu-latest
if: |
always() &&
(needs.build.result == 'success') &&
github.event_name == 'release'
needs: build
steps:
- name: Get the version
id: vars
run: |
echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
echo ::set-output name=tag::$(echo ${GITHUB_REF#refs/tags/})
- name: Docker Login to Github Registry
run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://git.vdb.to -u cerccicd --password-stdin
- name: Docker Pull
run: docker pull git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
- name: Docker Login to Docker Registry
run: echo ${{ secrets.VULCANIZEJENKINS_PAT }} | docker login -u vulcanizejenkins --password-stdin
- name: Tag docker image
run: docker tag git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}} cerc-io/ipld-eth-server:${{steps.vars.outputs.tag}}
- name: Docker Push to Docker Hub
run: docker push cerc-io/ipld-eth-server:${{steps.vars.outputs.tag}}


@ -1,28 +0,0 @@
name: Publish Docker image
on:
release:
types: [published, edited]
jobs:
build:
name: Build and publish image
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- id: vars
name: Output SHA and version tag
run: |
echo "sha=${GITHUB_SHA:0:7}" >> $GITHUB_OUTPUT
echo "tag=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
- name: Build and tag Docker image
env:
GIT_VDBTO_TOKEN: ${{ secrets.CICD_REPO_TOKEN }}
run: |
docker build . \
--build-arg GIT_VDBTO_TOKEN \
-t git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}} \
-t git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.tag}}
- name: Push Docker tags
run: |
echo ${{ secrets.GITEA_PUBLISH_TOKEN }} | docker login https://git.vdb.to -u cerccicd --password-stdin
docker push git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
docker push git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.tag}}

29
.github/workflows/run_unit_test.sh vendored Executable file

@ -0,0 +1,29 @@
#!/bin/bash
set -e
# Set up repo
start_dir=$(pwd)
temp_dir=$(mktemp -d)
cd $temp_dir
git clone -b $(cat /tmp/git_head_ref) "https://github.com/$(cat /tmp/git_repository).git"
cd ipld-eth-server
## Remove the branch and github related info. This way future runs won't be confused.
rm -f /tmp/git_head_ref /tmp/git_repository
# Spin up DB and run migrations
echo 'docker-compose up -d migrations ipld-eth-db'
docker-compose up -d migrations ipld-eth-db
trap "docker-compose down -v --remove-orphans; cd $start_dir ; rm -r $temp_dir" SIGINT SIGTERM ERR
sleep 30
# Remove old logs so there's no confusion, then run test
rm -f /tmp/test.log /tmp/return_test.txt
PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=localhost DATABASE_NAME=vulcanize_testing make test > /tmp/test.log
echo $? > /tmp/return_test.txt
# Clean up
docker-compose down -v --remove-orphans
cd $start_dir
rm -fr $temp_dir


@ -1,111 +1,212 @@
name: Test the stack.
on:
workflow_call:
# Job headers are hidden when not top-level - run them directly for readability until fixed:
# https://github.com/go-gitea/gitea/issues/26736
pull_request:
branches: '*'
push:
branches:
- main
- ci-test
env:
# Needed until we can incorporate docker startup into the executor container
DOCKER_HOST: unix:///var/run/dind.sock
SO_VERSION: v1.1.0-e0b5318-202309201927 # contains fixes for plugeth stack
secrets:
BUILD_HOSTNAME:
required: true
BUILD_USERNAME:
required: true
BUILD_KEY:
required: true
inputs:
STACK_ORCHESTRATOR_REF:
required: true
type: string
GO_ETHEREUM_REF:
required: true
type: string
IPLD_ETH_DB_REF:
required: true
type: string
jobs:
build:
name: Run docker build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Run docker build
run: make docker-build
test:
name: Run unit tests
env:
GOPATH: /tmp/go
# To run the unit tests you need to add secrets to your repository.
BUILD_HOSTNAME: ${{ secrets.BUILD_HOSTNAME }}
BUILD_USERNAME: ${{ secrets.BUILD_USERNAME }}
BUILD_KEY: ${{ secrets.BUILD_KEY }}
#strategy:
# matrix:
# go-version: [1.16.x, 1.17.x, 1.18.x]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
check-latest: true
- name: Run dockerd
run: |
dockerd -H $DOCKER_HOST --userland-proxy=false &
sleep 5
- name: Run DB container
run: docker compose -f test/compose-db.yml up --wait --quiet-pull
- name: Configure Gitea access
env:
TOKEN: ${{ secrets.CICD_REPO_TOKEN }}
run: |
git config --global url."https://$TOKEN:@git.vdb.to/".insteadOf https://git.vdb.to/
- name: Build and run tests
run: |
go install github.com/onsi/ginkgo/v2/ginkgo
ginkgo -v -r --skipPackage=./integration
- uses: actions/checkout@v2
integration-test:
# Past experience with GHA has taught me to store variables in files instead of passing them as variables.
- name: Output variables to files
run: |
echo $GITHUB_REPOSITORY > /tmp/git_repository
[ -z "$GITHUB_HEAD_REF" ] && echo $GITHUB_REF_NAME > /tmp/git_head_ref || echo $GITHUB_HEAD_REF > /tmp/git_head_ref
echo "-----BEGIN OPENSSH PRIVATE KEY-----" >> /tmp/key
echo ${{ env.BUILD_KEY }} >> /tmp/key
echo "-----END OPENSSH PRIVATE KEY-----" >> /tmp/key
chmod 400 /tmp/key
cat /tmp/git_repository
cat /tmp/git_head_ref
- name: Raw SCP
run: |
scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key /tmp/git_repository ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/git_repository
scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key /tmp/git_head_ref ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/git_head_ref
scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key .github/workflows/run_unit_test.sh ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/run_unit_test.sh
- name: Trigger Unit Test
run: |
ssh -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }} go install github.com/onsi/ginkgo/ginkgo@latest
ssh -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }} /tmp/run_unit_test.sh
- name: Get the logs and cat them
run: |
scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/test.log .
cat ./test.log
- name: Check Error Code
run: |
scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/return_test.txt .
[ $(cat ./return_test.txt) -eq 0 ]
integrationtest:
name: Run integration tests
env:
GOPATH: /tmp/go
DB_WRITE: true
ETH_FORWARD_ETH_CALLS: false
ETH_PROXY_ON_ERROR: false
ETH_HTTP_PATH: "go-ethereum:8545"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
- name: Create GOPATH
run: mkdir -p /tmp/go
- uses: actions/setup-go@v3
with:
go-version-file: 'go.mod'
go-version: "1.18.x"
check-latest: true
- name: Run dockerd
run: |
dockerd -H $DOCKER_HOST --userland-proxy=false &
sleep 5
- name: Build server image
env:
GIT_VDBTO_TOKEN: ${{ secrets.CICD_REPO_TOKEN }}
run: docker build . -t cerc/ipld-eth-server:local --build-arg GIT_VDBTO_TOKEN
- name: Install jq
env:
DEBIAN_FRONTEND: noninteractive
run: apt-get update && apt-get install -y jq
- name: Install Python
uses: actions/setup-python@v4
- uses: actions/checkout@v2
with:
python-version: '3.11'
- name: Install stack-orchestrator
uses: actions/checkout@v3
path: "./ipld-eth-server"
- uses: actions/checkout@v2
with:
repository: cerc-io/stack-orchestrator
ref: ${{ env.SO_VERSION }}
path: ./stack-orchestrator
- run: pip install ./stack-orchestrator
- name: Configure Gitea access
env:
TOKEN: ${{ secrets.CICD_REPO_TOKEN }}
ref: ${{ inputs.STACK_ORCHESTRATOR_REF }}
path: "./stack-orchestrator/"
repository: vulcanize/stack-orchestrator
- uses: actions/checkout@v2
with:
ref: ${{ inputs.GO_ETHEREUM_REF }}
repository: cerc-io/go-ethereum
path: "./go-ethereum/"
- uses: actions/checkout@v2
with:
ref: ${{ inputs.IPLD_ETH_DB_REF }}
repository: cerc-io/ipld-eth-db
path: "./ipld-eth-db/"
- name: Create config file
run: |
git config --global url."https://$TOKEN:@git.vdb.to/".insteadOf https://git.vdb.to/
echo vulcanize_go_ethereum=$GITHUB_WORKSPACE/go-ethereum/ > ./config.sh
echo vulcanize_ipld_eth_db=$GITHUB_WORKSPACE/ipld-eth-db/ >> ./config.sh
echo vulcanize_ipld_eth_server=$GITHUB_WORKSPACE/ipld-eth-server/ >> ./config.sh
echo vulcanize_test_contract=$GITHUB_WORKSPACE/ipld-eth-server/test/contract >> ./config.sh
echo genesis_file_path=start-up-files/go-ethereum/genesis.json >> ./config.sh
echo db_write=$DB_WRITE >> ./config.sh
echo eth_forward_eth_calls=$ETH_FORWARD_ETH_CALLS >> ./config.sh
echo eth_proxy_on_error=$ETH_PROXY_ON_ERROR >> ./config.sh
echo eth_http_path=$ETH_HTTP_PATH >> ./config.sh
cat ./config.sh
- name: Build geth
run: |
cd $GITHUB_WORKSPACE/stack-orchestrator/helper-scripts
./compile-geth.sh \
-p "$GITHUB_WORKSPACE/config.sh" \
-e docker
- name: Run docker compose
run: |
docker-compose \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-server.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-contract.yml" \
--env-file "$GITHUB_WORKSPACE/config.sh" \
up -d --build
- name: Test
run: |
cd $GITHUB_WORKSPACE/ipld-eth-server
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8081)" != "200" ]; do echo "waiting for ipld-eth-server..." && sleep 5; done && \
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8545)" != "200" ]; do echo "waiting for geth-statediff..." && sleep 5; done && \
make integrationtest
- name: Run testnet stack
env:
CERC_GO_AUTH_TOKEN: ${{ secrets.CICD_REPO_TOKEN }}
run: ./scripts/integration-setup.sh
- name: Run server
env:
ETH_FORWARD_ETH_CALLS: false
run: docker compose -f test/compose-server.yml up --wait --quiet-pull
- name: Run tests
integrationtest_forwardethcalls:
name: Run integration tests for direct proxy fall-through of eth_calls
env:
GOPATH: /tmp/go
DB_WRITE: false
ETH_FORWARD_ETH_CALLS: true
ETH_PROXY_ON_ERROR: false
ETH_HTTP_PATH: "go-ethereum:8545"
runs-on: ubuntu-latest
steps:
- name: Create GOPATH
run: mkdir -p /tmp/go
- uses: actions/setup-go@v3
with:
go-version: "1.18.x"
check-latest: true
- uses: actions/checkout@v2
with:
path: "./ipld-eth-server"
- uses: actions/checkout@v2
with:
ref: ${{ inputs.STACK_ORCHESTRATOR_REF }}
path: "./stack-orchestrator/"
repository: vulcanize/stack-orchestrator
- uses: actions/checkout@v2
with:
ref: ${{ inputs.GO_ETHEREUM_REF }}
repository: cerc-io/go-ethereum
path: "./go-ethereum/"
- uses: actions/checkout@v2
with:
ref: ${{ inputs.IPLD_ETH_DB_REF }}
repository: cerc-io/ipld-eth-db
path: "./ipld-eth-db/"
- name: Create config file
run: |
sleep 30
go install github.com/onsi/ginkgo/v2/ginkgo
ginkgo -v --label-filter '!proxy' -r ./integration
- name: Run testnet stack without statediff
env:
CERC_RUN_STATEDIFF: false
SKIP_BUILD: 1
run: ./scripts/integration-setup.sh
- name: Run server with call forwarding
env:
ETH_FORWARD_ETH_CALLS: true
run: docker compose -f test/compose-server.yml up --wait --quiet-pull
- name: Run eth_call proxy tests
echo vulcanize_go_ethereum=$GITHUB_WORKSPACE/go-ethereum/ > ./config.sh
echo vulcanize_ipld_eth_db=$GITHUB_WORKSPACE/ipld-eth-db/ >> ./config.sh
echo vulcanize_ipld_eth_server=$GITHUB_WORKSPACE/ipld-eth-server/ >> ./config.sh
echo vulcanize_test_contract=$GITHUB_WORKSPACE/ipld-eth-server/test/contract >>./config.sh
echo genesis_file_path=start-up-files/go-ethereum/genesis.json >> ./config.sh
echo db_write=$DB_WRITE >> ./config.sh
echo eth_forward_eth_calls=$ETH_FORWARD_ETH_CALLS >> ./config.sh
echo eth_proxy_on_error=$ETH_PROXY_ON_ERROR >> ./config.sh
echo eth_http_path=$ETH_HTTP_PATH >> ./config.sh
cat ./config.sh
- name: Build geth
run: |
sleep 30
go install github.com/onsi/ginkgo/v2/ginkgo
ginkgo -v --label-filter 'proxy' -r ./integration
cd $GITHUB_WORKSPACE/stack-orchestrator/helper-scripts
./compile-geth.sh \
-p "$GITHUB_WORKSPACE/config.sh" \
-e docker
- name: Run docker compose
run: |
docker-compose \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-server.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-contract.yml" \
--env-file "$GITHUB_WORKSPACE/config.sh" \
up -d --build
- name: Test
run: |
cd $GITHUB_WORKSPACE/ipld-eth-server
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8081)" != "200" ]; do echo "waiting for ipld-eth-server..." && sleep 5; done && \
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8545)" != "200" ]; do echo "waiting for geth-statediff..." && sleep 5; done && \
make integrationtest

17
.gitignore vendored

@ -1,2 +1,19 @@
.idea
.vscode
test_data_dir/
contracts/*
environments/*.toml
Vagrantfile
vagrant*.sh
.vagrant
test_scripts/
ipld-eth-server
postgraphile/build/
postgraphile/node_modules/
postgraphile/package-lock.json
vulcanizedb.log
db/migrations/20*.sql
plugins/*.so
postgraphile/*.toml
postgraphile/schema.graphql
vulcanizedb.pem


@ -1,29 +1,28 @@
FROM golang:1.19-alpine as debugger
FROM golang:1.18-alpine as builder
# Include dlv
RUN go install github.com/go-delve/delve/cmd/dlv@latest
FROM golang:1.19-alpine as builder
RUN apk --update --no-cache add gcc musl-dev binutils-gold git
RUN apk --update --no-cache add make git g++ linux-headers
# DEBUG
RUN apk add busybox-extras
# Build ipld-eth-server
WORKDIR /go/src/github.com/cerc-io/ipld-eth-server
ARG GIT_VDBTO_TOKEN
# Cache the modules
ENV GO111MODULE=on
COPY go.mod .
COPY go.sum .
RUN if [ -n "$GIT_VDBTO_TOKEN" ]; then git config --global url."https://$GIT_VDBTO_TOKEN:@git.vdb.to/".insteadOf "https://git.vdb.to/"; fi && \
go mod download && \
rm -f ~/.gitconfig
RUN go mod download
COPY . .
# Build the binary
RUN GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipld-eth-server .
RUN GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipld-eth-server .
# Copy migration tool
WORKDIR /
ARG GOOSE_VER="v2.6.0"
ADD https://github.com/pressly/goose/releases/download/${GOOSE_VER}/goose-linux64 ./goose
RUN chmod +x ./goose
# app container
FROM alpine
@ -44,9 +43,7 @@ COPY --chown=5000:5000 --from=builder /go/src/github.com/cerc-io/ipld-eth-server
# keep binaries immutable
COPY --from=builder /go/src/github.com/cerc-io/ipld-eth-server/ipld-eth-server ipld-eth-server
COPY --from=builder /goose goose
COPY --from=builder /go/src/github.com/cerc-io/ipld-eth-server/environments environments
# Allow for debugging
COPY --from=debugger /go/bin/dlv /usr/local/bin/
ENTRYPOINT ["/app/entrypoint.sh"]


@ -51,19 +51,19 @@ TEST_CONNECT_STRING = postgresql://$(DATABASE_USER):$(DATABASE_PASSWORD)@$(DATAB
TEST_CONNECT_STRING_LOCAL = postgresql://$(USER)@$(HOST_NAME):$(PORT)/$(TEST_DB)?sslmode=disable
.PHONY: test
test:
test: | $(GOOSE)
go vet ./...
go fmt ./...
go run github.com/onsi/ginkgo/ginkgo -r --skipPackage=test
go run github.com/onsi/ginkgo/ginkgo -r --skipPackage=test
.PHONY: integrationtest
integrationtest:
integrationtest: | $(GOOSE)
go vet ./...
go fmt ./...
go run github.com/onsi/ginkgo/ginkgo -r test/ -v
.PHONY: test_local
test_local:
test_local: | $(GOOSE)
go vet ./...
go fmt ./...
./scripts/run_unit_test.sh


@ -22,7 +22,7 @@ Additional, unique endpoints are exposed which utilize the new indexes and state
## Dependencies
Minimal build dependencies
* Go (1.19)
* Go (1.18)
* Git
* GCC compiler
* This repository
@ -33,9 +33,9 @@ External dependency
## Install
Start by downloading ipld-eth-server and moving into the repo:
`GO111MODULE=off go get -d github.com/cerc-io/ipld-eth-server/v5`
`GO111MODULE=off go get -d github.com/cerc-io/ipld-eth-server/v4`
`cd $GOPATH/src/github.com/cerc-io/ipld-eth-server/v5@v5.x.x`
`cd $GOPATH/src/github.com/cerc-io/ipld-eth-server/v4@v4.x.x`
Then, build the binary:
@ -60,17 +60,18 @@ The corresponding CLI flags can be found with the `./ipld-eth-server serve --hel
password = "" # $DATABASE_PASSWORD
[log]
level = "info" # $LOG_LEVEL
level = "info" # $LOGRUS_LEVEL
[server]
ipcPath = "~/.vulcanize/vulcanize.ipc" # $SERVER_IPC_PATH
wsPath = "127.0.0.1:8081" # $SERVER_WS_PATH
httpPath = "127.0.0.1:8082" # $SERVER_HTTP_PATH
graphql = true # $SERVER_GRAPHQL
graphqlPath = "" # $SERVER_GRAPHQL_PATH
graphqlEndpoint = "" # $SERVER_GRAPHQL_ENDPOINT
[ethereum]
chainID = "1" # $ETH_CHAIN_ID
defaultSender = "" # $ETH_DEFAULT_SENDER_ADDR
rpcGasCap = "1000000000000" # $ETH_RPC_GAS_CAP
httpPath = "127.0.0.1:8545" # $ETH_HTTP_PATH
nodeID = "arch1" # $ETH_NODE_ID
@ -79,9 +80,9 @@ The corresponding CLI flags can be found with the `./ipld-eth-server serve --hel
networkID = "1" # $ETH_NETWORK_ID
```
The `database` fields are for connecting to a Postgres database that has been/is being populated by [ipld-eth-indexer](https://github.com/vulcanize/ipld-eth-indexer)
The `server` fields set the paths for exposing the ipld-eth-server endpoints
The `ethereum` fields set the chainID and default sender address to use for EVM simulation, and can optionally be used to configure a remote eth node to forward cache misses to
The `database` fields are for connecting to a Postgres database that has been/is being populated by [ipld-eth-indexer](https://github.com/vulcanize/ipld-eth-indexer)
The `server` fields set the paths for exposing the ipld-eth-server endpoints
The `ethereum` fields set the chainID and default sender address to use for EVM simulation, and can optionally be used to configure a remote eth node to forward cache misses to
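Each field in the example above maps to the environment variable named in its trailing comment, so the same settings can be supplied through the environment instead of (or alongside) the TOML file. A minimal sketch, with placeholder values rather than defaults:
```bash
# Hypothetical values for a local Postgres populated by ipld-eth-indexer.
export DATABASE_NAME=vulcanize_public
export DATABASE_HOSTNAME=localhost
export DATABASE_PORT=5432
export DATABASE_USER=vdbm
export DATABASE_PASSWORD=password
export ETH_CHAIN_ID=1
export ETH_HTTP_PATH=127.0.0.1:8545

./ipld-eth-server serve --config=config.toml
```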
### Endpoints
@ -91,35 +92,61 @@ TODO: Port the IPLD RPC subscription endpoints after the decoupling
#### Ethereum JSON-RPC
ipld-eth-server currently recapitulates portions of the Ethereum JSON-RPC api standard.
The currently supported standard endpoints are:
- `eth_call`
- `eth_getBalance`
- `eth_getStorageAt`
- `eth_getCode`
- `eth_getProof`
- `eth_blockNumber`
- `eth_getHeaderByNumber`
- `eth_getHeaderByHash`
- `eth_getBlockByNumber`
- `eth_getBlockByHash`
- `eth_getTransactionCount`
- `eth_getBlockTransactionCountByHash`
- `eth_getBlockTransactionCountByNumber`
- `eth_getTransactionByHash`
- `eth_getRawTransactionByHash`
- `eth_getTransactionByBlockHashAndIndex`
- `eth_getTransactionByBlockNumberAndIndex`
- `eth_getRawTransactionByBlockHashAndIndex`
- `eth_getRawTransactionByBlockNumberAndIndex`
- `eth_getTransactionReceipt`
- `eth_getLogs`
- `eth_getUncleCountByBlockHash`
- `eth_getUncleCountByBlockNumber`
- `eth_getUncleByBlockHashAndIndex`
- `eth_getUncleByBlockNumberAndIndex`
The currently supported standard endpoints are:
`eth_call`
`eth_getBalance`
`eth_getStorageAt`
`eth_getCode`
`eth_getProof`
`eth_blockNumber`
`eth_getHeaderByNumber`
`eth_getHeaderByHash`
`eth_getBlockByNumber`
`eth_getBlockByHash`
`eth_getTransactionCount`
`eth_getBlockTransactionCountByHash`
`eth_getBlockTransactionCountByNumber`
`eth_getTransactionByHash`
`eth_getRawTransactionByHash`
`eth_getTransactionByBlockHashAndIndex`
`eth_getTransactionByBlockNumberAndIndex`
`eth_getRawTransactionByBlockHashAndIndex`
`eth_getRawTransactionByBlockNumberAndIndex`
`eth_getTransactionReceipt`
`eth_getLogs`
`eth_getUncleCountByBlockHash`
`eth_getUncleCountByBlockNumber`
`eth_getUncleByBlockHashAndIndex`
`eth_getUncleByBlockNumberAndIndex`
TODO: Add the rest of the standard endpoints and unique endpoints (e.g. getSlice)
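Any of the endpoints listed above can be exercised with a plain JSON-RPC request against the server's HTTP endpoint. A minimal sketch, assuming the Eth HTTP JSON-RPC server is enabled and listening on 127.0.0.1:8081 (adjust to your configured `httpPath`):
```bash
# Hypothetical eth_blockNumber request against a locally running ipld-eth-server.
curl -s -X POST http://127.0.0.1:8081 \
  -H 'Content-Type: application/json' \
  --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
```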
### CLI Options and Environment variables
| CLI Option | Environment Variable | Default Value | Comment |
| ----------------------------- | ----------------------------- | ---------------- | ----------------------------------- |
| `database-hostname` | `DATABASE_HOSTNAME` | localhost | IPLD database host |
| `database-port` | `DATABASE_PORT` | 5432 | IPLD database port |
| `database-name` | `DATABASE_NAME` | vulcanize_public | IPLD database name |
| `database-user` | `DATABASE_USER` | | IPLD database user |
| `database-password` | `DATABASE_PASSWORD` | | IPLD database password |
| `eth-server-graphql` | `ETH_SERVER_GRAPHQL` | false | If `true` enable Eth GraphQL Server |
| `eth-server-graphql-path` | `ETH_SERVER_GRAPHQLPATH` | | If `eth-server-graphql` set to true, endpoint url for graphql server (host:port) |
| `eth-server-http` | `ETH_SERVER_HTTP` | true | If `true` enable Eth HTTP JSON-RPC Server |
| `eth-server-http-path` | `ETH_SERVER_HTTPPATH` | | If `eth-server-http` set to `true`, endpoint url for Eth HTTP JSON-RPC server (host:port) |
| `eth-server-ws` | `ETH_SERVER_WS` | false | If `true` enable Eth WS JSON-RPC Server |
| `eth-server-ws-path` | `ETH_SERVER_WSPATH` | | If `eth-server-ws` set to `true`, endpoint url for Eth WS JSON-RPC server (host:port) |
| `eth-server-ipc` | `ETH_SERVER_IPC` | false | If `true` enable Eth IPC JSON-RPC Server |
| `eth-server-ipc-path` | `ETH_SERVER_IPC_PATH` | | If `eth-server-ws` set to `true`, path for Eth IPC JSON-RPC server |
| `ipld-server-graphql` | `IPLD_SERVER_GRAPHQL` | false | If `true` enable IPLD GraphQL Server |
| `ipld-server-graphql-path` | `IPLD_SERVER_GRAPHQLPATH` | | If `ipld-server-graphql` set to true, endpoint url for graphql server (host:port) |
| `ipld-postgraphile-path` | `IPLD_POSTGRAPHILEPATH` | | If `ipld-server-graphql` set to true, http url for postgraphile server on top of IPLD db |
| `tracing-http-path` | `TRACING_HTTPPATH` | | If `ipld-server-graphql` set to true, http url for tracing server |
| `tracing-postgraphile-path` | `TRACING.POSTGRAPHILEPATH` | | If `ipld-server-graphql` set to true, http url for postgraphile server on top of tracing db |
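As a sketch of how the flags and environment variables in the table relate (the port value is illustrative), either of the following turns on the Eth GraphQL server:
```bash
# Via CLI flags
./ipld-eth-server serve --eth-server-graphql --eth-server-graphql-path=0.0.0.0:8082

# Via the equivalent environment variables
ETH_SERVER_GRAPHQL=true ETH_SERVER_GRAPHQLPATH=0.0.0.0:8082 ./ipld-eth-server serve
```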
### Testing
Follow steps in [test/README.md](./test/README.md)

16
chain.json Normal file

@ -0,0 +1,16 @@
{
"chainId": 4,
"homesteadBlock": 1,
"eip150Block": 2,
"eip150Hash": "0x9b095b36c15eaf13044373aef8ee0bd3a382a5abb92e402afa44b8249c3a90e9",
"eip155Block": 3,
"eip158Block": 3,
"byzantiumBlock": 3,
"constantinopleBlock": 3,
"petersburgBlock": 3,
"istanbulBlock": 3,
"clique": {
"period": 15,
"epoch": 30000
}
}


@ -18,15 +18,16 @@ package cmd
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/joho/godotenv"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
"github.com/cerc-io/ipld-eth-server/v5/pkg/prom"
"github.com/cerc-io/ipld-eth-server/v4/pkg/prom"
)
var (
@ -49,7 +50,24 @@ func Execute() {
}
func initFuncs(cmd *cobra.Command, args []string) {
log.Init()
viper.BindEnv("log.file", "LOGRUS_FILE")
logfile := viper.GetString("log.file")
if logfile != "" {
file, err := os.OpenFile(logfile,
os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
if err == nil {
log.Infof("Directing output to %s", logfile)
log.SetOutput(file)
} else {
log.SetOutput(os.Stdout)
log.Info("Failed to log to file, using default stdout")
}
} else {
log.SetOutput(os.Stdout)
}
if err := logLevel(); err != nil {
log.Fatal("Could not set log level: ", err)
}
if viper.GetBool("metrics") {
prom.Init()
@ -65,6 +83,20 @@ func initFuncs(cmd *cobra.Command, args []string) {
}
}
func logLevel() error {
viper.BindEnv("log.level", "LOGRUS_LEVEL")
lvl, err := log.ParseLevel(viper.GetString("log.level"))
if err != nil {
return err
}
log.SetLevel(lvl)
if lvl > log.InfoLevel {
log.SetReportCaller(true)
}
log.Info("Log level set to ", lvl.String())
return nil
}
func init() {
cobra.OnInitialize(initConfig)
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))


@ -17,6 +17,7 @@ package cmd
import (
"errors"
"github.com/mailgun/groupcache/v2"
"net/http"
"net/url"
"os"
@ -25,16 +26,17 @@ import (
"sync"
"time"
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/mailgun/groupcache/v2"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/vulcanize/gap-filler/pkg/mux"
"github.com/cerc-io/ipld-eth-server/v5/pkg/graphql"
srpc "github.com/cerc-io/ipld-eth-server/v5/pkg/rpc"
s "github.com/cerc-io/ipld-eth-server/v5/pkg/serve"
v "github.com/cerc-io/ipld-eth-server/v5/version"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/graphql"
srpc "github.com/cerc-io/ipld-eth-server/v4/pkg/rpc"
s "github.com/cerc-io/ipld-eth-server/v4/pkg/serve"
v "github.com/cerc-io/ipld-eth-server/v4/version"
)
var ErrNoRpcEndpoints = errors.New("no rpc endpoints is available")
@ -43,7 +45,9 @@ var ErrNoRpcEndpoints = errors.New("no rpc endpoints is available")
var serveCmd = &cobra.Command{
Use: "serve",
Short: "serve chain data from PG-IPFS",
Long: `This command configures a VulcanizeDB ipld-eth-server.`,
Long: `This command configures a VulcanizeDB ipld-eth-server.
`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *log.WithField("SubCommand", subCommand)
@ -52,29 +56,25 @@ var serveCmd = &cobra.Command{
}
func serve() {
logWithCommand.Infof("ipld-eth-server version: %s", v.VersionWithMeta)
logWithCommand.Infof("running ipld-eth-server version: %s", v.VersionWithMeta)
var forwardPayloadChan chan eth.ConvertedPayload
wg := new(sync.WaitGroup)
logWithCommand.Debug("loading server configuration variables")
serverConfig, err := s.NewConfig()
if err != nil {
logWithCommand.Fatal(err)
}
logWithCommand.Debugf("server config: %+v", serverConfig)
logWithCommand.Infof("server config: %+v", serverConfig)
logWithCommand.Debug("initializing new server service")
server, err := s.NewServer(serverConfig)
if err != nil {
logWithCommand.Fatal(err)
}
if serverConfig.ForwardEthCalls {
logWithCommand.Info("Fowarding eth_call")
}
if serverConfig.ForwardGetStorageAt {
logWithCommand.Info("Fowarding eth_getStorageAt")
}
if serverConfig.ProxyOnError {
logWithCommand.Info("Proxy on error is enabled")
}
server.Serve(wg)
logWithCommand.Info("starting up server servers")
forwardPayloadChan = make(chan eth.ConvertedPayload, s.PayloadChanBufferSize)
server.Serve(wg, forwardPayloadChan)
if err := startServers(server, serverConfig); err != nil {
logWithCommand.Fatal(err)
}
@ -83,6 +83,11 @@ func serve() {
logWithCommand.Fatal(err)
}
err = startIpldGraphQL(serverConfig)
if err != nil {
logWithCommand.Fatal(err)
}
err = startGroupCacheService(serverConfig)
if err != nil {
logWithCommand.Fatal(err)
@ -92,7 +97,7 @@ func serve() {
go startStateTrieValidator(serverConfig, server)
logWithCommand.Info("state validator enabled")
} else {
logWithCommand.Debug("state validator disabled")
logWithCommand.Info("state validator disabled")
}
shutdown := make(chan os.Signal, 1)
@ -107,33 +112,33 @@ func serve() {
func startServers(server s.Server, settings *s.Config) error {
if settings.IPCEnabled {
logWithCommand.Debug("starting up IPC server")
logWithCommand.Info("starting up IPC server")
_, _, err := srpc.StartIPCEndpoint(settings.IPCEndpoint, server.APIs())
if err != nil {
return err
}
} else {
logWithCommand.Debug("IPC server is disabled")
logWithCommand.Info("IPC server is disabled")
}
if settings.WSEnabled {
logWithCommand.Debug("starting up WS server")
logWithCommand.Info("starting up WS server")
_, _, err := srpc.StartWSEndpoint(settings.WSEndpoint, server.APIs(), []string{"vdb", "net"}, nil)
if err != nil {
return err
}
} else {
logWithCommand.Debug("WS server is disabled")
logWithCommand.Info("WS server is disabled")
}
if settings.HTTPEnabled {
logWithCommand.Debug("starting up HTTP server")
_, err := srpc.StartHTTPEndpoint(settings.HTTPEndpoint, server.APIs(), []string{"vdb", "eth", "debug", "net"}, nil, []string{"*"}, rpc.HTTPTimeouts{})
logWithCommand.Info("starting up HTTP server")
_, err := srpc.StartHTTPEndpoint(settings.HTTPEndpoint, server.APIs(), []string{"vdb", "eth", "net"}, nil, []string{"*"}, rpc.HTTPTimeouts{})
if err != nil {
return err
}
} else {
logWithCommand.Debug("HTTP server is disabled")
logWithCommand.Info("HTTP server is disabled")
}
return nil
@ -141,7 +146,7 @@ func startServers(server s.Server, settings *s.Config) error {
func startEthGraphQL(server s.Server, settings *s.Config) (graphQLServer *graphql.Service, err error) {
if settings.EthGraphqlEnabled {
logWithCommand.Debug("starting up ETH GraphQL server")
logWithCommand.Info("starting up ETH GraphQL server")
endPoint := settings.EthGraphqlEndpoint
if endPoint != "" {
graphQLServer, err = graphql.New(server.Backend(), endPoint, nil, []string{"*"}, rpc.HTTPTimeouts{})
@ -151,17 +156,69 @@ func startEthGraphQL(server s.Server, settings *s.Config) (graphQLServer *graphq
err = graphQLServer.Start(nil)
}
} else {
logWithCommand.Debug("ETH GraphQL server is disabled")
logWithCommand.Info("ETH GraphQL server is disabled")
}
return
}
func startIpldGraphQL(settings *s.Config) error {
if settings.IpldGraphqlEnabled {
logWithCommand.Info("starting up IPLD GraphQL server")
gqlIpldAddr, err := url.Parse(settings.IpldPostgraphileEndpoint)
if err != nil {
return err
}
gqlTracingAPIAddr, err := url.Parse(settings.TracingPostgraphileEndpoint)
if err != nil {
return err
}
ethClients, err := parseRpcAddresses(settings.EthHttpEndpoint)
if err != nil {
return err
}
var tracingClients []*rpc.Client
tracingEndpoint := viper.GetString("tracing.httpPath")
if tracingEndpoint != "" {
tracingClients, err = parseRpcAddresses(tracingEndpoint)
if err != nil {
return err
}
}
router, err := mux.NewServeMux(&mux.Options{
BasePath: "/",
EnableGraphiQL: true,
Postgraphile: mux.PostgraphileOptions{
Default: gqlIpldAddr,
TracingAPI: gqlTracingAPIAddr,
},
RPC: mux.RPCOptions{
DefaultClients: ethClients,
TracingClients: tracingClients,
},
})
if err != nil {
return err
}
go http.ListenAndServe(settings.IpldGraphqlEndpoint, router)
} else {
logWithCommand.Info("IPLD GraphQL server is disabled")
}
return nil
}
func startGroupCacheService(settings *s.Config) error {
gcc := settings.GroupCache
if gcc.Pool.Enabled {
logWithCommand.Debug("starting up groupcache pool HTTTP server")
logWithCommand.Info("starting up groupcache pool HTTTP server")
pool := groupcache.NewHTTPPoolOpts(gcc.Pool.HttpEndpoint, &groupcache.HTTPPoolOptions{})
pool.Set(gcc.Pool.PeerHttpEndpoints...)
@ -179,9 +236,9 @@ func startGroupCacheService(settings *s.Config) error {
// Start a HTTP server to listen for peer requests from the groupcache
go server.ListenAndServe()
logWithCommand.Infof("groupcache pool endpoint opened at %s", httpURL)
logWithCommand.Infof("groupcache pool endpoint opened for url %s", httpURL)
} else {
logWithCommand.Debug("Groupcache pool is disabled")
logWithCommand.Info("Groupcache pool is disabled")
}
return nil
@ -198,7 +255,7 @@ func startStateTrieValidator(config *s.Config, server s.Server) {
block, err := backend.CurrentBlock()
if err != nil {
log.Error("Error fetching current block for state trie validator")
log.Errorln("Error fetching current block for state trie validator")
continue
}
@ -262,14 +319,21 @@ func init() {
// flags for all config variables
// eth graphql and json-rpc parameters
serveCmd.PersistentFlags().Bool("server-graphql", false, "turn on the eth graphql server")
serveCmd.PersistentFlags().String("server-graphql-path", "", "endpoint url for eth graphql server (host:port)")
serveCmd.PersistentFlags().Bool("server-http", true, "turn on the eth http json-rpc server")
serveCmd.PersistentFlags().String("server-http-path", "", "endpoint url for eth http json-rpc server (host:port)")
serveCmd.PersistentFlags().Bool("server-ws", false, "turn on the eth websocket json-rpc server")
serveCmd.PersistentFlags().String("server-ws-path", "", "endpoint url for eth websocket json-rpc server (host:port)")
serveCmd.PersistentFlags().Bool("server-ipc", false, "turn on the eth ipc json-rpc server")
serveCmd.PersistentFlags().String("server-ipc-path", "", "path for eth ipc json-rpc server")
serveCmd.PersistentFlags().Bool("eth-server-graphql", false, "turn on the eth graphql server")
serveCmd.PersistentFlags().String("eth-server-graphql-path", "", "endpoint url for eth graphql server (host:port)")
serveCmd.PersistentFlags().Bool("eth-server-http", true, "turn on the eth http json-rpc server")
serveCmd.PersistentFlags().String("eth-server-http-path", "", "endpoint url for eth http json-rpc server (host:port)")
serveCmd.PersistentFlags().Bool("eth-server-ws", false, "turn on the eth websocket json-rpc server")
serveCmd.PersistentFlags().String("eth-server-ws-path", "", "endpoint url for eth websocket json-rpc server (host:port)")
serveCmd.PersistentFlags().Bool("eth-server-ipc", false, "turn on the eth ipc json-rpc server")
serveCmd.PersistentFlags().String("eth-server-ipc-path", "", "path for eth ipc json-rpc server")
// ipld and tracing graphql parameters
serveCmd.PersistentFlags().Bool("ipld-server-graphql", false, "turn on the ipld graphql server")
serveCmd.PersistentFlags().String("ipld-server-graphql-path", "", "endpoint url for ipld graphql server (host:port)")
serveCmd.PersistentFlags().String("ipld-postgraphile-path", "", "http url to postgraphile on top of ipld database")
serveCmd.PersistentFlags().String("tracing-http-path", "", "http url to tracing service")
serveCmd.PersistentFlags().String("tracing-postgraphile-path", "", "http url to postgraphile on top of tracing db")
serveCmd.PersistentFlags().String("eth-http-path", "", "http url for ethereum node")
serveCmd.PersistentFlags().String("eth-node-id", "", "eth node id")
@ -298,20 +362,27 @@ func init() {
// and their bindings
// eth graphql server
viper.BindPFlag("server.graphql", serveCmd.PersistentFlags().Lookup("server-graphql"))
viper.BindPFlag("server.graphqlPath", serveCmd.PersistentFlags().Lookup("server-graphql-path"))
viper.BindPFlag("eth.server.graphql", serveCmd.PersistentFlags().Lookup("eth-server-graphql"))
viper.BindPFlag("eth.server.graphqlPath", serveCmd.PersistentFlags().Lookup("eth-server-graphql-path"))
// eth http json-rpc server
viper.BindPFlag("server.http", serveCmd.PersistentFlags().Lookup("server-http"))
viper.BindPFlag("server.httpPath", serveCmd.PersistentFlags().Lookup("server-http-path"))
viper.BindPFlag("eth.server.http", serveCmd.PersistentFlags().Lookup("eth-server-http"))
viper.BindPFlag("eth.server.httpPath", serveCmd.PersistentFlags().Lookup("eth-server-http-path"))
// eth websocket json-rpc server
viper.BindPFlag("server.ws", serveCmd.PersistentFlags().Lookup("server-ws"))
viper.BindPFlag("server.wsPath", serveCmd.PersistentFlags().Lookup("server-ws-path"))
viper.BindPFlag("eth.server.ws", serveCmd.PersistentFlags().Lookup("eth-server-ws"))
viper.BindPFlag("eth.server.wsPath", serveCmd.PersistentFlags().Lookup("eth-server-ws-path"))
// eth ipc json-rpc server
viper.BindPFlag("server.ipc", serveCmd.PersistentFlags().Lookup("server-ipc"))
viper.BindPFlag("server.ipcPath", serveCmd.PersistentFlags().Lookup("server-ipc-path"))
viper.BindPFlag("eth.server.ipc", serveCmd.PersistentFlags().Lookup("eth-server-ipc"))
viper.BindPFlag("eth.server.ipcPath", serveCmd.PersistentFlags().Lookup("eth-server-ipc-path"))
// ipld and tracing graphql parameters
viper.BindPFlag("ipld.server.graphql", serveCmd.PersistentFlags().Lookup("ipld-server-graphql"))
viper.BindPFlag("ipld.server.graphqlPath", serveCmd.PersistentFlags().Lookup("ipld-server-graphql-path"))
viper.BindPFlag("ipld.postgraphilePath", serveCmd.PersistentFlags().Lookup("ipld-postgraphile-path"))
viper.BindPFlag("tracing.httpPath", serveCmd.PersistentFlags().Lookup("tracing-http-path"))
viper.BindPFlag("tracing.postgraphilePath", serveCmd.PersistentFlags().Lookup("tracing-postgraphile-path"))
viper.BindPFlag("ethereum.httpPath", serveCmd.PersistentFlags().Lookup("eth-http-path"))
viper.BindPFlag("ethereum.nodeID", serveCmd.PersistentFlags().Lookup("eth-node-id"))
@ -319,13 +390,12 @@ func init() {
viper.BindPFlag("ethereum.genesisBlock", serveCmd.PersistentFlags().Lookup("eth-genesis-block"))
viper.BindPFlag("ethereum.networkID", serveCmd.PersistentFlags().Lookup("eth-network-id"))
viper.BindPFlag("ethereum.chainID", serveCmd.PersistentFlags().Lookup("eth-chain-id"))
viper.BindPFlag("ethereum.defaultSender", serveCmd.PersistentFlags().Lookup("eth-default-sender"))
viper.BindPFlag("ethereum.rpcGasCap", serveCmd.PersistentFlags().Lookup("eth-rpc-gas-cap"))
viper.BindPFlag("ethereum.chainConfig", serveCmd.PersistentFlags().Lookup("eth-chain-config"))
viper.BindPFlag("ethereum.supportsStateDiff", serveCmd.PersistentFlags().Lookup("eth-supports-state-diff"))
viper.BindPFlag("ethereum.forwardEthCalls", serveCmd.PersistentFlags().Lookup("eth-forward-eth-calls"))
viper.BindPFlag("ethereum.forwardGetStorageAt", serveCmd.PersistentFlags().Lookup("eth-forward-get-storage-at"))
viper.BindPFlag("ethereum.proxyOnError", serveCmd.PersistentFlags().Lookup("eth-proxy-on-error"))
viper.BindPFlag("ethereum.getLogsBlockLimit", serveCmd.PersistentFlags().Lookup("eth-getlogs-block-limit"))
// groupcache flags
viper.BindPFlag("groupcache.pool.enabled", serveCmd.PersistentFlags().Lookup("gcache-pool-enabled"))

172
cmd/subscribe.go Normal file

@ -0,0 +1,172 @@
// Copyright © 2019 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/cerc-io/ipld-eth-server/v4/pkg/client"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
w "github.com/cerc-io/ipld-eth-server/v4/pkg/serve"
)
// subscribeCmd represents the subscribe command
var subscribeCmd = &cobra.Command{
Use: "subscribe",
Short: "This command is used to subscribe to the eth ipfs watcher data stream with the provided filters",
Long: `This command is for demo and testing purposes and is used to subscribe to the watcher with the provided subscription configuration parameters.
It does not do anything with the data streamed from the watcher other than unpack it and print it out for demonstration purposes.`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *log.WithField("SubCommand", subCommand)
subscribe()
},
}
func init() {
rootCmd.AddCommand(subscribeCmd)
}
func subscribe() {
// Prep the subscription config/filters to be sent to the server
ethSubConfig, err := eth.NewEthSubscriptionConfig()
if err != nil {
log.Fatal(err)
}
// Create a new rpc client and a subscription streamer with that client
rpcClient, err := getRPCClient()
if err != nil {
logWithCommand.Fatal(err)
}
subClient := client.NewClient(rpcClient)
// Buffered channel for reading subscription payloads
payloadChan := make(chan w.SubscriptionPayload, 20000)
// Subscribe to the watcher service with the given config/filter parameters
sub, err := subClient.Stream(payloadChan, *ethSubConfig)
if err != nil {
logWithCommand.Fatal(err)
}
logWithCommand.Info("awaiting payloads")
// Receive response payloads and print out the results
for {
select {
case payload := <-payloadChan:
if payload.Err != "" {
logWithCommand.Error(payload.Err)
continue
}
var ethData eth.IPLDs
if err := rlp.DecodeBytes(payload.Data, &ethData); err != nil {
logWithCommand.Error(err)
continue
}
var header types.Header
err = rlp.Decode(bytes.NewBuffer(ethData.Header.Data), &header)
if err != nil {
logWithCommand.Error(err)
continue
}
fmt.Printf("Header number %d, hash %s\n", header.Number.Int64(), header.Hash().Hex())
fmt.Printf("header: %v\n", header)
for _, trxRlp := range ethData.Transactions {
var trx types.Transaction
buff := bytes.NewBuffer(trxRlp.Data)
stream := rlp.NewStream(buff, 0)
err := trx.DecodeRLP(stream)
if err != nil {
logWithCommand.Error(err)
continue
}
fmt.Printf("Transaction with hash %s\n", trx.Hash().Hex())
fmt.Printf("trx: %v\n", trx)
}
for _, rctRlp := range ethData.Receipts {
var rct types.Receipt
buff := bytes.NewBuffer(rctRlp.Data)
stream := rlp.NewStream(buff, 0)
err = rct.DecodeRLP(stream)
if err != nil {
logWithCommand.Error(err)
continue
}
fmt.Printf("Receipt with block hash %s, trx hash %s\n", rct.BlockHash.Hex(), rct.TxHash.Hex())
fmt.Printf("rct: %v\n", rct)
for _, l := range rct.Logs {
if len(l.Topics) < 1 {
logWithCommand.Error(fmt.Sprintf("log only has %d topics", len(l.Topics)))
continue
}
fmt.Printf("Log for block hash %s, trx hash %s, address %s, and with topic0 %s\n",
l.BlockHash.Hex(), l.TxHash.Hex(), l.Address.Hex(), l.Topics[0].Hex())
fmt.Printf("log: %v\n", l)
}
}
// This assumes leafs only
for _, stateNode := range ethData.StateNodes {
var acct types.StateAccount
err = rlp.DecodeBytes(stateNode.IPLD.Data, &acct)
if err != nil {
logWithCommand.Error(err)
continue
}
fmt.Printf("Account for key %s, and root %s, with balance %s\n",
stateNode.StateLeafKey.Hex(), acct.Root.Hex(), acct.Balance.String())
fmt.Printf("state account: %+v\n", acct)
}
for _, storageNode := range ethData.StorageNodes {
fmt.Printf("Storage for state key %s ", storageNode.StateLeafKey.Hex())
fmt.Printf("with storage key %s\n", storageNode.StorageLeafKey.Hex())
var i []interface{}
err := rlp.DecodeBytes(storageNode.IPLD.Data, &i)
if err != nil {
logWithCommand.Error(err)
continue
}
// if a value node
if len(i) == 1 {
valueBytes, ok := i[0].([]byte)
if !ok {
continue
}
fmt.Printf("Storage leaf key: %s, and value hash: %s\n",
storageNode.StorageLeafKey.Hex(), common.BytesToHash(valueBytes).Hex())
}
}
case err = <-sub.Err():
logWithCommand.Fatal(err)
}
}
}
func getRPCClient() (*rpc.Client, error) {
vulcPath := viper.GetString("watcher.ethSubscription.wsPath")
if vulcPath == "" {
vulcPath = "ws://127.0.0.1:8080" // default to and try the default ws url if no path is provided
}
return rpc.Dial(vulcPath)
}


@ -18,14 +18,14 @@ package cmd
import (
"time"
validator "github.com/cerc-io/eth-ipfs-state-validator/v5/pkg"
ipfsethdb "github.com/cerc-io/ipfs-ethdb/v5/postgres/v0"
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
validator "github.com/cerc-io/eth-ipfs-state-validator/v4/pkg"
ipfsethdb "github.com/cerc-io/ipfs-ethdb/v4/postgres"
"github.com/ethereum/go-ethereum/common"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
s "github.com/cerc-io/ipld-eth-server/v5/pkg/serve"
s "github.com/cerc-io/ipld-eth-server/v4/pkg/serve"
)
const GroupName = "statedb-validate"
@ -34,7 +34,7 @@ const CacheSizeInMB = 16 // 16 MB
var validateCmd = &cobra.Command{
Use: "validate",
Short: "validate state",
Short: "valdiate state",
Long: `This command validates the trie for the given state root`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
@ -65,13 +65,13 @@ func validate() {
val := validator.NewValidator(nil, ethDB)
if err = val.ValidateTrie(stateRoot); err != nil {
log.Fatal("Error validating state root")
log.Fatalln("Error validating state root")
}
stats := ethDB.(*ipfsethdb.Database).GetCacheStats()
log.Debugf("groupcache stats %+v", stats)
log.Info("Successfully validated state root")
log.Infoln("Successfully validated state root")
}
func init() {


@ -16,17 +16,19 @@
package cmd
import (
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
v "github.com/cerc-io/ipld-eth-server/v5/version"
v "github.com/cerc-io/ipld-eth-server/v4/version"
)
// versionCmd represents the version command
var versionCmd = &cobra.Command{
Use: "version",
Short: "Prints the version of ipld-eth-server",
Long: `Use this command to fetch the version of ipld-eth-server`,
Long: `Use this command to fetch the version of ipld-eth-server
Usage: ./ipld-eth-server version`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *log.WithField("SubCommand", subCommand)

12
docker-compose.test.yml Normal file

@ -0,0 +1,12 @@
version: '3.2'
services:
contract:
build:
context: ./test/contract
args:
ETH_ADDR: "http://go-ethereum:8545"
environment:
ETH_ADDR: "http://go-ethereum:8545"
ports:
- "127.0.0.1:3000:3000"

59
docker-compose.yml Normal file

@ -0,0 +1,59 @@
version: '3.2'
services:
migrations:
restart: on-failure
depends_on:
- ipld-eth-db
image: git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:v4.2.1-alpha-unstable
environment:
DATABASE_USER: "vdbm"
DATABASE_NAME: "vulcanize_testing"
DATABASE_PASSWORD: "password"
DATABASE_HOSTNAME: "ipld-eth-db"
DATABASE_PORT: 5432
ipld-eth-db:
image: timescale/timescaledb:latest-pg14
restart: always
command: ["postgres", "-c", "log_statement=all"]
environment:
POSTGRES_USER: "vdbm"
POSTGRES_DB: "vulcanize_testing"
POSTGRES_PASSWORD: "password"
ports:
- "127.0.0.1:8077:5432"
eth-server:
restart: unless-stopped
depends_on:
- ipld-eth-db
build:
context: ./
cache_from:
- alpine:latest
- golang:1.13-alpine
environment:
IPLD_SERVER_GRAPHQL: "true"
IPLD_POSTGRAPHILEPATH: http://graphql:5000
ETH_SERVER_HTTPPATH: 0.0.0.0:8081
VDB_COMMAND: "serve"
ETH_CHAIN_CONFIG: "/tmp/chain.json"
DATABASE_NAME: "vulcanize_testing"
DATABASE_HOSTNAME: "ipld-eth-db"
DATABASE_PORT: 5432
DATABASE_USER: "vdbm"
DATABASE_PASSWORD: "password"
ETH_CHAIN_ID: 4
ETH_FORWARD_ETH_CALLS: $ETH_FORWARD_ETH_CALLS
ETH_PROXY_ON_ERROR: $ETH_PROXY_ON_ERROR
ETH_HTTP_PATH: $ETH_HTTP_PATH
volumes:
- type: bind
source: ./chain.json
target: /tmp/chain.json
ports:
- "127.0.0.1:8081:8081"
volumes:
vdb_db_eth_server:


@ -40,9 +40,9 @@ An example of how to subscribe to a real-time Ethereum data feed from ipld-eth-s
"github.com/ethereum/go-ethereum/rpc"
"github.com/spf13/viper"
"github.com/cerc-io/ipld-eth-server/v5/pkg/client"
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v5/pkg/watch"
"github.com/cerc-io/ipld-eth-server/v4/pkg/client"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/watch"
)
config, _ := eth.NewEthSubscriptionConfig()
@ -160,9 +160,9 @@ An example of how to subscribe to a real-time Bitcoin data feed from ipld-eth-se
"github.com/ethereum/go-ethereum/rpc"
"github.com/spf13/viper"
"github.com/vulcanize/ipld-eth-server/v5/pkg/btc"
"github.com/vulcanize/ipld-eth-server/v5/pkg/client"
"github.com/vulcanize/ipld-eth-server/v5/pkg/watch"
"github.com/vulcanize/ipld-eth-server/v4/pkg/btc"
"github.com/vulcanize/ipld-eth-server/v4/pkg/client"
"github.com/vulcanize/ipld-eth-server/v4/pkg/watch"
)
config, _ := btc.NewBtcSubscriptionConfig()


@ -2,13 +2,8 @@
echo "Beginning the ipld-eth-server process"
START_CMD="./ipld-eth-server"
if [ "true" == "$CERC_REMOTE_DEBUG" ] && [ -x "/usr/local/bin/dlv" ]; then
START_CMD="/usr/local/bin/dlv --listen=:40000 --headless=true --api-version=2 --accept-multiclient exec `pwd`/ipld-eth-server --continue --"
fi
echo running: $START_CMD ${VDB_COMMAND} --config=`pwd`/config.toml
$START_CMD ${VDB_COMMAND} --config=`pwd`/config.toml
echo running: ./ipld-eth-server ${VDB_COMMAND} --config=config.toml
./ipld-eth-server ${VDB_COMMAND} --config=config.toml
rv=$?
if [ $rv != 0 ]; then


@ -1,30 +1,27 @@
[database]
name = "cerc_testing" # $DATABASE_NAME
name = "vulcanize_public" # $DATABASE_NAME
hostname = "localhost" # $DATABASE_HOSTNAME
port = 5432 # $DATABASE_PORT
user = "postgres" # $DATABASE_USER
password = "" # $DATABASE_PASSWORD
[log]
level = "info" # $LOG_LEVEL
level = "info" # $LOGRUS_LEVEL
[server]
ipc = false
ipcPath = "~/.vulcanize/vulcanize.ipc" # $SERVER_IPC_PATH
ws = true
wsPath = "127.0.0.1:8080" # $SERVER_WS_PATH
http = true
httpPath = "127.0.0.1:8081" # $SERVER_HTTP_PATH
wsPath = "127.0.0.1:8081" # $SERVER_WS_PATH
httpPath = "127.0.0.1:8082" # $SERVER_HTTP_PATH
graphql = true # $SERVER_GRAPHQL
graphqlPath = "127.0.0.1:8082" # $SERVER_GRAPHQL_PATH
graphqlEndpoint = "127.0.0.1:8083" # $SERVER_GRAPHQL_ENDPOINT
[ethereum]
chainConfig = "./chain.json" # ETH_CHAIN_CONFIG
chainID = "1" # $ETH_CHAIN_ID
defaultSender = "" # $ETH_DEFAULT_SENDER_ADDR
rpcGasCap = "1000000000000" # $ETH_RPC_GAS_CAP
httpPath = "127.0.0.1:8545" # $ETH_HTTP_PATH
supportsStateDiff = true # $ETH_SUPPORTS_STATEDIFF
stateDiffTimeout = "4m" # $ETH_STATEDIFF_TIMEOUT
forwardEthCalls = false # $ETH_FORWARD_ETH_CALLS
proxyOnError = true # $ETH_PROXY_ON_ERROR
nodeID = "arch1" # $ETH_NODE_ID


@ -0,0 +1,30 @@
[watcher]
[watcher.ethSubscription]
historicalData = false
historicalDataOnly = false
startingBlock = 0
endingBlock = 0
wsPath = "ws://127.0.0.1:8080"
[watcher.ethSubscription.headerFilter]
off = false
uncles = false
[watcher.ethSubscription.txFilter]
off = false
src = []
dst = []
[watcher.ethSubscription.receiptFilter]
off = false
contracts = []
topic0s = []
topic1s = []
topic2s = []
topic3s = []
[watcher.ethSubscription.stateFilter]
off = false
addresses = []
intermediateNodes = false
[watcher.ethSubscription.storageFilter]
off = true
addresses = []
storageKeys = []
intermediateNodes = false

336
go.mod

@ -1,304 +1,296 @@
module github.com/cerc-io/ipld-eth-server/v5
module github.com/cerc-io/ipld-eth-server/v4
go 1.19
go 1.18
require (
github.com/cerc-io/eth-ipfs-state-validator/v5 v5.0.0-alpha
github.com/cerc-io/eth-iterator-utils v0.1.1
github.com/cerc-io/ipfs-ethdb/v5 v5.0.0-alpha
github.com/cerc-io/ipld-eth-statedb v0.0.5-alpha
github.com/cerc-io/plugeth-statediff v0.1.1
github.com/ethereum/go-ethereum v1.11.6
github.com/google/uuid v1.3.0
github.com/cerc-io/eth-ipfs-state-validator/v4 v4.0.8-alpha
github.com/cerc-io/ipfs-ethdb/v4 v4.0.8-alpha
github.com/ethereum/go-ethereum v1.10.23
github.com/graph-gophers/graphql-go v1.3.0
github.com/ipfs/go-cid v0.4.1
github.com/ipfs/go-block-format v0.0.3
github.com/ipfs/go-cid v0.2.0
github.com/ipfs/go-ipfs-blockstore v1.2.0
github.com/ipfs/go-ipfs-ds-help v1.1.0
github.com/jmoiron/sqlx v1.3.5
github.com/joho/godotenv v1.4.0
github.com/lib/pq v1.10.9
github.com/lib/pq v1.10.6
github.com/machinebox/graphql v0.2.2
github.com/mailgun/groupcache/v2 v2.3.0
github.com/onsi/ginkgo/v2 v2.9.2
github.com/onsi/gomega v1.27.4
github.com/prometheus/client_golang v1.16.0
github.com/multiformats/go-multihash v0.2.0
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.19.0
github.com/prometheus/client_golang v1.12.1
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.4.0
github.com/spf13/viper v1.11.0
github.com/vulcanize/gap-filler v0.4.0
gorm.io/driver/postgres v1.3.7
gorm.io/gorm v1.23.5
)
require (
bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc // indirect
github.com/DataDog/zstd v1.5.5 // indirect
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
github.com/Stebalien/go-bitfield v0.0.1 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 // indirect
github.com/btcsuite/btcd v0.22.1 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cockroachdb/errors v1.10.0 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble v0.0.0-20230720154706-692f3b61a3c4 // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230613231145-182959a1fad6 // indirect
github.com/containerd/cgroups v1.0.4 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cerc-io/go-eth-state-node-iterator v1.1.7 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cheekybits/genny v1.0.0 // indirect
github.com/containerd/cgroups v1.0.3 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect
github.com/cskr/pubsub v1.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/deckarep/golang-set/v2 v2.3.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/elastic/gosigar v0.14.2 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 // indirect
github.com/fjl/memsize v0.0.1 // indirect
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
github.com/flynn/noise v1.0.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/friendsofgo/graphiql v0.2.2 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/georgysavva/scany v0.2.9 // indirect
github.com/getsentry/sentry-go v0.22.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/mock v1.6.0 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/golang-jwt/jwt/v4 v4.3.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20221203041831-ce31453925ec // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/graphql-go/graphql v0.7.9 // indirect
github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-bexpr v0.1.12 // indirect
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.3 // indirect
github.com/huin/goupnp v1.2.0 // indirect
github.com/inconshreveable/log15 v2.16.0+incompatible // indirect
github.com/holiman/uint256 v1.2.0 // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/influxdata/influxdb v1.8.3 // indirect
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/go-bitfield v1.0.0 // indirect
github.com/ipfs/go-bitswap v0.11.0 // indirect
github.com/ipfs/go-block-format v0.0.3 // indirect
github.com/ipfs/go-blockservice v0.5.0 // indirect
github.com/ipfs/go-bitswap v0.8.0 // indirect
github.com/ipfs/go-blockservice v0.4.0 // indirect
github.com/ipfs/go-cidutil v0.1.0 // indirect
github.com/ipfs/go-datastore v0.6.0 // indirect
github.com/ipfs/go-delegated-routing v0.7.0 // indirect
github.com/ipfs/go-datastore v0.5.1 // indirect
github.com/ipfs/go-delegated-routing v0.3.0 // indirect
github.com/ipfs/go-ds-measure v0.2.0 // indirect
github.com/ipfs/go-fetcher v1.6.1 // indirect
github.com/ipfs/go-filestore v1.2.0 // indirect
github.com/ipfs/go-fs-lock v0.0.7 // indirect
github.com/ipfs/go-graphsync v0.14.1 // indirect
github.com/ipfs/go-ipfs-blockstore v1.2.0 // indirect
github.com/ipfs/go-graphsync v0.13.1 // indirect
github.com/ipfs/go-ipfs-chunker v0.0.5 // indirect
github.com/ipfs/go-ipfs-delay v0.0.1 // indirect
github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect
github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect
github.com/ipfs/go-ipfs-exchange-offline v0.3.0 // indirect
github.com/ipfs/go-ipfs-keystore v0.1.0 // indirect
github.com/ipfs/go-ipfs-files v0.1.1 // indirect
github.com/ipfs/go-ipfs-keystore v0.0.2 // indirect
github.com/ipfs/go-ipfs-pinner v0.2.1 // indirect
github.com/ipfs/go-ipfs-posinfo v0.0.1 // indirect
github.com/ipfs/go-ipfs-pq v0.0.2 // indirect
github.com/ipfs/go-ipfs-provider v0.8.1 // indirect
github.com/ipfs/go-ipfs-routing v0.3.0 // indirect
github.com/ipfs/go-ipfs-provider v0.7.1 // indirect
github.com/ipfs/go-ipfs-routing v0.2.1 // indirect
github.com/ipfs/go-ipfs-util v0.0.2 // indirect
github.com/ipfs/go-ipld-cbor v0.0.6 // indirect
github.com/ipfs/go-ipld-cbor v0.0.5 // indirect
github.com/ipfs/go-ipld-format v0.4.0 // indirect
github.com/ipfs/go-ipld-legacy v0.1.1 // indirect
github.com/ipfs/go-ipns v0.3.0 // indirect
github.com/ipfs/go-libipfs v0.2.0 // indirect
github.com/ipfs/go-ipns v0.1.2 // indirect
github.com/ipfs/go-log v1.0.5 // indirect
github.com/ipfs/go-log/v2 v2.5.1 // indirect
github.com/ipfs/go-merkledag v0.9.0 // indirect
github.com/ipfs/go-merkledag v0.6.0 // indirect
github.com/ipfs/go-metrics-interface v0.0.1 // indirect
github.com/ipfs/go-mfs v0.2.1 // indirect
github.com/ipfs/go-namesys v0.6.0 // indirect
github.com/ipfs/go-namesys v0.5.0 // indirect
github.com/ipfs/go-path v0.3.0 // indirect
github.com/ipfs/go-peertaskqueue v0.8.0 // indirect
github.com/ipfs/go-unixfs v0.4.2 // indirect
github.com/ipfs/go-unixfsnode v1.5.1 // indirect
github.com/ipfs/go-verifcid v0.0.2 // indirect
github.com/ipfs/interface-go-ipfs-core v0.8.2 // indirect
github.com/ipfs/kubo v0.18.1 // indirect
github.com/ipld/edelweiss v0.2.0 // indirect
github.com/ipld/go-codec-dagpb v1.5.0 // indirect
github.com/ipld/go-ipld-prime v0.19.0 // indirect
github.com/ipfs/go-peertaskqueue v0.7.1 // indirect
github.com/ipfs/go-unixfs v0.4.0 // indirect
github.com/ipfs/go-unixfsnode v1.4.0 // indirect
github.com/ipfs/go-verifcid v0.0.1 // indirect
github.com/ipfs/interface-go-ipfs-core v0.7.0 // indirect
github.com/ipfs/kubo v0.14.0 // indirect
github.com/ipld/edelweiss v0.1.4 // indirect
github.com/ipld/go-codec-dagpb v1.4.0 // indirect
github.com/ipld/go-ipld-prime v0.17.0 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
github.com/jackc/pgconn v1.14.0 // indirect
github.com/jackc/pgconn v1.12.1 // indirect
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgproto3/v2 v2.3.2 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgtype v1.14.0 // indirect
github.com/jackc/pgx/v4 v4.18.1 // indirect
github.com/jackc/puddle v1.3.0 // indirect
github.com/jackc/pgproto3/v2 v2.3.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
github.com/jackc/pgtype v1.11.0 // indirect
github.com/jackc/pgx/v4 v4.16.1 // indirect
github.com/jackc/puddle v1.2.1 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
github.com/jinzhu/copier v0.2.4 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.4 // indirect
github.com/klauspost/compress v1.16.7 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/koron/go-ssdp v0.0.3 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/klauspost/compress v1.15.1 // indirect
github.com/klauspost/cpuid/v2 v2.0.12 // indirect
github.com/koron/go-ssdp v0.0.2 // indirect
github.com/libp2p/go-buffer-pool v0.0.2 // indirect
github.com/libp2p/go-cidranger v1.1.0 // indirect
github.com/libp2p/go-doh-resolver v0.4.0 // indirect
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
github.com/libp2p/go-libp2p v0.24.2 // indirect
github.com/libp2p/go-eventbus v0.2.1 // indirect
github.com/libp2p/go-flow-metrics v0.0.3 // indirect
github.com/libp2p/go-libp2p v0.20.3 // indirect
github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.20.0 // indirect
github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect
github.com/libp2p/go-libp2p-pubsub v0.8.3 // indirect
github.com/libp2p/go-libp2p-pubsub-router v0.6.0 // indirect
github.com/libp2p/go-libp2p-record v0.2.0 // indirect
github.com/libp2p/go-libp2p-routing-helpers v0.6.0 // indirect
github.com/libp2p/go-libp2p-core v0.16.1 // indirect
github.com/libp2p/go-libp2p-discovery v0.7.0 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.16.0 // indirect
github.com/libp2p/go-libp2p-kbucket v0.4.7 // indirect
github.com/libp2p/go-libp2p-loggables v0.1.0 // indirect
github.com/libp2p/go-libp2p-peerstore v0.6.0 // indirect
github.com/libp2p/go-libp2p-pubsub v0.6.1 // indirect
github.com/libp2p/go-libp2p-pubsub-router v0.5.0 // indirect
github.com/libp2p/go-libp2p-record v0.1.3 // indirect
github.com/libp2p/go-libp2p-resource-manager v0.3.0 // indirect
github.com/libp2p/go-libp2p-routing-helpers v0.2.3 // indirect
github.com/libp2p/go-libp2p-swarm v0.11.0 // indirect
github.com/libp2p/go-libp2p-xor v0.1.0 // indirect
github.com/libp2p/go-mplex v0.7.0 // indirect
github.com/libp2p/go-msgio v0.2.0 // indirect
github.com/libp2p/go-nat v0.1.0 // indirect
github.com/libp2p/go-netroute v0.2.1 // indirect
github.com/libp2p/go-openssl v0.1.0 // indirect
github.com/libp2p/go-netroute v0.2.0 // indirect
github.com/libp2p/go-openssl v0.0.7 // indirect
github.com/libp2p/go-reuseport v0.2.0 // indirect
github.com/libp2p/go-yamux/v4 v4.0.0 // indirect
github.com/libp2p/zeroconf/v2 v2.2.0 // indirect
github.com/lucas-clemente/quic-go v0.31.1 // indirect
github.com/libp2p/go-yamux/v3 v3.1.2 // indirect
github.com/libp2p/zeroconf/v2 v2.1.1 // indirect
github.com/lucas-clemente/quic-go v0.27.1 // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/marten-seemann/qpack v0.3.0 // indirect
github.com/marten-seemann/qtls-go1-18 v0.1.3 // indirect
github.com/marten-seemann/qtls-go1-19 v0.1.1 // indirect
github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
github.com/marten-seemann/qtls-go1-17 v0.1.1 // indirect
github.com/marten-seemann/qtls-go1-18 v0.1.1 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/marten-seemann/webtransport-go v0.4.3 // indirect
github.com/matryer/is v1.4.1 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-pointer v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/miekg/dns v1.1.50 // indirect
github.com/matryer/is v1.4.0 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/miekg/dns v1.1.48 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.1 // indirect
github.com/mitchellh/mapstructure v1.4.3 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr v0.8.0 // indirect
github.com/multiformats/go-base32 v0.0.4 // indirect
github.com/multiformats/go-base36 v0.1.0 // indirect
github.com/multiformats/go-multiaddr v0.5.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.7.0 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multibase v0.1.0 // indirect
github.com/multiformats/go-multicodec v0.5.0 // indirect
github.com/multiformats/go-multistream v0.3.3 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/multiformats/go-varint v0.0.6 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/opencontainers/runtime-spec v1.0.2 // indirect
github.com/openrelayxyz/plugeth-utils v1.2.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pelletier/go-toml v1.9.4 // indirect
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
github.com/pganalyze/pg_query_go/v4 v4.2.1 // indirect
github.com/pganalyze/pg_query_go/v2 v2.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.11.0 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rogpeppe/go-internal v1.11.0 // indirect
github.com/rs/cors v1.9.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.33.0 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/prometheus/tsdb v0.10.0 // indirect
github.com/raulk/clock v1.1.0 // indirect
github.com/raulk/go-watchdog v1.2.0 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/samber/lo v1.36.0 // indirect
github.com/segmentio/fasthash v1.0.3 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/shopspring/decimal v1.2.0 // indirect
github.com/smartystreets/assertions v1.0.1 // indirect
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.8.2 // indirect
github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/stretchr/objx v0.5.0 // indirect
github.com/stretchr/testify v1.8.2 // indirect
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
github.com/stretchr/objx v0.2.0 // indirect
github.com/stretchr/testify v1.7.2 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect
github.com/thoas/go-funk v0.9.3 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/urfave/cli/v2 v2.25.7 // indirect
github.com/thoas/go-funk v0.9.2 // indirect
github.com/tidwall/gjson v1.14.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.0 // indirect
github.com/tklauser/go-sysconf v0.3.5 // indirect
github.com/tklauser/numcpus v0.2.2 // indirect
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
github.com/urfave/cli/v2 v2.10.2 // indirect
github.com/valyala/fastjson v1.6.3 // indirect
github.com/wI2L/jsondiff v0.2.0 // indirect
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect
github.com/whyrusleeping/cbor-gen v0.0.0-20221220214510-0333c149dec0 // indirect
github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2 // indirect
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 // indirect
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.opencensus.io v0.24.0 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/otel v1.7.0 // indirect
go.opentelemetry.io/otel/trace v1.7.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/dig v1.15.0 // indirect
go.uber.org/fx v1.18.2 // indirect
go.uber.org/multierr v1.9.0 // indirect
go.uber.org/zap v1.24.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/dig v1.14.0 // indirect
go.uber.org/fx v1.16.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.21.0 // indirect
go4.org v0.0.0-20200411211856-f5505b9728dd // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect
golang.org/x/mod v0.11.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/term v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.7.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
golang.org/x/net v0.0.0-20220607020251-c690dde0001d // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
golang.org/x/tools v0.1.10 // indirect
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/ini.v1 v1.66.4 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
lukechampine.com/blake3 v1.1.7 // indirect
)
replace (
github.com/cerc-io/eth-ipfs-state-validator/v5 => git.vdb.to/cerc-io/eth-ipfs-state-validator/v5 v5.1.1-alpha
github.com/cerc-io/eth-iterator-utils => git.vdb.to/cerc-io/eth-iterator-utils v0.1.2
github.com/cerc-io/eth-testing => git.vdb.to/cerc-io/eth-testing v0.3.1
github.com/cerc-io/ipld-eth-statedb => git.vdb.to/cerc-io/ipld-eth-statedb v0.0.6-alpha
github.com/cerc-io/plugeth-statediff => git.vdb.to/cerc-io/plugeth-statediff v0.1.4
github.com/ethereum/go-ethereum => git.vdb.to/cerc-io/plugeth v0.0.0-20230808125822-691dc334fab1
github.com/openrelayxyz/plugeth-utils => git.vdb.to/cerc-io/plugeth-utils v0.0.0-20230706160122-cd41de354c46
)
replace github.com/ethereum/go-ethereum v1.10.23 => github.com/cerc-io/go-ethereum v1.10.23-statediff-4.2.0-alpha

986
go.sum

File diff suppressed because it is too large

View File

@ -1,72 +0,0 @@
package integration
import (
"encoding/json"
"fmt"
"math/big"
"net/http"
"github.com/ethereum/go-ethereum/common"
)
type ContractDeployed struct {
Address common.Address `json:"address"`
TransactionHash common.Hash `json:"txHash"`
BlockNumber int64 `json:"blockNumber"`
BlockHash common.Hash `json:"blockHash"`
}
type ContractDestroyed struct {
BlockNumber int64 `json:"blockNumber"`
}
type Tx struct {
From string `json:"from"`
To string `json:"to"`
Value *big.Int `json:"value"`
TransactionHash string `json:"txHash"`
BlockNumber int64 `json:"blockNumber"`
BlockHash string `json:"blockHash"`
}
type StorageKey struct {
Key string `json:"key"`
}
type CountIncremented struct {
BlockNumber *big.Int `json:"blockNumber"`
}
const ContractServerUrl = "http://localhost:3000"
// Factory which creates endpoint functions
func MakeGetAndDecodeFunc[R any](format string) func(...interface{}) (*R, error) {
return func(params ...interface{}) (*R, error) {
params = append([]interface{}{ContractServerUrl}, params...)
url := fmt.Sprintf(format, params...)
res, err := http.Get(url)
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, fmt.Errorf("%s: %s", url, res.Status)
}
var data R
decoder := json.NewDecoder(res.Body)
return &data, decoder.Decode(&data)
}
}
var (
DeployContract = MakeGetAndDecodeFunc[ContractDeployed]("%s/v1/deployContract")
DestroyContract = MakeGetAndDecodeFunc[ContractDestroyed]("%s/v1/destroyContract?addr=%s")
DeploySLVContract = MakeGetAndDecodeFunc[ContractDeployed]("%s/v1/deploySLVContract")
DestroySLVContract = MakeGetAndDecodeFunc[ContractDestroyed]("%s/v1/destroySLVContract?addr=%s")
SendEth = MakeGetAndDecodeFunc[Tx]("%s/v1/sendEth?to=%s&value=%s")
GetStorageSlotKey = MakeGetAndDecodeFunc[StorageKey]("%s/v1/getStorageKey?contract=%s&label=%s")
IncrementCount = MakeGetAndDecodeFunc[CountIncremented]("%s/v1/incrementCount%s?addr=%s")
Create2Contract = MakeGetAndDecodeFunc[ContractDeployed]("%s/v1/create2Contract?contract=%s&salt=%s")
)
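For orientation, a minimal, hypothetical sketch of calling these generated helpers directly; it assumes the test contract server is reachable at ContractServerUrl and uses the module path imported by the test suite below.

package main

import (
	"fmt"
	"log"

	"github.com/cerc-io/ipld-eth-server/v5/integration"
)

func main() {
	// Deploy the test contract through the helper HTTP endpoint.
	contract, err := integration.DeployContract()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("deployed %s in block %d\n", contract.Address.Hex(), contract.BlockNumber)
	// Self-destruct it again; the response carries the block of the destroy tx.
	destroyed, err := integration.DestroyContract(contract.Address)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("destroyed in block", destroyed.BlockNumber)
}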

View File

@ -1,378 +0,0 @@
package integration_test
import (
"context"
"math/big"
"math/rand"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/cerc-io/ipld-eth-server/v5/integration"
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
)
var _ = Describe("Direct proxy integration test", Label("proxy"), func() {
ctx := context.Background()
var contract *integration.ContractDeployed
var tx *integration.Tx
var contractErr error
var txErr error
Describe("get Block", func() {
BeforeEach(func() {
contract, contractErr = integration.DeployContract()
Expect(contractErr).ToNot(HaveOccurred())
})
It("get not existing block by number", func() {
blockNum := contract.BlockNumber + 100
gethBlock, err := gethClient.BlockByNumber(ctx, big.NewInt(blockNum))
Expect(err).To(MatchError(ethereum.NotFound))
Expect(gethBlock).To(BeZero())
ipldBlock, err := ipldClient.BlockByNumber(ctx, big.NewInt(blockNum))
Expect(err).To(MatchError(ethereum.NotFound))
Expect(ipldBlock).To(BeZero())
})
It("get not existing block by hash", func() {
gethBlock, err := gethClient.BlockByHash(ctx, nonExistingBlockHash)
Expect(err).To(MatchError(ethereum.NotFound))
Expect(gethBlock).To(BeZero())
ipldBlock, err := ipldClient.BlockByHash(ctx, nonExistingBlockHash)
Expect(err).To(MatchError(ethereum.NotFound))
Expect(ipldBlock).To(BeZero())
})
It("get block by number", func() {
blockNum := contract.BlockNumber
_, err := gethClient.BlockByNumber(ctx, big.NewInt(blockNum))
Expect(err).ToNot(HaveOccurred())
_, err = ipldClient.BlockByNumber(ctx, big.NewInt(blockNum))
Expect(err).To(HaveOccurred())
})
It("get block by hash", func() {
_, err := gethClient.BlockByHash(ctx, contract.BlockHash)
Expect(err).ToNot(HaveOccurred())
_, err = ipldClient.BlockByHash(ctx, contract.BlockHash)
Expect(err).To(HaveOccurred())
})
})
Describe("Transaction", func() {
BeforeEach(func() {
contract, contractErr = integration.DeployContract()
Expect(contractErr).ToNot(HaveOccurred())
})
It("Get tx by hash", func() {
_, _, err := gethClient.TransactionByHash(ctx, contract.TransactionHash)
Expect(err).ToNot(HaveOccurred())
_, _, err = ipldClient.TransactionByHash(ctx, contract.TransactionHash)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("not found"))
})
It("Get tx by block hash and index", func() {
_, err := gethClient.TransactionInBlock(ctx, contract.BlockHash, 0)
Expect(err).ToNot(HaveOccurred())
_, err = ipldClient.TransactionInBlock(ctx, contract.BlockHash, 0)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("not found"))
})
})
Describe("Receipt", func() {
BeforeEach(func() {
contract, contractErr = integration.DeployContract()
Expect(contractErr).ToNot(HaveOccurred())
})
It("Get tx receipt", func() {
_, err := gethClient.TransactionReceipt(ctx, contract.TransactionHash)
Expect(err).ToNot(HaveOccurred())
_, err = ipldClient.TransactionReceipt(ctx, contract.TransactionHash)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("not found"))
})
})
Describe("FilterLogs", func() {
BeforeEach(func() {
contract, contractErr = integration.DeployContract()
Expect(contractErr).ToNot(HaveOccurred())
})
It("with blockhash", func() {
blockHash := contract.BlockHash
filterQuery := ethereum.FilterQuery{
//Addresses: addresses,
BlockHash: &blockHash,
Topics: [][]common.Hash{},
}
gethLogs, err := gethClient.FilterLogs(ctx, filterQuery)
Expect(err).ToNot(HaveOccurred())
ipldLogs, err := ipldClient.FilterLogs(ctx, filterQuery)
Expect(err).ToNot(HaveOccurred())
// not empty list
Expect(gethLogs).ToNot(BeEmpty())
// empty list
Expect(ipldLogs).To(BeEmpty())
})
})
Describe("CodeAt", func() {
BeforeEach(func() {
contract, contractErr = integration.DeployContract()
Expect(contractErr).ToNot(HaveOccurred())
})
It("gets code of deployed contract with block number", func() {
_, err := gethClient.CodeAt(ctx, contract.Address, big.NewInt(contract.BlockNumber))
Expect(err).ToNot(HaveOccurred())
ipldCode, err := ipldClient.CodeAt(ctx, contract.Address, big.NewInt(contract.BlockNumber))
Expect(err).ToNot(HaveOccurred())
Expect(ipldCode).To(BeEmpty())
})
It("gets code of contract that doesn't exist at this height", func() {
gethCode, err := gethClient.CodeAt(ctx, contract.Address, big.NewInt(contract.BlockNumber-1))
Expect(err).ToNot(HaveOccurred())
ipldCode, err := ipldClient.CodeAt(ctx, contract.Address, big.NewInt(contract.BlockNumber-1))
Expect(err).ToNot(HaveOccurred())
Expect(gethCode).To(BeEmpty())
Expect(gethCode).To(Equal(ipldCode))
})
It("gets code at non-existing address without block number", func() {
gethCode, err := gethClient.CodeAt(ctx, nonExistingAddress, nil)
Expect(err).ToNot(HaveOccurred())
ipldCode, err := ipldClient.CodeAt(ctx, nonExistingAddress, nil)
Expect(err).ToNot(HaveOccurred())
Expect(gethCode).To(BeEmpty())
Expect(gethCode).To(Equal(ipldCode))
})
It("gets code of deployed contract without block number", func() {
_, err := gethClient.CodeAt(ctx, contract.Address, nil)
Expect(err).ToNot(HaveOccurred())
ipldCode, err := ipldClient.CodeAt(ctx, contract.Address, nil)
Expect(err).ToNot(HaveOccurred())
Expect(ipldCode).To(BeEmpty())
})
})
Describe("Get balance", func() {
var newAddress common.Address
rand.Read(newAddress[:])
BeforeEach(func() {
tx, txErr = integration.SendEth(newAddress, "0.01")
Expect(txErr).ToNot(HaveOccurred())
})
It("gets balance for an account with eth without block number", func() {
gethBalance, err := gethClient.BalanceAt(ctx, newAddress, nil)
Expect(err).ToNot(HaveOccurred())
Expect(gethBalance.String()).To(Equal("10000000000000000"))
ipldBalance, err := ipldClient.BalanceAt(ctx, newAddress, nil)
Expect(err).ToNot(HaveOccurred())
Expect(ipldBalance.String()).To(Equal("0"))
})
It("gets balance for an account with eth with block number", func() {
_, err := gethClient.BalanceAt(ctx, newAddress, big.NewInt(tx.BlockNumber))
Expect(err).ToNot(HaveOccurred())
_, err = ipldClient.BalanceAt(ctx, newAddress, big.NewInt(tx.BlockNumber))
Expect(err).To(MatchError("header not found"))
})
It("gets historical balance for an account with eth with block number", func() {
_, err := gethClient.BalanceAt(ctx, newAddress, big.NewInt(tx.BlockNumber-1))
Expect(err).ToNot(HaveOccurred())
_, err = ipldClient.BalanceAt(ctx, newAddress, big.NewInt(tx.BlockNumber-1))
Expect(err).To(MatchError("header not found"))
})
It("gets balance for a non-existing account without block number", func() {
gethBalance, err := gethClient.BalanceAt(ctx, nonExistingAddress, nil)
Expect(err).ToNot(HaveOccurred())
ipldBalance, err := ipldClient.BalanceAt(ctx, nonExistingAddress, nil)
Expect(err).ToNot(HaveOccurred())
Expect(gethBalance).To(Equal(ipldBalance))
})
It("gets balance for an non-existing block number", func() {
gethBalance, err := gethClient.BalanceAt(ctx, newAddress, big.NewInt(tx.BlockNumber+3))
Expect(err).To(MatchError("header not found"))
ipldBalance, err := ipldClient.BalanceAt(ctx, nonExistingAddress, big.NewInt(tx.BlockNumber+3))
Expect(err).To(MatchError("header not found"))
Expect(gethBalance).To(Equal(ipldBalance))
})
})
Describe("Get Storage", func() {
BeforeEach(func() {
contract, contractErr = integration.DeployContract()
Expect(contractErr).ToNot(HaveOccurred())
})
It("gets ERC20 total supply (without block number)", func() {
gethStorage, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, nil)
Expect(err).ToNot(HaveOccurred())
gethTotalSupply := new(big.Int).SetBytes(gethStorage)
Expect(gethTotalSupply).To(Equal(erc20TotalSupply))
ipldStorage, err := ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, nil)
Expect(err).ToNot(HaveOccurred())
Expect(ipldStorage).To(Equal(make([]byte, 32)))
})
It("gets ERC20 total supply (with block number)", func() {
gethStorage, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(contract.BlockNumber))
Expect(err).ToNot(HaveOccurred())
gethTotalSupply := new(big.Int).SetBytes(gethStorage)
Expect(gethTotalSupply).To(Equal(erc20TotalSupply))
_, err = ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(contract.BlockNumber))
Expect(err).To(MatchError("header not found"))
})
It("gets storage for non-existing account", func() {
_, err := gethClient.StorageAt(ctx, nonExistingAddress, ercTotalSupplyIndex, big.NewInt(contract.BlockNumber))
Expect(err).ToNot(HaveOccurred())
_, err = ipldClient.StorageAt(ctx, nonExistingAddress, ercTotalSupplyIndex, big.NewInt(contract.BlockNumber))
Expect(err).To(MatchError("header not found"))
})
It("gets storage for non-existing contract slot", func() {
_, err := gethClient.StorageAt(ctx, contract.Address, randomHash, big.NewInt(contract.BlockNumber))
Expect(err).ToNot(HaveOccurred())
_, err = ipldClient.StorageAt(ctx, contract.Address, randomHash, big.NewInt(contract.BlockNumber))
Expect(err).To(MatchError("header not found"))
})
It("gets storage for non-existing contract", func() {
gethStorage, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(0))
Expect(err).ToNot(HaveOccurred())
ipldStorage, err := ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(0))
Expect(err).ToNot(HaveOccurred())
Expect(gethStorage).To(Equal(ipldStorage))
})
It("gets storage for non-existing block number", func() {
blockNum := contract.BlockNumber + 100
gethStorage, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(blockNum))
Expect(err).To(MatchError("header not found"))
ipldStorage, err := ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(blockNum))
Expect(err).To(MatchError("header not found"))
Expect(gethStorage).To(Equal(ipldStorage))
})
It("get storage after self destruct", func() {
tx, err := integration.DestroyContract(contract.Address)
Expect(err).ToNot(HaveOccurred())
gethStorage1, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(tx.BlockNumber-1))
Expect(err).ToNot(HaveOccurred())
gethStorage2, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(tx.BlockNumber))
Expect(err).ToNot(HaveOccurred())
Expect(gethStorage1).NotTo(Equal(gethStorage2))
Expect(gethStorage2).To(Equal(eth.EmptyNodeValue))
_, err = ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(tx.BlockNumber-1))
Expect(err).To(MatchError("header not found"))
_, err = ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(tx.BlockNumber))
Expect(err).To(MatchError("header not found"))
// Query the current block
ipldStorage3, err := ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, nil)
Expect(err).ToNot(HaveOccurred())
Expect(eth.EmptyNodeValue).To(Equal(ipldStorage3))
})
})
Describe("eth call", func() {
var msg ethereum.CallMsg
BeforeEach(func() {
contract, contractErr = integration.DeployContract()
Expect(contractErr).ToNot(HaveOccurred())
msg = ethereum.CallMsg{
To: &contract.Address,
Data: common.Hex2Bytes("18160ddd"), // totalSupply()
}
})
It("calls totalSupply() without block number", func() {
gethResult, err := gethClient.CallContract(ctx, msg, nil)
Expect(err).ToNot(HaveOccurred())
gethTotalSupply := new(big.Int).SetBytes(gethResult)
Expect(gethTotalSupply).To(Equal(erc20TotalSupply))
ipldResult, err := ipldClient.CallContract(ctx, msg, nil)
Expect(err).ToNot(HaveOccurred())
Expect(gethResult).To(Equal(ipldResult))
})
It("calls totalSupply() with block number", func() {
gethResult, err := gethClient.CallContract(ctx, msg, big.NewInt(contract.BlockNumber))
Expect(err).ToNot(HaveOccurred())
gethTotalSupply := new(big.Int).SetBytes(gethResult)
Expect(gethTotalSupply).To(Equal(erc20TotalSupply))
ipldResult, err := ipldClient.CallContract(ctx, msg, big.NewInt(contract.BlockNumber))
Expect(err).ToNot(HaveOccurred())
Expect(gethResult).To(Equal(ipldResult))
})
})
Describe("Chain ID", func() {
It("Check chain id", func() {
_, err := gethClient.ChainID(ctx)
Expect(err).ToNot(HaveOccurred())
_, err = ipldClient.ChainID(ctx)
Expect(err).ToNot(HaveOccurred())
})
})
})
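As an aside, the call data "18160ddd" used in the eth call specs above is the 4-byte function selector for totalSupply(); a minimal sketch of deriving it:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// The selector is the first four bytes of keccak256("totalSupply()").
	selector := crypto.Keccak256([]byte("totalSupply()"))[:4]
	fmt.Printf("0x%x\n", selector) // 0x18160ddd
}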

View File

@ -1,53 +0,0 @@
package integration_test
import (
"os"
"strconv"
"testing"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func TestIntegration(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "integration test suite")
}
var (
gethHttpPath = "http://127.0.0.1:8545"
ipldEthHttpPath = "http://127.0.0.1:8081"
gethClient *ethclient.Client
ipldClient *ethclient.Client
gethRPCClient *rpc.Client
ipldRPCClient *rpc.Client
testChainId int64 = 99
)
var _ = BeforeSuite(func() {
var err error
envChainID := os.Getenv("ETH_CHAIN_ID")
if len(envChainID) == 0 {
panic("ETH_CHAIN_ID must be set")
}
testChainId, err = strconv.ParseInt(envChainID, 10, 64)
Expect(err).ToNot(HaveOccurred())
if path := os.Getenv("ETH_HTTP_PATH"); len(path) != 0 {
gethHttpPath = "http://" + path
}
if path := os.Getenv("SERVER_HTTP_PATH"); len(path) != 0 {
ipldEthHttpPath = "http://" + path
}
gethClient, err = ethclient.Dial(gethHttpPath)
Expect(err).ToNot(HaveOccurred())
ipldClient, err = ethclient.Dial(ipldEthHttpPath)
Expect(err).ToNot(HaveOccurred())
})

View File

@ -1,28 +0,0 @@
package integration_test
import (
"context"
"errors"
"time"
"github.com/ethereum/go-ethereum/ethclient"
)
func waitForBlock(ctx context.Context, client *ethclient.Client, target int64) error {
timeout := 10 * time.Second
for {
select {
case <-time.After(timeout):
return errors.New("timed out")
default:
latest, err := client.BlockNumber(ctx)
if err != nil {
return err
}
if uint64(target) <= latest {
return nil
}
time.Sleep(time.Second)
}
}
}
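A hypothetical usage sketch, from within this same integration_test package: after a transaction is submitted through the test server, block until the target height is visible to the ipld-eth-server client before asserting against it.

It("waits for ipld-eth-server to catch up", func() {
	contract, err := integration.DeployContract()
	Expect(err).ToNot(HaveOccurred())
	// waitForBlock polls the client's BlockNumber until it reaches the deploy height.
	err = waitForBlock(context.Background(), ipldClient, contract.BlockNumber)
	Expect(err).ToNot(HaveOccurred())
})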

View File

@ -16,9 +16,14 @@
package main
import (
"github.com/cerc-io/ipld-eth-server/v5/cmd"
"github.com/sirupsen/logrus"
"github.com/cerc-io/ipld-eth-server/v4/cmd"
)
func main() {
logrus.SetFormatter(&logrus.TextFormatter{
FullTimestamp: true,
})
cmd.Execute()
}

BIN
pkg/.DS_Store vendored Normal file

Binary file not shown.

44
pkg/client/client.go Normal file
View File

@ -0,0 +1,44 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// Client is used by watchers to stream chain IPLD data from a vulcanizedb ipld-eth-server
package client
import (
"context"
"github.com/ethereum/go-ethereum/rpc"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/serve"
)
// Client is used to subscribe to the ipld-eth-server ipld data stream
type Client struct {
c *rpc.Client
}
// NewClient creates a new Client
func NewClient(c *rpc.Client) *Client {
return &Client{
c: c,
}
}
// Stream is the main loop for subscribing to iplds from an ipld-eth-server server
func (c *Client) Stream(payloadChan chan serve.SubscriptionPayload, params eth.SubscriptionSettings) (*rpc.ClientSubscription, error) {
return c.c.Subscribe(context.Background(), "vdb", payloadChan, "stream", params)
}
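A minimal, hypothetical sketch of a watcher using this client; the websocket endpoint and the empty SubscriptionSettings are placeholders, not values prescribed by the server.

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/rpc"

	"github.com/cerc-io/ipld-eth-server/v4/pkg/client"
	"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
	"github.com/cerc-io/ipld-eth-server/v4/pkg/serve"
)

func main() {
	// Dial the ipld-eth-server websocket endpoint (placeholder address).
	rpcClient, err := rpc.Dial("ws://localhost:8080")
	if err != nil {
		log.Fatal(err)
	}
	payloads := make(chan serve.SubscriptionPayload)
	sub, err := client.NewClient(rpcClient).Stream(payloads, eth.SubscriptionSettings{})
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()
	// Consume streamed IPLD payloads as they arrive.
	for payload := range payloads {
		log.Printf("received payload: %+v", payload)
	}
}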

View File

@ -1,51 +0,0 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package debug
import (
"context"
"errors"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
)
var _ tracers.Backend = &Backend{}
var (
errMethodNotSupported = errors.New("backend method not supported")
)
// Backend implements tracers.Backend interface
type Backend struct {
eth.Backend
}
// StateAtBlock retrieves the state database associated with a certain block
func (b *Backend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive, preferDisk bool) (*state.StateDB, tracers.StateReleaseFunc, error) {
return nil, func() {}, errMethodNotSupported
}
// StateAtTransaction returns the execution environment of a certain transaction
func (b *Backend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) {
return nil, vm.BlockContext{}, nil, func() {}, errMethodNotSupported
}

View File

@ -27,25 +27,22 @@ import (
"strconv"
"time"
"github.com/cerc-io/plugeth-statediff"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
"github.com/sirupsen/logrus"
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
ipld_direct_state "github.com/cerc-io/ipld-eth-statedb/direct_by_leaf"
)
const (
defaultEVMTimeout = 30 * time.Second
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
)
// APIName is the namespace for the watcher's eth api
@ -54,38 +51,28 @@ const APIName = "eth"
// APIVersion is the version of the watcher's eth api
const APIVersion = "0.0.1"
type APIConfig struct {
// Proxy node for forwarding cache misses
SupportsStateDiff bool // Whether the remote node supports the statediff_writeStateDiffAt endpoint, if it does we can fill the local cache when we hit a miss
ForwardEthCalls bool // if true, forward eth_call calls directly to the configured proxy node
ForwardGetStorageAt bool // if true, forward eth_getStorageAt calls directly to the configured proxy node
ProxyOnError bool // turn on regular proxy fall-through on errors; needed to test difference between direct and indirect fall-through
GetLogsBlockLimit int64 // the maximum size of the block range to use in GetLogs
StateDiffTimeout time.Duration
}
// PublicEthAPI is the eth namespace API
type PublicEthAPI struct {
// Local db backend
B *Backend
rpc *rpc.Client
ethClient *ethclient.Client
config APIConfig
B *Backend
// Proxy node for forwarding cache misses
supportsStateDiff bool // Whether the remote node supports the statediff_writeStateDiffAt endpoint, if it does we can fill the local cache when we hit a miss
rpc *rpc.Client
ethClient *ethclient.Client
forwardEthCalls bool // if true, forward eth_call calls directly to the configured proxy node
proxyOnError bool // turn on regular proxy fall-through on errors; needed to test difference between direct and indirect fall-through
}
// NewPublicEthAPI creates a new PublicEthAPI with the provided underlying Backend
func NewPublicEthAPI(b *Backend, client *rpc.Client, config APIConfig) (*PublicEthAPI, error) {
func NewPublicEthAPI(b *Backend, client *rpc.Client, supportsStateDiff, forwardEthCalls, proxyOnError bool) (*PublicEthAPI, error) {
if b == nil {
return nil, errors.New("ipld-eth-server must be configured with an ethereum backend")
}
if config.ForwardEthCalls && client == nil {
if forwardEthCalls && client == nil {
return nil, errors.New("ipld-eth-server is configured to forward eth_calls to proxy node but no proxy node is configured")
}
if config.ForwardGetStorageAt && client == nil {
return nil, errors.New("ipld-eth-server is configured to forward eth_getStorageAt to proxy node but no proxy node is configured")
}
if config.ProxyOnError && client == nil {
if proxyOnError && client == nil {
return nil, errors.New("ipld-eth-server is configured to forward all calls to proxy node on errors but no proxy node is configured")
}
var ethClient *ethclient.Client
@ -93,10 +80,12 @@ func NewPublicEthAPI(b *Backend, client *rpc.Client, config APIConfig) (*PublicE
ethClient = ethclient.NewClient(client)
}
return &PublicEthAPI{
B: b,
rpc: client,
ethClient: ethClient,
config: config,
B: b,
supportsStateDiff: supportsStateDiff,
rpc: client,
ethClient: ethClient,
forwardEthCalls: forwardEthCalls,
proxyOnError: proxyOnError,
}, nil
}
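For orientation, a hypothetical sketch of wiring up the eth namespace API with the APIConfig-style constructor variant shown in this hunk; backend and proxyClient are assumed to be built elsewhere (e.g. by the serve package), and the config values are illustrative only.

func newEthAPI(backend *eth.Backend, proxyClient *rpc.Client) (*eth.PublicEthAPI, error) {
	conf := eth.APIConfig{
		SupportsStateDiff:   true,             // proxy exposes statediff_writeStateDiffAt
		ForwardEthCalls:     false,            // serve eth_call locally
		ForwardGetStorageAt: false,            // serve eth_getStorageAt locally
		ProxyOnError:        true,             // fall through to the proxy on local misses
		GetLogsBlockLimit:   500,              // assumed example range limit for eth_getLogs
		StateDiffTimeout:    30 * time.Second, // assumed timeout
	}
	return eth.NewPublicEthAPI(backend, proxyClient, conf)
}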
@ -114,7 +103,7 @@ func (pea *PublicEthAPI) GetHeaderByNumber(ctx context.Context, number rpc.Block
if header != nil && err == nil {
return pea.rpcMarshalHeader(header)
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
if header, err := pea.ethClient.HeaderByNumber(ctx, big.NewInt(number.Int64())); header != nil && err == nil {
go pea.writeStateDiffAt(number.Int64())
return pea.rpcMarshalHeader(header)
@ -133,7 +122,7 @@ func (pea *PublicEthAPI) GetHeaderByHash(ctx context.Context, hash common.Hash)
}
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
var result map[string]interface{}
if err := pea.rpc.CallContext(ctx, &result, "eth_getHeaderByHash", hash); result != nil && err == nil {
go pea.writeStateDiffFor(hash)
@ -164,17 +153,18 @@ func (pea *PublicEthAPI) BlockNumber() hexutil.Uint64 {
}
// GetBlockByNumber returns the requested canonical block.
// - When blockNr is -1 the chain head is returned.
// - We cannot support pending block calls since we do not have an active miner
// - When fullTx is true all transactions in the block are returned, otherwise
// only the transaction hash is returned.
// * When blockNr is -1 the chain head is returned.
// * We cannot support pending block calls since we do not have an active miner
// * When fullTx is true all transactions in the block are returned, otherwise
// only the transaction hash is returned.
func (pea *PublicEthAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) {
logrus.Debug("Received getBlockByNumber request for number ", number.Int64())
block, err := pea.B.BlockByNumber(ctx, number)
if block != nil && err == nil {
return pea.rpcMarshalBlock(block, true, fullTx)
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
if block, err := pea.ethClient.BlockByNumber(ctx, big.NewInt(number.Int64())); block != nil && err == nil {
go pea.writeStateDiffAt(number.Int64())
return pea.rpcMarshalBlock(block, true, fullTx)
@ -187,12 +177,13 @@ func (pea *PublicEthAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockN
// GetBlockByHash returns the requested block. When fullTx is true all transactions in the block are returned in full
// detail, otherwise only the transaction hash is returned.
func (pea *PublicEthAPI) GetBlockByHash(ctx context.Context, hash common.Hash, fullTx bool) (map[string]interface{}, error) {
logrus.Debug("Received getBlockByHash request for hash ", hash.Hex())
block, err := pea.B.BlockByHash(ctx, hash)
if block != nil && err == nil {
return pea.rpcMarshalBlock(block, true, fullTx)
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
if block, err := pea.ethClient.BlockByHash(ctx, hash); block != nil && err == nil {
go pea.writeStateDiffFor(hash)
return pea.rpcMarshalBlock(block, true, fullTx)
@ -205,7 +196,7 @@ func (pea *PublicEthAPI) GetBlockByHash(ctx context.Context, hash common.Hash, f
// ChainId is the EIP-155 replay-protection chain id for the current ethereum chain config.
func (pea *PublicEthAPI) ChainId() *hexutil.Big {
if pea.B.Config.ChainConfig.ChainID == nil || pea.B.Config.ChainConfig.ChainID.Cmp(big.NewInt(0)) <= 0 {
if pea.config.ProxyOnError {
if pea.proxyOnError {
if id, err := pea.ethClient.ChainID(context.Background()); err == nil {
return (*hexutil.Big)(id)
}
@ -229,14 +220,14 @@ func (pea *PublicEthAPI) GetUncleByBlockNumberAndIndex(ctx context.Context, bloc
if block != nil && err == nil {
uncles := block.Uncles()
if index >= hexutil.Uint(len(uncles)) {
log.Debugxf(ctx, "uncle with index %s request at block number %d was not found", index.String(), blockNr.Int64())
logrus.Debugf("uncle with index %s request at block number %d was not found", index.String(), blockNr.Int64())
return nil, nil
}
block = types.NewBlockWithHeader(uncles[index])
return pea.rpcMarshalBlock(block, false, false)
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
if uncle, uncleHashes, err := getBlockAndUncleHashes(pea.rpc, ctx, "eth_getUncleByBlockNumberAndIndex", blockNr, index); uncle != nil && err == nil {
go pea.writeStateDiffAt(blockNr.Int64())
return pea.rpcMarshalBlockWithUncleHashes(uncle, uncleHashes, false, false)
@ -253,14 +244,14 @@ func (pea *PublicEthAPI) GetUncleByBlockHashAndIndex(ctx context.Context, blockH
if block != nil {
uncles := block.Uncles()
if index >= hexutil.Uint(len(uncles)) {
log.Debugxf(ctx, "uncle with index %s request at block hash %s was not found", index.String(), blockHash.Hex())
logrus.Debugf("uncle with index %s request at block hash %s was not found", index.String(), blockHash.Hex())
return nil, nil
}
block = types.NewBlockWithHeader(uncles[index])
return pea.rpcMarshalBlock(block, false, false)
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
if uncle, uncleHashes, err := getBlockAndUncleHashes(pea.rpc, ctx, "eth_getUncleByBlockHashAndIndex", blockHash, index); uncle != nil && err == nil {
go pea.writeStateDiffFor(blockHash)
return pea.rpcMarshalBlockWithUncleHashes(uncle, uncleHashes, false, false)
@ -277,7 +268,7 @@ func (pea *PublicEthAPI) GetUncleCountByBlockNumber(ctx context.Context, blockNr
return &n
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
var num *hexutil.Uint
if err := pea.rpc.CallContext(ctx, &num, "eth_getUncleCountByBlockNumber", blockNr); num != nil && err == nil {
go pea.writeStateDiffAt(blockNr.Int64())
@ -295,7 +286,7 @@ func (pea *PublicEthAPI) GetUncleCountByBlockHash(ctx context.Context, blockHash
return &n
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
var num *hexutil.Uint
if err := pea.rpc.CallContext(ctx, &num, "eth_getUncleCountByBlockHash", blockHash); num != nil && err == nil {
go pea.writeStateDiffFor(blockHash)
@ -319,7 +310,7 @@ func (pea *PublicEthAPI) GetTransactionCount(ctx context.Context, address common
return count, nil
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
var num *hexutil.Uint64
if err := pea.rpc.CallContext(ctx, &num, "eth_getTransactionCount", address, blockNrOrHash); num != nil && err == nil {
go pea.writeStateDiffAtOrFor(blockNrOrHash)
@ -347,7 +338,7 @@ func (pea *PublicEthAPI) GetBlockTransactionCountByNumber(ctx context.Context, b
return &n
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
var num *hexutil.Uint
if err := pea.rpc.CallContext(ctx, &num, "eth_getBlockTransactionCountByNumber", blockNr); num != nil && err == nil {
go pea.writeStateDiffAt(blockNr.Int64())
@ -365,7 +356,7 @@ func (pea *PublicEthAPI) GetBlockTransactionCountByHash(ctx context.Context, blo
return &n
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
var num *hexutil.Uint
if err := pea.rpc.CallContext(ctx, &num, "eth_getBlockTransactionCountByHash", blockHash); num != nil && err == nil {
go pea.writeStateDiffFor(blockHash)
@ -382,7 +373,7 @@ func (pea *PublicEthAPI) GetTransactionByBlockNumberAndIndex(ctx context.Context
return newRPCTransactionFromBlockIndex(block, uint64(index))
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
var tx *RPCTransaction
if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByBlockNumberAndIndex", blockNr, index); tx != nil && err == nil {
go pea.writeStateDiffAt(blockNr.Int64())
@ -399,7 +390,7 @@ func (pea *PublicEthAPI) GetTransactionByBlockHashAndIndex(ctx context.Context,
return newRPCTransactionFromBlockIndex(block, uint64(index))
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
var tx *RPCTransaction
if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByBlockHashAndIndex", blockHash, index); tx != nil && err == nil {
go pea.writeStateDiffFor(blockHash)
@ -415,7 +406,7 @@ func (pea *PublicEthAPI) GetRawTransactionByBlockNumberAndIndex(ctx context.Cont
if block, _ := pea.B.BlockByNumber(ctx, blockNr); block != nil {
return newRPCRawTransactionFromBlockIndex(block, uint64(index))
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
var tx hexutil.Bytes
if err := pea.rpc.CallContext(ctx, &tx, "eth_getRawTransactionByBlockNumberAndIndex", blockNr, index); tx != nil && err == nil {
go pea.writeStateDiffAt(blockNr.Int64())
@ -430,7 +421,7 @@ func (pea *PublicEthAPI) GetRawTransactionByBlockHashAndIndex(ctx context.Contex
if block, _ := pea.B.BlockByHash(ctx, blockHash); block != nil {
return newRPCRawTransactionFromBlockIndex(block, uint64(index))
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
var tx hexutil.Bytes
if err := pea.rpc.CallContext(ctx, &tx, "eth_getRawTransactionByBlockHashAndIndex", blockHash, index); tx != nil && err == nil {
go pea.writeStateDiffFor(blockHash)
@ -452,7 +443,7 @@ func (pea *PublicEthAPI) GetTransactionByHash(ctx context.Context, hash common.H
return NewRPCTransaction(tx, blockHash, blockNumber, index, header.BaseFee), nil
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
var tx *RPCTransaction
if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByHash", hash); tx != nil && err == nil {
go pea.writeStateDiffFor(hash)
@ -467,9 +458,9 @@ func (pea *PublicEthAPI) GetRawTransactionByHash(ctx context.Context, hash commo
// Retrieve a finalized transaction, or a pooled otherwise
tx, _, _, _, err := pea.B.GetTransaction(ctx, hash)
if tx != nil && err == nil {
return tx.MarshalBinary()
return rlp.EncodeToBytes(tx)
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
var tx hexutil.Bytes
if err := pea.rpc.CallContext(ctx, &tx, "eth_getRawTransactionByHash", hash); tx != nil && err == nil {
go pea.writeStateDiffFor(hash)
@ -479,84 +470,6 @@ func (pea *PublicEthAPI) GetRawTransactionByHash(ctx context.Context, hash commo
return nil, err
}
// accessListResult returns an optional accesslist
// Its the result of the `debug_createAccessList` RPC call.
// It contains an error if the transaction itself failed.
type accessListResult struct {
Accesslist *types.AccessList `json:"accessList"`
Error string `json:"error,omitempty"`
GasUsed hexutil.Uint64 `json:"gasUsed"`
}
// CreateAccessList creates a EIP-2930 type AccessList for the given transaction.
// Reexec and BlockNrOrHash can be specified to create the accessList on top of a certain state.
func (pea *PublicEthAPI) CreateAccessList(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash) (*accessListResult, error) {
if pea.rpc != nil {
var res *accessListResult
if err := pea.rpc.CallContext(ctx, &res, "eth_createAccessList", args, blockNrOrHash); err != nil {
return nil, err
}
return res, nil
}
return nil, RequiresProxyError{method: "eth_createAccessList"}
}
type feeHistoryResult struct {
OldestBlock *hexutil.Big `json:"oldestBlock"`
Reward [][]*hexutil.Big `json:"reward,omitempty"`
BaseFee []*hexutil.Big `json:"baseFeePerGas,omitempty"`
GasUsedRatio []float64 `json:"gasUsedRatio"`
}
// FeeHistory returns the fee market history.
func (pea *PublicEthAPI) FeeHistory(ctx context.Context, blockCount int, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*feeHistoryResult, error) {
if pea.rpc != nil {
var res *feeHistoryResult
if err := pea.rpc.CallContext(ctx, &res, "eth_feeHistory", blockCount, lastBlock, rewardPercentiles); err != nil {
return nil, err
}
return res, nil
}
return nil, RequiresProxyError{method: "eth_feeHistory"}
}
// EstimateGas returns an estimate of the amount of gas needed to execute the
// given transaction against the current pending block.
func (pea *PublicEthAPI) EstimateGas(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) {
if pea.rpc != nil {
var res hexutil.Uint64
if err := pea.rpc.CallContext(ctx, &res, "eth_estimateGas", args, blockNrOrHash); err != nil {
return hexutil.Uint64(0), err
}
return res, nil
}
return hexutil.Uint64(0), RequiresProxyError{method: "eth_estimateGas"}
}
// GasPrice returns a suggestion for a gas price for legacy transactions.
func (pea *PublicEthAPI) GasPrice(ctx context.Context) (*hexutil.Big, error) {
if pea.rpc != nil {
var res *hexutil.Big
if err := pea.rpc.CallContext(ctx, &res, "eth_gasPrice"); err != nil {
return nil, err
}
return res, nil
}
return nil, RequiresProxyError{method: "eth_gasPrice"}
}
// MaxPriorityFeePerGas returns a suggestion for a gas tip cap for dynamic fee transactions.
func (pea *PublicEthAPI) MaxPriorityFeePerGas(ctx context.Context) (*hexutil.Big, error) {
if pea.rpc != nil {
var res *hexutil.Big
if err := pea.rpc.CallContext(ctx, &res, "eth_maxPriorityFeePerGas"); err != nil {
return nil, err
}
return res, nil
}
return nil, RequiresProxyError{method: "eth_maxPriorityFeePerGas"}
}
/*
Receipts and Logs
@ -569,7 +482,7 @@ func (pea *PublicEthAPI) GetTransactionReceipt(ctx context.Context, hash common.
if receipt != nil && err == nil {
return receipt, nil
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
if receipt := pea.remoteGetTransactionReceipt(ctx, hash); receipt != nil {
go pea.writeStateDiffFor(hash)
return receipt, nil
@ -595,7 +508,7 @@ func (pea *PublicEthAPI) localGetTransactionReceipt(ctx context.Context, hash co
if err != nil {
return nil, err
}
err = receipts.DeriveFields(pea.B.Config.ChainConfig, blockHash, blockNumber, block.BaseFee(), block.Transactions())
err = receipts.DeriveFields(pea.B.Config.ChainConfig, blockHash, blockNumber, block.Transactions())
if err != nil {
return nil, err
}
@ -604,11 +517,13 @@ func (pea *PublicEthAPI) localGetTransactionReceipt(ctx context.Context, hash co
}
receipt := receipts[index]
signer := SignerForTx(tx)
var signer types.Signer = types.FrontierSigner{}
if tx.Protected() {
signer = types.NewEIP155Signer(tx.ChainId())
}
from, _ := types.Sender(signer, tx)
fields := map[string]interface{}{
"type": hexutil.Uint64(receipt.Type),
"blockHash": blockHash,
"blockNumber": hexutil.Uint64(blockNumber),
"transactionHash": hash,
@ -620,7 +535,6 @@ func (pea *PublicEthAPI) localGetTransactionReceipt(ctx context.Context, hash co
"contractAddress": nil,
"logs": receipt.Logs,
"logsBloom": receipt.Bloom,
"effectiveGasPrice": (*hexutil.Big)(receipt.EffectiveGasPrice),
}
// Assign receipt status or post state.
@ -666,7 +580,7 @@ func (pea *PublicEthAPI) remoteGetTransactionReceipt(ctx context.Context, hash c
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs
func (pea *PublicEthAPI) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([]*types.Log, error) {
logs, err := pea.localGetLogs(crit)
if err != nil && pea.config.ProxyOnError {
if err != nil && pea.proxyOnError {
var res []*types.Log
if err := pea.rpc.CallContext(ctx, &res, "eth_getLogs", crit); err == nil {
go pea.writeStateDiffWithCriteria(crit)
@ -700,63 +614,70 @@ func (pea *PublicEthAPI) localGetLogs(crit filters.FilterCriteria) ([]*types.Log
Topics: topicStrSets,
}
// Begin tx
tx, err := pea.B.DB.Beginx()
if err != nil {
return nil, err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
// If we have a blockHash to filter on, fire off single retrieval query
if crit.BlockHash != nil {
var filteredLogs []LogResult
filteredLogs, err := pea.B.Retriever.RetrieveFilteredLogsForBlock(pea.B.DB, filter, crit.BlockHash)
filteredLogs, err := pea.B.Retriever.RetrieveFilteredLog(tx, filter, 0, crit.BlockHash)
if err != nil {
return nil, err
}
var logs []*types.Log
logs, err = decomposeLogs(filteredLogs)
return logs, err
return decomposeLogs(filteredLogs)
}
// Otherwise, create block range from criteria
// nil values are filled in; to request a single block have both ToBlock and FromBlock equal that number
// geth uses LatestBlockNumber as the default value for both begin and end, so we do the same
lastBlockNumber, err := pea.B.Retriever.RetrieveLastBlockNumber()
if err != nil {
return nil, err
startingBlock := crit.FromBlock
endingBlock := crit.ToBlock
if startingBlock == nil {
startingBlock = common.Big0
}
start := lastBlockNumber
if crit.FromBlock != nil {
start, err = pea.B.NormalizeBlockNumber(rpc.BlockNumber(crit.FromBlock.Int64()))
if endingBlock == nil {
endingBlockInt, err := pea.B.Retriever.RetrieveLastBlockNumber()
if err != nil {
return nil, err
}
endingBlock = big.NewInt(endingBlockInt)
}
end := lastBlockNumber
if crit.ToBlock != nil {
end, err = pea.B.NormalizeBlockNumber(rpc.BlockNumber(crit.ToBlock.Int64()))
start := startingBlock.Int64()
end := endingBlock.Int64()
var logs []*types.Log
for i := start; i <= end; i++ {
filteredLogs, err := pea.B.Retriever.RetrieveFilteredLog(tx, filter, i, nil)
if err != nil {
return nil, err
}
logCIDs, err := decomposeLogs(filteredLogs)
if err != nil {
return nil, err
}
logs = append(logs, logCIDs...)
}
if pea.config.GetLogsBlockLimit > 0 && (end-start) > pea.config.GetLogsBlockLimit {
return nil, errors.New(
fmt.Sprintf(
"Invalid eth_getLogs request. 'fromBlock'-'toBlock' range too large. Max range: %d",
pea.config.GetLogsBlockLimit,
))
}
filteredLogs, err := pea.B.Retriever.RetrieveFilteredLogsForBlockRange(pea.B.DB, filter, start, end)
if err != nil {
if err := tx.Commit(); err != nil {
return nil, err
}
var logCIDs []*types.Log
logCIDs, err = decomposeLogs(filteredLogs)
if err != nil {
return nil, err
}
return logCIDs, err // need to return err variable so that we return the err = tx.Commit() assignment in the defer
return logs, err // need to return err variable so that we return the err = tx.Commit() assignment in the defer
}
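The "return err variable" comment above relies on a subtle property of the Beginx/defer pattern: the deferred closure inspects the final value of err to decide between Rollback and Commit, and on the success path it overwrites err with the result of tx.Commit(). Below is a minimal, self-contained sketch of that pattern; queryInTx and fn are illustrative names only, not part of this codebase, and the named error return is what lets the deferred Commit error reach the caller.

package example

import "database/sql"

// queryInTx runs fn inside a transaction, rolling back on panic or error and
// committing otherwise. err is a named return value, so the deferred
// err = tx.Commit() assignment is what the caller ultimately observes.
func queryInTx(db *sql.DB, fn func(tx *sql.Tx) error) (err error) {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer func() {
		if p := recover(); p != nil {
			tx.Rollback() // discard the transaction, then re-panic
			panic(p)
		} else if err != nil {
			tx.Rollback() // fn failed: discard the transaction
		} else {
			err = tx.Commit() // success: surface any commit error
		}
	}()
	err = fn(tx)
	return err
}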
/*
@ -770,14 +691,13 @@ State and Storage
// block numbers are also allowed.
func (pea *PublicEthAPI) GetBalance(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Big, error) {
bal, err := pea.localGetBalance(ctx, address, blockNrOrHash)
if err != nil && err != sql.ErrNoRows {
return nil, err
} else if bal != nil {
if bal != nil && err == nil {
return bal, nil
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
var res *hexutil.Big
if err := pea.rpc.CallContext(ctx, &res, "eth_getBalance", address, blockNrOrHash); res != nil && err == nil {
go pea.writeStateDiffAtOrFor(blockNrOrHash)
return res, nil
}
}
@ -800,13 +720,6 @@ func (pea *PublicEthAPI) localGetBalance(ctx context.Context, address common.Add
// block number. The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta block
// numbers are also allowed.
func (pea *PublicEthAPI) GetStorageAt(ctx context.Context, address common.Address, key string, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) {
if pea.config.ForwardGetStorageAt {
var res hexutil.Bytes
// If forwarding all getStorageAt calls, don't request statediffing.
err := pea.rpc.CallContext(ctx, &res, "eth_getStorageAt", address, key, blockNrOrHash)
return res, err
}
storageVal, err := pea.B.GetStorageByNumberOrHash(ctx, address, common.HexToHash(key), blockNrOrHash)
if storageVal != nil && err == nil {
var value common.Hash
@ -821,10 +734,10 @@ func (pea *PublicEthAPI) GetStorageAt(ctx context.Context, address common.Addres
return value[:], nil
}
if pea.config.ProxyOnError {
log.Warnxf(ctx, "Missing eth_getStorageAt(%s, %s, %s)", address.Hash().String(), key, blockNrOrHash.String())
if pea.proxyOnError {
var res hexutil.Bytes
if err := pea.rpc.CallContext(ctx, &res, "eth_getStorageAt", address, key, blockNrOrHash); res != nil && err == nil {
go pea.writeStateDiffAtOrFor(blockNrOrHash)
return res, nil
}
}
@ -840,9 +753,10 @@ func (pea *PublicEthAPI) GetCode(ctx context.Context, address common.Address, bl
if code != nil && err == nil {
return code, nil
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
var res hexutil.Bytes
if err := pea.rpc.CallContext(ctx, &res, "eth_getCode", address, blockNrOrHash); res != nil && err == nil {
go pea.writeStateDiffAtOrFor(blockNrOrHash)
return res, nil
}
}
@ -859,26 +773,23 @@ func (pea *PublicEthAPI) GetProof(ctx context.Context, address common.Address, s
if proof != nil && err == nil {
return proof, nil
}
if pea.config.ProxyOnError {
if pea.proxyOnError {
var res *AccountResult
if err := pea.rpc.CallContext(ctx, &res, "eth_getProof", address, storageKeys, blockNrOrHash); res != nil && err == nil {
go pea.writeStateDiffAtOrFor(blockNrOrHash)
return res, nil
}
}
return nil, err
}
// this continues to use ipfs-ethdb based geth StateDB as it requires trie access
func (pea *PublicEthAPI) localGetProof(ctx context.Context, address common.Address, storageKeys []string, blockNrOrHash rpc.BlockNumberOrHash) (*AccountResult, error) {
state, _, err := pea.B.IPLDTrieStateDBAndHeaderByNumberOrHash(ctx, blockNrOrHash)
state, _, err := pea.B.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
if state == nil || err != nil {
return nil, err
}
storageTrie, err := state.StorageTrie(address)
if storageTrie == nil || err != nil {
return nil, err
}
storageTrie := state.StorageTrie(address)
storageHash := types.EmptyRootHash
codeHash := state.GetCodeHash(address)
storageProof := make([]StorageResult, len(storageKeys))
@ -921,11 +832,6 @@ func (pea *PublicEthAPI) localGetProof(ctx context.Context, address common.Addre
}, state.Error()
}
// GetSlice returns a slice of state or storage nodes from a provided root to a provided path and past it to a certain depth
func (pea *PublicEthAPI) GetSlice(ctx context.Context, path string, depth int, root common.Hash, storage bool) (*GetSliceResponse, error) {
return pea.B.GetSlice(path, depth, root, storage)
}
// revertError is an API error that encompasses an EVM revert with JSON error
// code and a binary data blob.
type revertError struct {
@ -974,7 +880,7 @@ type OverrideAccount struct {
type StateOverride map[common.Address]OverrideAccount
// Apply overrides the fields of specified accounts into the given state.
func (diff *StateOverride) Apply(state *ipld_direct_state.StateDB) error {
func (diff *StateOverride) Apply(state *state.StateDB) error {
if diff == nil {
return nil
}
@ -1015,13 +921,13 @@ func (diff *StateOverride) Apply(state *ipld_direct_state.StateDB) error {
// Note, this function doesn't make any changes in the state/blockchain and is
// useful to execute and retrieve values.
func (pea *PublicEthAPI) Call(ctx context.Context, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Bytes, error) {
if pea.config.ForwardEthCalls {
if pea.forwardEthCalls {
var hex hexutil.Bytes
err := pea.rpc.CallContext(ctx, &hex, "eth_call", args, blockNrOrHash, overrides)
return hex, err
}
result, err := DoCall(ctx, pea.B, args, blockNrOrHash, overrides, defaultEVMTimeout, pea.B.Config.RPCGasCap.Uint64())
result, err := DoCall(ctx, pea.B, args, blockNrOrHash, overrides, 5*time.Second, pea.B.Config.RPCGasCap.Uint64())
// If the result contains a revert reason, try to unpack and return it.
if err == nil {
@ -1032,26 +938,22 @@ func (pea *PublicEthAPI) Call(ctx context.Context, args CallArgs, blockNrOrHash
}
}
if err != nil && pea.config.ProxyOnError {
if err != nil && pea.proxyOnError {
var hex hexutil.Bytes
if err := pea.rpc.CallContext(ctx, &hex, "eth_call", args, blockNrOrHash, overrides); hex != nil && err == nil {
go pea.writeStateDiffAtOrFor(blockNrOrHash)
return hex, nil
}
}
if result != nil {
return result.Return(), err
} else {
return nil, err
}
return result.Return(), err
}
func DoCall(ctx context.Context, b *Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
defer func(start time.Time) {
log.Debugxf(ctx, "Executing EVM call finished %s runtime %s", time.Now().String(), time.Since(start).String())
logrus.Debugf("Executing EVM call finished %s runtime %s", time.Now().String(), time.Since(start).String())
}(time.Now())
state, header, err := b.IPLDDirectStateDBAndHeaderByNumberOrHash(ctx, blockNrOrHash)
state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
if state == nil || err != nil {
return nil, err
}
@ -1102,7 +1004,7 @@ func DoCall(ctx context.Context, b *Backend, args CallArgs, blockNrOrHash rpc.Bl
return nil, fmt.Errorf("execution aborted (timeout = %v)", timeout)
}
if err != nil {
return result, fmt.Errorf("err: %w (supplied gas %d)", err, msg.GasLimit)
return result, fmt.Errorf("err: %w (supplied gas %d)", err, msg.Gas())
}
return result, nil
}
@ -1110,7 +1012,7 @@ func DoCall(ctx context.Context, b *Backend, args CallArgs, blockNrOrHash rpc.Bl
// writeStateDiffAtOrFor calls out to the proxy statediffing geth client to fill in a gap in the index
func (pea *PublicEthAPI) writeStateDiffAtOrFor(blockNrOrHash rpc.BlockNumberOrHash) {
// short circuit right away if the proxy doesn't support diffing
if !pea.config.SupportsStateDiff {
if !pea.supportsStateDiff {
return
}
if blockNr, ok := blockNrOrHash.Number(); ok {
@ -1125,51 +1027,68 @@ func (pea *PublicEthAPI) writeStateDiffAtOrFor(blockNrOrHash rpc.BlockNumberOrHa
// writeStateDiffWithCriteria calls out to the proxy statediffing geth client to fill in a gap in the index
func (pea *PublicEthAPI) writeStateDiffWithCriteria(crit filters.FilterCriteria) {
// short circuit right away if the proxy doesn't support diffing
if !pea.config.SupportsStateDiff || crit.BlockHash == nil {
if !pea.supportsStateDiff {
return
}
pea.writeStateDiffFor(*crit.BlockHash)
if crit.BlockHash != nil {
pea.writeStateDiffFor(*crit.BlockHash)
return
}
var start, end int64
if crit.FromBlock != nil {
start = crit.FromBlock.Int64()
}
if crit.ToBlock != nil {
end = crit.ToBlock.Int64()
} else {
end = start
}
for i := start; i <= end; i++ {
pea.writeStateDiffAt(i)
}
}
// writeStateDiffAt calls out to the proxy statediffing geth client to fill in a gap in the index
func (pea *PublicEthAPI) writeStateDiffAt(height int64) {
if !pea.config.SupportsStateDiff {
if !pea.supportsStateDiff {
return
}
// we use a separate context than the one provided by the client
ctx, cancel := context.WithTimeout(context.Background(), pea.config.StateDiffTimeout)
ctx, cancel := context.WithTimeout(context.Background(), 240*time.Second)
defer cancel()
var data json.RawMessage
params := statediff.Params{
IncludeBlock: true,
IncludeReceipts: true,
IncludeTD: true,
IncludeCode: true,
IntermediateStateNodes: true,
IntermediateStorageNodes: true,
IncludeBlock: true,
IncludeReceipts: true,
IncludeTD: true,
IncludeCode: true,
}
log.Debugf("Calling statediff_writeStateDiffAt(%d)", height)
if err := pea.rpc.CallContext(ctx, &data, "statediff_writeStateDiffAt", uint64(height), params); err != nil {
log.Errorf("writeStateDiffAt %d failed with err %s", height, err.Error())
logrus.Errorf("writeStateDiffAt %d faild with err %s", height, err.Error())
}
}
// writeStateDiffFor calls out to the proxy statediffing geth client to fill in a gap in the index
func (pea *PublicEthAPI) writeStateDiffFor(blockHash common.Hash) {
if !pea.config.SupportsStateDiff {
if !pea.supportsStateDiff {
return
}
// we use a separate context than the one provided by the client
ctx, cancel := context.WithTimeout(context.Background(), pea.config.StateDiffTimeout)
ctx, cancel := context.WithTimeout(context.Background(), 240*time.Second)
defer cancel()
var data json.RawMessage
params := statediff.Params{
IncludeBlock: true,
IncludeReceipts: true,
IncludeTD: true,
IncludeCode: true,
IntermediateStateNodes: true,
IntermediateStorageNodes: true,
IncludeBlock: true,
IncludeReceipts: true,
IncludeTD: true,
IncludeCode: true,
}
log.Debugf("Calling statediff_writeStateDiffFor(%s)", blockHash.Hex())
if err := pea.rpc.CallContext(ctx, &data, "statediff_writeStateDiffFor", blockHash, params); err != nil {
log.Errorf("writeStateDiffFor %s failed with err %s", blockHash.Hex(), err.Error())
logrus.Errorf("writeStateDiffFor %s faild with err %s", blockHash.Hex(), err.Error())
}
}
@ -1177,13 +1096,13 @@ func (pea *PublicEthAPI) writeStateDiffFor(blockHash common.Hash) {
func (pea *PublicEthAPI) rpcMarshalBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) {
fields, err := RPCMarshalBlock(b, inclTx, fullTx)
if err != nil {
log.Errorf("error RPC marshalling block with hash %s: %s", b.Hash().String(), err)
logrus.Errorf("error RPC marshalling block with hash %s: %s", b.Hash().String(), err)
return nil, err
}
if inclTx {
td, err := pea.B.GetTd(b.Hash())
if err != nil {
log.Errorf("error getting td for block with hash and number %s, %s: %s", b.Hash().String(), b.Number().String(), err)
logrus.Errorf("error getting td for block with hash and number %s, %s: %s", b.Hash().String(), b.Number().String(), err)
return nil, err
}
fields["totalDifficulty"] = (*hexutil.Big)(td)
View File
@ -14,16 +14,17 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_api_test
package eth_test
import (
"context"
"math/big"
"strconv"
"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
"github.com/cerc-io/plugeth-statediff/indexer/ipld"
sdtypes "github.com/cerc-io/plugeth-statediff/types"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth/test_helpers"
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
ethServerShared "github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
@ -31,14 +32,13 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/jmoiron/sqlx"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth/test_helpers"
"github.com/cerc-io/ipld-eth-server/v5/pkg/shared"
)
var (
@ -50,8 +50,7 @@ var (
blockHash = test_helpers.MockBlock.Header().Hash()
baseFee = test_helpers.MockLondonBlock.BaseFee()
ctx = context.Background()
expectedBlock = map[string]interface{}{
expectedBlock = map[string]interface{}{
"number": (*hexutil.Big)(test_helpers.MockBlock.Number()),
"hash": test_helpers.MockBlock.Hash(),
"parentHash": test_helpers.MockBlock.ParentHash(),
@ -135,9 +134,9 @@ var (
expectedTransaction2 = eth.NewRPCTransaction(test_helpers.MockTransactions[1], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 1, test_helpers.MockBlock.BaseFee())
expectedTransaction3 = eth.NewRPCTransaction(test_helpers.MockTransactions[2], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 2, test_helpers.MockBlock.BaseFee())
expectedLondonTransaction = eth.NewRPCTransaction(test_helpers.MockLondonTransactions[0], test_helpers.MockLondonBlock.Hash(), test_helpers.MockLondonBlock.NumberU64(), 0, test_helpers.MockLondonBlock.BaseFee())
expectRawTx, _ = test_helpers.MockTransactions[0].MarshalBinary()
expectRawTx2, _ = test_helpers.MockTransactions[1].MarshalBinary()
expectRawTx3, _ = test_helpers.MockTransactions[2].MarshalBinary()
expectRawTx, _ = rlp.EncodeToBytes(test_helpers.MockTransactions[0])
expectRawTx2, _ = rlp.EncodeToBytes(test_helpers.MockTransactions[1])
expectRawTx3, _ = rlp.EncodeToBytes(test_helpers.MockTransactions[2])
expectedReceipt = map[string]interface{}{
"blockHash": blockHash,
"blockNumber": hexutil.Uint64(uint64(number.Int64())),
@ -151,8 +150,6 @@ var (
"logs": test_helpers.MockReceipts[0].Logs,
"logsBloom": test_helpers.MockReceipts[0].Bloom,
"status": hexutil.Uint(test_helpers.MockReceipts[0].Status),
"effectiveGasPrice": (*hexutil.Big)(big.NewInt(100)),
"type": hexutil.Uint64(types.LegacyTxType),
}
expectedReceipt2 = map[string]interface{}{
"blockHash": blockHash,
@ -167,8 +164,6 @@ var (
"logs": test_helpers.MockReceipts[1].Logs,
"logsBloom": test_helpers.MockReceipts[1].Bloom,
"root": hexutil.Bytes(test_helpers.MockReceipts[1].PostState),
"effectiveGasPrice": (*hexutil.Big)(big.NewInt(200)),
"type": hexutil.Uint64(types.LegacyTxType),
}
expectedReceipt3 = map[string]interface{}{
"blockHash": blockHash,
@ -183,81 +178,80 @@ var (
"logs": test_helpers.MockReceipts[2].Logs,
"logsBloom": test_helpers.MockReceipts[2].Bloom,
"root": hexutil.Bytes(test_helpers.MockReceipts[2].PostState),
"effectiveGasPrice": (*hexutil.Big)(big.NewInt(150)),
"type": hexutil.Uint64(types.LegacyTxType),
}
)
var (
db *sqlx.DB
api *eth.PublicEthAPI
chainConfig = params.TestChainConfig
)
var _ = BeforeSuite(func() {
var (
err error
tx interfaces.Batch
)
db = shared.SetupDB()
indexAndPublisher := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
backend, err := eth.NewEthBackend(db, &eth.Config{
ChainConfig: chainConfig,
VMConfig: vm.Config{},
RPCGasCap: big.NewInt(10000000000), // Max gas capacity for a rpc call.
GroupCacheConfig: &shared.GroupCacheConfig{
StateDB: shared.GroupConfig{
Name: "api_test",
CacheSizeInMB: 8,
CacheExpiryInMins: 60,
LogStatsIntervalInSecs: 0,
},
},
})
Expect(err).ToNot(HaveOccurred())
api, _ = eth.NewPublicEthAPI(backend, nil, eth.APIConfig{StateDiffTimeout: shared.DefaultStateDiffTimeout})
tx, err = indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
ipld := sdtypes.IPLD{
CID: ipld.Keccak256ToCid(ipld.RawBinary, test_helpers.CodeHash.Bytes()).String(),
Content: test_helpers.ContractCode,
}
err = indexAndPublisher.PushIPLD(tx, ipld)
Expect(err).ToNot(HaveOccurred())
for _, node := range test_helpers.MockStateNodes {
err = indexAndPublisher.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
Expect(err).ToNot(HaveOccurred())
}
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
uncles := test_helpers.MockBlock.Uncles()
uncleHashes := make([]common.Hash, len(uncles))
for i, uncle := range uncles {
uncleHashes[i] = uncle.Hash()
}
expectedBlock["uncles"] = uncleHashes
// setting chain config for the London block
chainConfig.LondonBlock = big.NewInt(2)
indexAndPublisher = shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
tx, err = indexAndPublisher.PushBlock(test_helpers.MockLondonBlock, test_helpers.MockLondonReceipts, test_helpers.MockLondonBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
})
var _ = AfterSuite(func() { shared.TearDownDB(db) })
var _ = Describe("API", func() {
var (
db *sqlx.DB
api *eth.PublicEthAPI
chainConfig = params.TestChainConfig
)
// Test db setup: rather than using BeforeEach, we only need to set up once since the tests do not mutate the database
// Note: if you focus one of the tests, be sure to also focus this and the deferred It()
It("test init", func() {
var (
err error
tx interfaces.Batch
)
db = shared.SetupDB()
indexAndPublisher := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
backend, err := eth.NewEthBackend(db, &eth.Config{
ChainConfig: chainConfig,
VMConfig: vm.Config{},
RPCGasCap: big.NewInt(10000000000), // Max gas capacity for a rpc call.
GroupCacheConfig: &ethServerShared.GroupCacheConfig{
StateDB: ethServerShared.GroupConfig{
Name: "api_test",
CacheSizeInMB: 8,
CacheExpiryInMins: 60,
LogStatsIntervalInSecs: 0,
},
},
})
Expect(err).ToNot(HaveOccurred())
api, _ = eth.NewPublicEthAPI(backend, nil, false, false, false)
tx, err = indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
ccHash := sdtypes.CodeAndCodeHash{
Hash: test_helpers.ContractCodeHash,
Code: test_helpers.ContractCode,
}
err = indexAndPublisher.PushCodeAndCodeHash(tx, ccHash)
Expect(err).ToNot(HaveOccurred())
for _, node := range test_helpers.MockStateNodes {
err = indexAndPublisher.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
Expect(err).ToNot(HaveOccurred())
}
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
uncles := test_helpers.MockBlock.Uncles()
uncleHashes := make([]common.Hash, len(uncles))
for i, uncle := range uncles {
uncleHashes[i] = uncle.Hash()
}
expectedBlock["uncles"] = uncleHashes
// setting chain config for the London block
chainConfig.LondonBlock = big.NewInt(2)
indexAndPublisher = shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
tx, err = indexAndPublisher.PushBlock(test_helpers.MockLondonBlock, test_helpers.MockLondonReceipts, test_helpers.MockLondonBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
})
// Single test db tear down at end of all tests
defer It("test teardown", func() { shared.TearDownDB(db) })
/*
Headers and blocks
@ -332,12 +326,12 @@ var _ = Describe("API", func() {
It("Fetch BaseFee from london block by block number, returns `nil` for legacy block", func() {
block, err := api.GetBlockByNumber(ctx, number, false)
Expect(err).ToNot(HaveOccurred())
_, ok := block["baseFeePerGas"]
_, ok := block["baseFee"]
Expect(ok).To(Equal(false))
block, err = api.GetBlockByNumber(ctx, londonBlockNum, false)
Expect(err).ToNot(HaveOccurred())
Expect(block["baseFeePerGas"]).To(Equal((*hexutil.Big)(baseFee)))
Expect(block["baseFee"].(*big.Int)).To(Equal(baseFee))
})
It("Retrieves a block by number with uncles in correct order", func() {
block, err := api.GetBlockByNumber(ctx, londonBlockNum, false)
@ -386,11 +380,11 @@ var _ = Describe("API", func() {
It("Fetch BaseFee from london block by block hash, returns `nil` for legacy block", func() {
block, err := api.GetBlockByHash(ctx, test_helpers.MockBlock.Hash(), true)
Expect(err).ToNot(HaveOccurred())
_, ok := block["baseFeePerGas"]
_, ok := block["baseFee"]
Expect(ok).To(Equal(false))
block, err = api.GetBlockByHash(ctx, test_helpers.MockLondonBlock.Hash(), false)
Expect(err).ToNot(HaveOccurred())
Expect(block["baseFeePerGas"]).To(Equal((*hexutil.Big)(baseFee)))
Expect(block["baseFee"].(*big.Int)).To(Equal(baseFee))
})
It("Retrieves a block by hash with uncles in correct order", func() {
block, err := api.GetBlockByHash(ctx, test_helpers.MockLondonBlock.Hash(), false)
File diff suppressed because it is too large
View File
@ -17,30 +17,20 @@
package eth
import (
"bytes"
"context"
"encoding/json"
"fmt"
"math/big"
nodeiter "github.com/cerc-io/eth-iterator-utils"
"github.com/cerc-io/plugeth-statediff/utils"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
"github.com/cerc-io/ipld-eth-statedb/trie_by_cid/state"
)
var nullHashBytes = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")
var emptyCodeHash = crypto.Keccak256([]byte{})
// RPCMarshalHeader converts the given header to the RPC output.
// This function is eth/internal so we have to make our own version here...
func RPCMarshalHeader(head *types.Header) map[string]interface{} {
@ -65,7 +55,7 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} {
}
if head.BaseFee != nil {
headerMap["baseFeePerGas"] = (*hexutil.Big)(head.BaseFee)
headerMap["baseFee"] = head.BaseFee
}
return headerMap
}
@ -145,21 +135,15 @@ func NewRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransa
return nil
}
// SignerForTx returns an appropriate Signer for this Transaction
func SignerForTx(tx *types.Transaction) types.Signer {
// NewRPCTransaction returns a transaction that will serialize to the RPC
// representation, with the given location metadata set (if available).
func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64, baseFee *big.Int) *RPCTransaction {
var signer types.Signer
if tx.Protected() {
signer = types.LatestSignerForChainID(tx.ChainId())
} else {
signer = types.HomesteadSigner{}
}
return signer
}
// NewRPCTransaction returns a transaction that will serialize to the RPC
// representation, with the given location metadata set (if available).
func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64, baseFee *big.Int) *RPCTransaction {
signer := SignerForTx(tx)
from, _ := types.Sender(signer, tx)
v, r, s := tx.RawSignatureValues()
result := &RPCTransaction{
@ -203,7 +187,7 @@ func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
price := math.BigMin(new(big.Int).Add(tx.GasTipCap(), baseFee), tx.GasFeeCap())
result.GasPrice = (*hexutil.Big)(price)
} else {
result.GasPrice = (*hexutil.Big)(tx.GasFeeCap())
result.GasPrice = nil
}
}
return result
@ -276,7 +260,7 @@ func newRPCRawTransactionFromBlockIndex(b *types.Block, index uint64) hexutil.By
if index >= uint64(len(txs)) {
return nil
}
blob, _ := txs[index].MarshalBinary()
blob, _ := rlp.EncodeToBytes(txs[index])
return blob
}
@ -316,103 +300,3 @@ func toBlockNumArg(number *big.Int) string {
}
return hexutil.EncodeBig(number)
}
func getIteratorAtPath(t state.Trie, startKey []byte) (trie.NodeIterator, int64) {
startTime := makeTimestamp()
var it trie.NodeIterator
if len(startKey)%2 != 0 {
// Zero-pad for odd-length keys, required by HexToKeyBytes()
startKey = append(startKey, 0)
it = t.NodeIterator(nodeiter.HexToKeyBytes(startKey))
} else {
it = t.NodeIterator(nodeiter.HexToKeyBytes(startKey))
// Step to the required node (not required if original startKey was odd-length)
it.Next(true)
}
return it, makeTimestamp() - startTime
}
func fillSliceNodeData(
sdb state.Database,
nodesMap map[string]string,
leavesMap map[string]GetSliceResponseAccount,
node StateNode,
nodeElements []interface{},
storage bool,
) (int64, error) {
// Populate the nodes map
nodeValHash := crypto.Keccak256Hash(node.NodeValue)
nodesMap[common.Bytes2Hex(nodeValHash.Bytes())] = common.Bytes2Hex(node.NodeValue)
// Extract account data if it's a Leaf node
leafStartTime := makeTimestamp()
if node.NodeType == Leaf && !storage {
stateLeafKey, storageRoot, code, err := extractContractAccountInfo(sdb, node, nodeElements)
if err != nil {
return 0, fmt.Errorf("GetSlice account lookup error: %s", err.Error())
}
if len(code) > 0 {
// Populate the leaves map
leavesMap[stateLeafKey] = GetSliceResponseAccount{
StorageRoot: storageRoot,
EVMCode: common.Bytes2Hex(code),
}
}
}
return makeTimestamp() - leafStartTime, nil
}
func extractContractAccountInfo(sdb state.Database, node StateNode, nodeElements []interface{}) (string, string, []byte, error) {
var account types.StateAccount
if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
return "", "", nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", node.Path, err)
}
if bytes.Equal(account.CodeHash, emptyCodeHash) {
return "", "", nil, nil
}
// Extract state leaf key
partialPath := utils.CompactToHex(nodeElements[0].([]byte))
valueNodePath := append(node.Path, partialPath...)
encodedPath := utils.HexToCompact(valueNodePath)
leafKey := encodedPath[1:]
stateLeafKeyString := common.BytesToHash(leafKey).String()
storageRootString := account.Root.String()
// Extract codeHash and get code
codeHash := common.BytesToHash(account.CodeHash)
codeBytes, err := sdb.ContractCode(codeHash)
if err != nil {
return "", "", nil, err
}
return stateLeafKeyString, storageRootString, codeBytes, nil
}
// IsLeaf checks if the node we are at is a leaf
func IsLeaf(elements []interface{}) (bool, error) {
if len(elements) > 2 {
return false, nil
}
if len(elements) < 2 {
return false, fmt.Errorf("node cannot be less than two elements in length")
}
switch elements[0].([]byte)[0] / 16 {
case '\x00':
return false, nil // extension node, even-length path
case '\x01':
return false, nil // extension node, odd-length path
case '\x02':
return true, nil // leaf node, even-length path
case '\x03':
return true, nil // leaf node, odd-length path
default:
return false, fmt.Errorf("unknown hex prefix")
}
}
pkg/eth/cid_retriever.go Normal file
View File
@ -0,0 +1,760 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"fmt"
"math/big"
"strconv"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
log "github.com/sirupsen/logrus"
"gorm.io/driver/postgres"
"gorm.io/gorm"
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
)
// Retriever interface for substituting mocks in tests
type Retriever interface {
RetrieveFirstBlockNumber() (int64, error)
RetrieveLastBlockNumber() (int64, error)
Retrieve(filter SubscriptionSettings, blockNumber int64) ([]CIDWrapper, bool, error)
}
// CIDRetriever satisfies the CIDRetriever interface for ethereum
type CIDRetriever struct {
db *sqlx.DB
gormDB *gorm.DB
}
type IPLDModelRecord struct {
models.IPLDModel
}
// TableName overrides the table name used by IPLD
func (IPLDModelRecord) TableName() string {
return "public.blocks"
}
type HeaderCIDRecord struct {
CID string `gorm:"column:cid"`
BlockHash string `gorm:"primaryKey"`
BlockNumber string `gorm:"primaryKey"`
ParentHash string
Timestamp uint64
StateRoot string
TotalDifficulty string `gorm:"column:td"`
TxRoot string
RctRoot string `gorm:"column:receipt_root"`
UncleRoot string
Bloom []byte
MhKey string
// gorm doesn't check if foreign key exists in database.
// It is required to eager load relations using preload.
TransactionCIDs []TransactionCIDRecord `gorm:"foreignKey:HeaderID,BlockNumber;references:BlockHash,BlockNumber"`
IPLD IPLDModelRecord `gorm:"foreignKey:MhKey,BlockNumber;references:Key,BlockNumber"`
}
// TableName overrides the table name used by HeaderCIDRecord
func (HeaderCIDRecord) TableName() string {
return "eth.header_cids"
}
type TransactionCIDRecord struct {
CID string `gorm:"column:cid"`
TxHash string `gorm:"primaryKey"`
BlockNumber string `gorm:"primaryKey"`
HeaderID string `gorm:"column:header_id"`
Index int64
Src string
Dst string
MhKey string
IPLD IPLDModelRecord `gorm:"foreignKey:MhKey,BlockNumber;references:Key,BlockNumber"`
}
// TableName overrides the table name used by TransactionCIDRecord
func (TransactionCIDRecord) TableName() string {
return "eth.transaction_cids"
}
// NewCIDRetriever returns a pointer to a new CIDRetriever which supports the CIDRetriever interface
func NewCIDRetriever(db *sqlx.DB) *CIDRetriever {
gormDB, err := gorm.Open(postgres.New(postgres.Config{
Conn: db,
}), &gorm.Config{})
if err != nil {
log.Error(err)
return nil
}
return &CIDRetriever{
db: db,
gormDB: gormDB,
}
}
// RetrieveFirstBlockNumber is used to retrieve the first block number in the db
func (ecr *CIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
var blockNumber int64
err := ecr.db.Get(&blockNumber, "SELECT block_number FROM eth.header_cids ORDER BY block_number ASC LIMIT 1")
return blockNumber, err
}
// RetrieveLastBlockNumber is used to retrieve the latest block number in the db
func (ecr *CIDRetriever) RetrieveLastBlockNumber() (int64, error) {
var blockNumber int64
err := ecr.db.Get(&blockNumber, "SELECT block_number FROM eth.header_cids ORDER BY block_number DESC LIMIT 1")
return blockNumber, err
}
// Retrieve is used to retrieve all of the CIDs which conform to the passed StreamFilters
func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64) ([]CIDWrapper, bool, error) {
log.Debug("retrieving cids")
// Begin new db tx
tx, err := ecr.db.Beginx()
if err != nil {
return nil, true, err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
// Retrieve cached header CIDs at this block height
var headers []models.HeaderModel
headers, err = ecr.RetrieveHeaderCIDs(tx, blockNumber)
if err != nil {
log.Error("header cid retrieval error", err)
return nil, true, err
}
cws := make([]CIDWrapper, len(headers))
empty := true
for i, header := range headers {
cw := new(CIDWrapper)
cw.BlockNumber = big.NewInt(blockNumber)
if !filter.HeaderFilter.Off {
cw.Header = header
empty = false
if filter.HeaderFilter.Uncles {
// Retrieve uncle cids for this header id
var uncleCIDs []models.UncleModel
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, header.BlockHash)
if err != nil {
log.Error("uncle cid retrieval error")
return nil, true, err
}
cw.Uncles = uncleCIDs
}
}
// Retrieve cached trx CIDs
if !filter.TxFilter.Off {
cw.Transactions, err = ecr.RetrieveTxCIDs(tx, filter.TxFilter, header.BlockHash)
if err != nil {
log.Error("transaction cid retrieval error")
return nil, true, err
}
if len(cw.Transactions) > 0 {
empty = false
}
}
trxHashes := make([]string, len(cw.Transactions))
for j, t := range cw.Transactions {
trxHashes[j] = t.TxHash
}
// Retrieve cached receipt CIDs
if !filter.ReceiptFilter.Off {
cw.Receipts, err = ecr.RetrieveRctCIDs(tx, filter.ReceiptFilter, 0, header.BlockHash, trxHashes)
if err != nil {
log.Error("receipt cid retrieval error")
return nil, true, err
}
if len(cw.Receipts) > 0 {
empty = false
}
}
// Retrieve cached state CIDs
if !filter.StateFilter.Off {
cw.StateNodes, err = ecr.RetrieveStateCIDs(tx, filter.StateFilter, header.BlockHash)
if err != nil {
log.Error("state cid retrieval error")
return nil, true, err
}
if len(cw.StateNodes) > 0 {
empty = false
}
}
// Retrieve cached storage CIDs
if !filter.StorageFilter.Off {
cw.StorageNodes, err = ecr.RetrieveStorageCIDs(tx, filter.StorageFilter, header.BlockHash)
if err != nil {
log.Error("storage cid retrieval error")
return nil, true, err
}
if len(cw.StorageNodes) > 0 {
empty = false
}
}
cws[i] = *cw
}
return cws, empty, err
}
// RetrieveHeaderCIDs retrieves and returns all of the header cids at the provided blockheight
func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]models.HeaderModel, error) {
log.Debug("retrieving header cids for block ", blockNumber)
headers := make([]models.HeaderModel, 0)
pgStr := `SELECT CAST(block_number as Text), block_hash, parent_hash, cid, mh_key, CAST(td as Text), node_id,
CAST(reward as Text), state_root, uncle_root,tx_root, receipt_root, bloom, timestamp, times_validated, coinbase
FROM eth.header_cids
WHERE block_number = $1`
return headers, tx.Select(&headers, pgStr, blockNumber)
}
// RetrieveUncleCIDsByHeaderID retrieves and returns all of the uncle cids for the provided header
func (ecr *CIDRetriever) RetrieveUncleCIDsByHeaderID(tx *sqlx.Tx, headerID string) ([]models.UncleModel, error) {
log.Debug("retrieving uncle cids for block id ", headerID)
headers := make([]models.UncleModel, 0)
pgStr := `SELECT CAST(block_number as Text), header_id, block_hash, parent_hash, cid, mh_key, CAST(reward as text)
FROM eth.uncle_cids
WHERE header_id = $1`
return headers, tx.Select(&headers, pgStr, headerID)
}
// RetrieveTxCIDs retrieves and returns all of the trx cids at the provided blockheight that conform to the provided filter parameters
// also returns the ids for the returned transaction cids
func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID string) ([]models.TxModel, error) {
log.Debug("retrieving transaction cids for header id ", headerID)
args := make([]interface{}, 0, 3)
results := make([]models.TxModel, 0)
id := 1
pgStr := fmt.Sprintf(`SELECT CAST(transaction_cids.block_number as Text), transaction_cids.tx_hash,
transaction_cids.header_id, transaction_cids.cid, transaction_cids.mh_key, transaction_cids.dst,
transaction_cids.src, transaction_cids.index, transaction_cids.tx_data, transaction_cids.tx_type
FROM eth.transaction_cids
INNER JOIN eth.header_cids ON (
transaction_cids.header_id = header_cids.block_hash
AND transaction_cids.block_number = header_cids.block_number
)
WHERE header_cids.block_hash = $%d`, id)
args = append(args, headerID)
id++
if len(txFilter.Dst) > 0 {
pgStr += fmt.Sprintf(` AND transaction_cids.dst = ANY($%d::VARCHAR(66)[])`, id)
args = append(args, pq.Array(txFilter.Dst))
id++
}
if len(txFilter.Src) > 0 {
pgStr += fmt.Sprintf(` AND transaction_cids.src = ANY($%d::VARCHAR(66)[])`, id)
args = append(args, pq.Array(txFilter.Src))
}
pgStr += ` ORDER BY transaction_cids.index`
return results, tx.Select(&results, pgStr, args...)
}
func topicFilterCondition(id *int, topics [][]string, args []interface{}, pgStr string, first bool) (string, []interface{}) {
for i, topicSet := range topics {
if len(topicSet) == 0 {
continue
}
if !first {
pgStr += " AND"
} else {
first = false
}
pgStr += fmt.Sprintf(` eth.log_cids.topic%d = ANY ($%d)`, i, *id)
args = append(args, pq.Array(topicSet))
*id++
}
return pgStr, args
}
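To make the dynamic SQL composition above concrete, consider a hypothetical rctFilter with Topics = [["0xaaa..."], [], ["0xbbb...", "0xccc..."]] and a placeholder counter starting at 2: topicFilterCondition would append roughly " AND eth.log_cids.topic0 = ANY ($2) AND eth.log_cids.topic2 = ANY ($3)" to the query and push pq.Array values for the two non-empty topic sets onto args. Empty topic sets are skipped entirely, so they consume neither a column condition nor a placeholder, and the column index (topic0, topic2, ...) tracks the position of the topic set in the filter rather than the placeholder number.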
func logFilterCondition(id *int, pgStr string, args []interface{}, rctFilter ReceiptFilter) (string, []interface{}) {
if len(rctFilter.LogAddresses) > 0 {
pgStr += fmt.Sprintf(` AND eth.log_cids.address = ANY ($%d)`, *id)
args = append(args, pq.Array(rctFilter.LogAddresses))
*id++
}
// Filter on topics if there are any
if hasTopics(rctFilter.Topics) {
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, false)
}
return pgStr, args
}
func receiptFilterConditions(id *int, pgStr string, args []interface{}, rctFilter ReceiptFilter, txHashes []string) (string, []interface{}) {
rctCond := " AND (receipt_cids.tx_id = ANY ( "
logQuery := "SELECT rct_id FROM eth.log_cids WHERE"
if len(rctFilter.LogAddresses) > 0 {
// Filter on log contract addresses if there are any
pgStr += fmt.Sprintf(`%s %s eth.log_cids.address = ANY ($%d)`, rctCond, logQuery, *id)
args = append(args, pq.Array(rctFilter.LogAddresses))
*id++
// Filter on topics if there are any
if hasTopics(rctFilter.Topics) {
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, false)
}
pgStr += ")"
// Filter on txHashes if there are any, and we are matching txs
if rctFilter.MatchTxs && len(txHashes) > 0 {
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d)`, *id)
args = append(args, pq.Array(txHashes))
}
pgStr += ")"
} else { // If there are no contract addresses to filter on
// Filter on topics if there are any
if hasTopics(rctFilter.Topics) {
pgStr += rctCond + logQuery
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, true)
pgStr += ")"
// Filter on txHashes if there are any, and we are matching txs
if rctFilter.MatchTxs && len(txHashes) > 0 {
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d)`, *id)
args = append(args, pq.Array(txHashes))
}
pgStr += ")"
} else if rctFilter.MatchTxs && len(txHashes) > 0 {
// If there are no contract addresses or topics to filter on,
// Filter on txHashes if there are any, and we are matching txs
pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d)`, *id)
args = append(args, pq.Array(txHashes))
}
}
return pgStr, args
}
// RetrieveFilteredGQLLogs retrieves and returns all the log CIDs for the provided blockHash that conform to the provided
// filter parameters.
func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockHash *common.Hash) ([]LogResult, error) {
log.Debug("retrieving log cids for receipt ids with block hash", blockHash.String())
args := make([]interface{}, 0, 4)
id := 1
pgStr := `SELECT CAST(eth.log_cids.block_number as Text), eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
eth.log_cids.log_data, eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index, data,
eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status, header_cids.block_hash
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids, public.blocks
WHERE eth.log_cids.rct_id = receipt_cids.tx_id
AND eth.log_cids.header_id = eth.receipt_cids.header_id
AND eth.log_cids.block_number = eth.receipt_cids.block_number
AND receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.header_id = transaction_cids.header_id
AND receipt_cids.block_number = transaction_cids.block_number
AND transaction_cids.header_id = header_cids.block_hash
AND transaction_cids.block_number = header_cids.block_number
AND log_cids.leaf_mh_key = blocks.key
AND log_cids.block_number = blocks.block_number
AND header_cids.block_hash = $1`
args = append(args, blockHash.String())
id++
pgStr, args = logFilterCondition(&id, pgStr, args, rctFilter)
pgStr += ` ORDER BY log_cids.index`
logCIDs := make([]LogResult, 0)
err := tx.Select(&logCIDs, pgStr, args...)
if err != nil {
return nil, err
}
return logCIDs, nil
}
// RetrieveFilteredLog retrieves and returns all the log CIDs for the provided blockHeight or blockHash that conform to the provided
// filter parameters.
func (ecr *CIDRetriever) RetrieveFilteredLog(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash) ([]LogResult, error) {
log.Debug("retrieving log cids for receipt ids")
args := make([]interface{}, 0, 4)
pgStr := `SELECT CAST(eth.log_cids.block_number as Text), eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
eth.log_cids.log_data, eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index,
eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status, header_cids.block_hash
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE eth.log_cids.rct_id = receipt_cids.tx_id
AND eth.log_cids.header_id = eth.receipt_cids.header_id
AND eth.log_cids.block_number = eth.receipt_cids.block_number
AND receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.header_id = transaction_cids.header_id
AND receipt_cids.block_number = transaction_cids.block_number
AND transaction_cids.header_id = header_cids.block_hash
AND transaction_cids.block_number = header_cids.block_number`
id := 1
if blockNumber > 0 {
pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
args = append(args, blockNumber)
id++
}
if blockHash != nil {
pgStr += fmt.Sprintf(` AND header_cids.block_hash = $%d`, id)
args = append(args, blockHash.String())
id++
}
pgStr, args = logFilterCondition(&id, pgStr, args, rctFilter)
pgStr += ` ORDER BY log_cids.index`
logCIDs := make([]LogResult, 0)
err := tx.Select(&logCIDs, pgStr, args...)
if err != nil {
return nil, err
}
return logCIDs, nil
}
// RetrieveRctCIDs retrieves and returns all of the rct cids at the provided blockheight or block hash that conform to the provided
// filter parameters and correspond to the provided tx ids
func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash string, txHashes []string) ([]models.ReceiptModel, error) {
log.Debug("retrieving receipt cids for block ", blockNumber)
args := make([]interface{}, 0, 5)
pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.header_id, receipt_cids.tx_id,
receipt_cids.leaf_cid, receipt_cids.leaf_mh_key, receipt_cids.contract, receipt_cids.contract_hash
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.header_id = transaction_cids.header_id
AND receipt_cids.block_number = transaction_cids.block_number
AND transaction_cids.header_id = header_cids.block_hash
AND transaction_cids.block_number = header_cids.block_number`
id := 1
if blockNumber > 0 {
pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
args = append(args, blockNumber)
id++
}
if blockHash != "" {
pgStr += fmt.Sprintf(` AND header_cids.block_hash = $%d`, id)
args = append(args, blockHash)
id++
}
pgStr, args = receiptFilterConditions(&id, pgStr, args, rctFilter, txHashes)
pgStr += ` ORDER BY transaction_cids.index`
receiptCIDs := make([]models.ReceiptModel, 0)
return receiptCIDs, tx.Select(&receiptCIDs, pgStr, args...)
}
func hasTopics(topics [][]string) bool {
for _, topicSet := range topics {
if len(topicSet) > 0 {
return true
}
}
return false
}
// RetrieveStateCIDs retrieves and returns all of the state node cids at the provided header ID that conform to the provided filter parameters
func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, headerID string) ([]models.StateNodeModel, error) {
log.Debug("retrieving state cids for header id ", headerID)
args := make([]interface{}, 0, 2)
pgStr := `SELECT CAST(state_cids.block_number as Text), state_cids.header_id,
state_cids.state_leaf_key, state_cids.node_type, state_cids.cid, state_cids.mh_key, state_cids.state_path
FROM eth.state_cids
INNER JOIN eth.header_cids ON (
state_cids.header_id = header_cids.block_hash
AND state_cids.block_number = header_cids.block_number
)
WHERE header_cids.block_hash = $1`
args = append(args, headerID)
addrLen := len(stateFilter.Addresses)
if addrLen > 0 {
keys := make([]string, addrLen)
for i, addr := range stateFilter.Addresses {
keys[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes()).String()
}
pgStr += ` AND state_cids.state_leaf_key = ANY($2::VARCHAR(66)[])`
args = append(args, pq.Array(keys))
}
if !stateFilter.IntermediateNodes {
pgStr += ` AND state_cids.node_type = 2`
}
stateNodeCIDs := make([]models.StateNodeModel, 0)
return stateNodeCIDs, tx.Select(&stateNodeCIDs, pgStr, args...)
}
// RetrieveStorageCIDs retrieves and returns all of the storage node cids at the provided header id that conform to the provided filter parameters
func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, headerID string) ([]models.StorageNodeWithStateKeyModel, error) {
log.Debug("retrieving storage cids for header id ", headerID)
args := make([]interface{}, 0, 3)
pgStr := `SELECT CAST(storage_cids.block_number as Text), storage_cids.header_id, storage_cids.storage_leaf_key,
storage_cids.node_type, storage_cids.cid, storage_cids.mh_key, storage_cids.storage_path, storage_cids.state_path,
state_cids.state_leaf_key
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE storage_cids.header_id = state_cids.header_id
AND storage_cids.state_path = state_cids.state_path
AND storage_cids.block_number = state_cids.block_number
AND state_cids.header_id = header_cids.block_hash
AND state_cids.block_number = header_cids.block_number
AND header_cids.block_hash = $1`
args = append(args, headerID)
id := 2
addrLen := len(storageFilter.Addresses)
if addrLen > 0 {
keys := make([]string, addrLen)
for i, addr := range storageFilter.Addresses {
keys[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes()).String()
}
pgStr += fmt.Sprintf(` AND state_cids.state_leaf_key = ANY($%d::VARCHAR(66)[])`, id)
args = append(args, pq.Array(keys))
id++
}
if len(storageFilter.StorageKeys) > 0 {
pgStr += fmt.Sprintf(` AND storage_cids.storage_leaf_key = ANY($%d::VARCHAR(66)[])`, id)
args = append(args, pq.Array(storageFilter.StorageKeys))
}
if !storageFilter.IntermediateNodes {
pgStr += ` AND storage_cids.node_type = 2`
}
storageNodeCIDs := make([]models.StorageNodeWithStateKeyModel, 0)
return storageNodeCIDs, tx.Select(&storageNodeCIDs, pgStr, args...)
}
// RetrieveBlockByHash returns all of the CIDs needed to compose an entire block, for a given block hash
func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (models.HeaderModel, []models.UncleModel, []models.TxModel, []models.ReceiptModel, error) {
log.Debug("retrieving block cids for block hash ", blockHash.String())
// Begin new db tx
tx, err := ecr.db.Beginx()
if err != nil {
return models.HeaderModel{}, nil, nil, nil, err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
var headerCID models.HeaderModel
headerCID, err = ecr.RetrieveHeaderCIDByHash(tx, blockHash)
if err != nil {
log.Error("header cid retrieval error")
return models.HeaderModel{}, nil, nil, nil, err
}
blockNumber, err := strconv.ParseInt(headerCID.BlockNumber, 10, 64)
if err != nil {
return models.HeaderModel{}, nil, nil, nil, err
}
var uncleCIDs []models.UncleModel
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID.BlockHash)
if err != nil {
log.Error("uncle cid retrieval error")
return models.HeaderModel{}, nil, nil, nil, err
}
var txCIDs []models.TxModel
txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID.BlockHash, blockNumber)
if err != nil {
log.Error("tx cid retrieval error")
return models.HeaderModel{}, nil, nil, nil, err
}
txHashes := make([]string, len(txCIDs))
for i, txCID := range txCIDs {
txHashes[i] = txCID.TxHash
}
var rctCIDs []models.ReceiptModel
rctCIDs, err = ecr.RetrieveReceiptCIDsByByHeaderIDAndTxIDs(tx, headerCID.BlockHash, txHashes, blockNumber)
if err != nil {
log.Error("rct cid retrieval error")
}
return headerCID, uncleCIDs, txCIDs, rctCIDs, err
}
// RetrieveBlockByNumber returns all of the CIDs needed to compose an entire block, for a given block number
func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (models.HeaderModel, []models.UncleModel, []models.TxModel, []models.ReceiptModel, error) {
log.Debug("retrieving block cids for block number ", blockNumber)
// Begin new db tx
tx, err := ecr.db.Beginx()
if err != nil {
return models.HeaderModel{}, nil, nil, nil, err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
var headerCID []models.HeaderModel
headerCID, err = ecr.RetrieveHeaderCIDs(tx, blockNumber)
if err != nil {
log.Error("header cid retrieval error")
return models.HeaderModel{}, nil, nil, nil, err
}
if len(headerCID) < 1 {
return models.HeaderModel{}, nil, nil, nil, fmt.Errorf("header cid retrieval error, no header CIDs found at block %d", blockNumber)
}
var uncleCIDs []models.UncleModel
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID[0].BlockHash)
if err != nil {
log.Error("uncle cid retrieval error")
return models.HeaderModel{}, nil, nil, nil, err
}
var txCIDs []models.TxModel
txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].BlockHash, blockNumber)
if err != nil {
log.Error("tx cid retrieval error")
return models.HeaderModel{}, nil, nil, nil, err
}
txHashes := make([]string, len(txCIDs))
for i, txCID := range txCIDs {
txHashes[i] = txCID.TxHash
}
var rctCIDs []models.ReceiptModel
rctCIDs, err = ecr.RetrieveReceiptCIDsByByHeaderIDAndTxIDs(tx, headerCID[0].BlockHash, txHashes, blockNumber)
if err != nil {
log.Error("rct cid retrieval error")
}
return headerCID[0], uncleCIDs, txCIDs, rctCIDs, err
}
// RetrieveHeaderCIDByHash returns the header for the given block hash
func (ecr *CIDRetriever) RetrieveHeaderCIDByHash(tx *sqlx.Tx, blockHash common.Hash) (models.HeaderModel, error) {
log.Debug("retrieving header cids for block hash ", blockHash.String())
pgStr := `SELECT block_hash, CAST(block_number as Text), parent_hash, cid, mh_key, CAST(td as Text),
state_root, uncle_root, tx_root, receipt_root, bloom, timestamp FROM eth.header_cids
WHERE block_hash = $1`
var headerCID models.HeaderModel
return headerCID, tx.Get(&headerCID, pgStr, blockHash.String())
}
// RetrieveTxCIDsByHeaderID retrieves all tx CIDs for the given header id
func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID string, blockNumber int64) ([]models.TxModel, error) {
log.Debug("retrieving tx cids for block id ", headerID)
pgStr := `SELECT CAST(block_number as Text), header_id, index, tx_hash, cid, mh_key,
dst, src, tx_data, tx_type, value
FROM eth.transaction_cids
WHERE header_id = $1 AND block_number = $2
ORDER BY index`
var txCIDs []models.TxModel
return txCIDs, tx.Select(&txCIDs, pgStr, headerID, blockNumber)
}
// RetrieveReceiptCIDsByByHeaderIDAndTxIDs retrieves receipt CIDs by their associated tx IDs for the given header id
func (ecr *CIDRetriever) RetrieveReceiptCIDsByByHeaderIDAndTxIDs(tx *sqlx.Tx, headerID string, txHashes []string, blockNumber int64) ([]models.ReceiptModel, error) {
log.Debugf("retrieving receipt cids for tx hashes %v", txHashes)
pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.header_id, receipt_cids.tx_id, receipt_cids.leaf_cid,
receipt_cids.leaf_mh_key, receipt_cids.contract, receipt_cids.contract_hash
FROM eth.receipt_cids, eth.transaction_cids
WHERE tx_id = ANY($2)
AND receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.header_id = transaction_cids.header_id
AND receipt_cids.block_number = transaction_cids.block_number
AND transaction_cids.header_id = $1
AND transaction_cids.block_number = $3
ORDER BY transaction_cids.index`
var rctCIDs []models.ReceiptModel
return rctCIDs, tx.Select(&rctCIDs, pgStr, headerID, pq.Array(txHashes), blockNumber)
}
// RetrieveHeaderAndTxCIDsByBlockNumber retrieves header CIDs and their associated tx CIDs by block number
func (ecr *CIDRetriever) RetrieveHeaderAndTxCIDsByBlockNumber(blockNumber int64) ([]HeaderCIDRecord, error) {
log.Debug("retrieving header cids and tx cids for block number ", blockNumber)
var headerCIDs []HeaderCIDRecord
// https://github.com/go-gorm/gorm/issues/4083#issuecomment-778883283
// Will use join for TransactionCIDs once preload for 1:N is supported.
err := ecr.gormDB.Preload("TransactionCIDs", func(tx *gorm.DB) *gorm.DB {
return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id", "block_number")
}).Joins("IPLD").Find(&headerCIDs, "header_cids.block_number = ?", blockNumber).Error
if err != nil {
log.Error("header cid retrieval error")
return nil, err
}
return headerCIDs, nil
}
// RetrieveHeaderAndTxCIDsByBlockHash retrieves header CID and their associated tx CIDs by block hash
func (ecr *CIDRetriever) RetrieveHeaderAndTxCIDsByBlockHash(blockHash common.Hash) (HeaderCIDRecord, error) {
log.Debug("retrieving header cid and tx cids for block hash ", blockHash.String())
var headerCIDs []HeaderCIDRecord
// https://github.com/go-gorm/gorm/issues/4083#issuecomment-778883283
// Will use join for TransactionCIDs once preload for 1:N is supported.
err := ecr.gormDB.Preload("TransactionCIDs", func(tx *gorm.DB) *gorm.DB {
return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id", "block_number")
}).Joins("IPLD").Find(&headerCIDs, "block_hash = ?", blockHash.String()).Error
if err != nil {
log.Error("header cid retrieval error")
return HeaderCIDRecord{}, err
}
if len(headerCIDs) == 0 {
return HeaderCIDRecord{}, errHeaderHashNotFound
} else if len(headerCIDs) > 1 {
return HeaderCIDRecord{}, errMultipleHeadersForHash
}
return headerCIDs[0], nil
}
// RetrieveTxCIDByHash returns the tx for the given tx hash
func (ecr *CIDRetriever) RetrieveTxCIDByHash(txHash string) (TransactionCIDRecord, error) {
log.Debug("retrieving tx cid for tx hash ", txHash)
var txCIDs []TransactionCIDRecord
err := ecr.gormDB.Joins("IPLD").Find(&txCIDs, "tx_hash = ? AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))", txHash).Error
if err != nil {
log.Error("header cid retrieval error")
return TransactionCIDRecord{}, err
}
if len(txCIDs) == 0 {
return TransactionCIDRecord{}, errTxHashNotFound
} else if len(txCIDs) > 1 {
// a transaction can be part of only one canonical block
return TransactionCIDRecord{}, errTxHashInMultipleBlocks
}
return txCIDs[0], nil
}
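// Note: the query above restricts matches to the canonical header at the transaction's
// block height (via canonical_header_hash), which is why more than one result for the
// same hash is treated as an error.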

View File

@ -0,0 +1,538 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_test
import (
"math/big"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth/test_helpers"
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/trie"
"github.com/jmoiron/sqlx"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var (
openFilter = eth.SubscriptionSettings{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: eth.HeaderFilter{},
TxFilter: eth.TxFilter{},
ReceiptFilter: eth.ReceiptFilter{},
StateFilter: eth.StateFilter{},
StorageFilter: eth.StorageFilter{},
}
rctAddressFilter = eth.SubscriptionSettings{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: eth.HeaderFilter{
Off: true,
},
TxFilter: eth.TxFilter{
Off: true,
},
ReceiptFilter: eth.ReceiptFilter{
LogAddresses: []string{test_helpers.Address.String()},
},
StateFilter: eth.StateFilter{
Off: true,
},
StorageFilter: eth.StorageFilter{
Off: true,
},
}
rctTopicsFilter = eth.SubscriptionSettings{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: eth.HeaderFilter{
Off: true,
},
TxFilter: eth.TxFilter{
Off: true,
},
ReceiptFilter: eth.ReceiptFilter{
Topics: [][]string{{"0x0000000000000000000000000000000000000000000000000000000000000004"}},
},
StateFilter: eth.StateFilter{
Off: true,
},
StorageFilter: eth.StorageFilter{
Off: true,
},
}
rctTopicsAndAddressFilter = eth.SubscriptionSettings{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: eth.HeaderFilter{
Off: true,
},
TxFilter: eth.TxFilter{
Off: true,
},
ReceiptFilter: eth.ReceiptFilter{
Topics: [][]string{
{"0x0000000000000000000000000000000000000000000000000000000000000004"},
{"0x0000000000000000000000000000000000000000000000000000000000000006"},
},
LogAddresses: []string{test_helpers.Address.String()},
},
StateFilter: eth.StateFilter{
Off: true,
},
StorageFilter: eth.StorageFilter{
Off: true,
},
}
rctTopicsAndAddressFilterFail = eth.SubscriptionSettings{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: eth.HeaderFilter{
Off: true,
},
TxFilter: eth.TxFilter{
Off: true,
},
ReceiptFilter: eth.ReceiptFilter{
Topics: [][]string{
{"0x0000000000000000000000000000000000000000000000000000000000000004"},
{"0x0000000000000000000000000000000000000000000000000000000000000007"}, // This topic won't match on the mocks.Address.String() contract receipt
},
LogAddresses: []string{test_helpers.Address.String()},
},
StateFilter: eth.StateFilter{
Off: true,
},
StorageFilter: eth.StorageFilter{
Off: true,
},
}
rctAddressesAndTopicFilter = eth.SubscriptionSettings{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: eth.HeaderFilter{
Off: true,
},
TxFilter: eth.TxFilter{
Off: true,
},
ReceiptFilter: eth.ReceiptFilter{
Topics: [][]string{{"0x0000000000000000000000000000000000000000000000000000000000000005"}},
LogAddresses: []string{test_helpers.Address.String(), test_helpers.AnotherAddress.String()},
},
StateFilter: eth.StateFilter{
Off: true,
},
StorageFilter: eth.StorageFilter{
Off: true,
},
}
rctsForAllCollectedTrxs = eth.SubscriptionSettings{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: eth.HeaderFilter{
Off: true,
},
TxFilter: eth.TxFilter{}, // Trx filter open so we will collect all trxs, therefore we will also collect all corresponding rcts despite rct filter
ReceiptFilter: eth.ReceiptFilter{
MatchTxs: true,
Topics: [][]string{{"0x0000000000000000000000000000000000000000000000000000000000000006"}}, // Topic0 isn't one of the topic0s we have
LogAddresses: []string{"0x0000000000000000000000000000000000000002"}, // Contract isn't one of the contracts we have
},
StateFilter: eth.StateFilter{
Off: true,
},
StorageFilter: eth.StorageFilter{
Off: true,
},
}
rctsForSelectCollectedTrxs = eth.SubscriptionSettings{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: eth.HeaderFilter{
Off: true,
},
TxFilter: eth.TxFilter{
Dst: []string{test_helpers.AnotherAddress.String()}, // We only filter for one of the trxs so we will only get the one corresponding receipt
},
ReceiptFilter: eth.ReceiptFilter{
MatchTxs: true,
Topics: [][]string{{"0x0000000000000000000000000000000000000000000000000000000000000006"}}, // Topic0 isn't one of the topic0s we have
LogAddresses: []string{"0x0000000000000000000000000000000000000002"}, // Contract isn't one of the contracts we have
},
StateFilter: eth.StateFilter{
Off: true,
},
StorageFilter: eth.StorageFilter{
Off: true,
},
}
stateFilter = eth.SubscriptionSettings{
Start: big.NewInt(0),
End: big.NewInt(1),
HeaderFilter: eth.HeaderFilter{
Off: true,
},
TxFilter: eth.TxFilter{
Off: true,
},
ReceiptFilter: eth.ReceiptFilter{
Off: true,
},
StateFilter: eth.StateFilter{
Addresses: []string{test_helpers.AccountAddresss.Hex()},
},
StorageFilter: eth.StorageFilter{
Off: true,
},
}
)
var _ = Describe("Retriever", func() {
var (
db *sqlx.DB
diffIndexer interfaces.StateDiffIndexer
retriever *eth.CIDRetriever
)
BeforeEach(func() {
db = shared.SetupDB()
diffIndexer = shared.SetupTestStateDiffIndexer(ctx, params.TestChainConfig, test_helpers.Genesis.Hash())
retriever = eth.NewCIDRetriever(db)
})
AfterEach(func() {
shared.TearDownDB(db)
})
Describe("Retrieve", func() {
BeforeEach(func() {
tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
for _, node := range test_helpers.MockStateNodes {
err = diffIndexer.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
Expect(err).ToNot(HaveOccurred())
}
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
})
It("Retrieves all CIDs for the given blocknumber when provided an open filter", func() {
type rctCIDAndMHKeyResult struct {
LeafCID string `db:"leaf_cid"`
LeafMhKey string `db:"leaf_mh_key"`
}
expectedRctCIDsAndLeafNodes := make([]rctCIDAndMHKeyResult, 0)
pgStr := `SELECT receipt_cids.leaf_cid, receipt_cids.leaf_mh_key FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
AND transaction_cids.header_id = header_cids.block_hash
AND header_cids.block_number = $1
ORDER BY transaction_cids.index`
err := db.Select(&expectedRctCIDsAndLeafNodes, pgStr, test_helpers.BlockNumber.Uint64())
Expect(err).ToNot(HaveOccurred())
cids, empty, err := retriever.Retrieve(openFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
Expect(len(cids)).To(Equal(1))
Expect(cids[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
expectedHeaderCID := test_helpers.MockCIDWrapper.Header
expectedHeaderCID.BlockHash = cids[0].Header.BlockHash
expectedHeaderCID.NodeID = cids[0].Header.NodeID
Expect(cids[0].Header).To(Equal(expectedHeaderCID))
Expect(len(cids[0].Transactions)).To(Equal(4))
Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[0].CID)).To(BeTrue())
Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[1].CID)).To(BeTrue())
Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[2].CID)).To(BeTrue())
Expect(len(cids[0].Receipts)).To(Equal(4))
Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, expectedRctCIDsAndLeafNodes[0].LeafCID)).To(BeTrue())
Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, expectedRctCIDsAndLeafNodes[1].LeafCID)).To(BeTrue())
Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, expectedRctCIDsAndLeafNodes[2].LeafCID)).To(BeTrue())
Expect(len(cids[0].StateNodes)).To(Equal(2))
for _, stateNode := range cids[0].StateNodes {
if stateNode.CID == test_helpers.State1CID.String() {
Expect(stateNode.StateKey).To(Equal(common.BytesToHash(test_helpers.ContractLeafKey).Hex()))
Expect(stateNode.NodeType).To(Equal(2))
Expect(stateNode.Path).To(Equal([]byte{'\x06'}))
}
if stateNode.CID == test_helpers.State2CID.String() {
Expect(stateNode.StateKey).To(Equal(common.BytesToHash(test_helpers.AccountLeafKey).Hex()))
Expect(stateNode.NodeType).To(Equal(2))
Expect(stateNode.Path).To(Equal([]byte{'\x0c'}))
}
}
Expect(len(cids[0].StorageNodes)).To(Equal(1))
expectedStorageNodeCIDs := test_helpers.MockCIDWrapper.StorageNodes
expectedStorageNodeCIDs[0].HeaderID = cids[0].StorageNodes[0].HeaderID
expectedStorageNodeCIDs[0].StatePath = cids[0].StorageNodes[0].StatePath
Expect(cids[0].StorageNodes).To(Equal(expectedStorageNodeCIDs))
})
It("Applies filters from the provided config.Subscription", func() {
type rctCIDAndMHKeyResult struct {
LeafCID string `db:"leaf_cid"`
LeafMhKey string `db:"leaf_mh_key"`
}
expectedRctCIDsAndLeafNodes := make([]rctCIDAndMHKeyResult, 0)
pgStr := `SELECT receipt_cids.leaf_cid, receipt_cids.leaf_mh_key FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
AND transaction_cids.header_id = header_cids.block_hash
AND header_cids.block_number = $1
ORDER BY transaction_cids.index`
err := db.Select(&expectedRctCIDsAndLeafNodes, pgStr, test_helpers.BlockNumber.Uint64())
Expect(err).ToNot(HaveOccurred())
cids1, empty, err := retriever.Retrieve(rctAddressFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
Expect(len(cids1)).To(Equal(1))
Expect(cids1[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
Expect(cids1[0].Header).To(Equal(models.HeaderModel{}))
Expect(len(cids1[0].Transactions)).To(Equal(0))
Expect(len(cids1[0].StateNodes)).To(Equal(0))
Expect(len(cids1[0].StorageNodes)).To(Equal(0))
Expect(len(cids1[0].Receipts)).To(Equal(1))
expectedReceiptCID := test_helpers.MockCIDWrapper.Receipts[0]
expectedReceiptCID.TxID = cids1[0].Receipts[0].TxID
expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[0].LeafCID
expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[0].LeafMhKey
Expect(cids1[0].Receipts[0]).To(Equal(expectedReceiptCID))
cids2, empty, err := retriever.Retrieve(rctTopicsFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
Expect(len(cids2)).To(Equal(1))
Expect(cids2[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
Expect(cids2[0].Header).To(Equal(models.HeaderModel{}))
Expect(len(cids2[0].Transactions)).To(Equal(0))
Expect(len(cids2[0].StateNodes)).To(Equal(0))
Expect(len(cids2[0].StorageNodes)).To(Equal(0))
Expect(len(cids2[0].Receipts)).To(Equal(1))
expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[0]
expectedReceiptCID.TxID = cids2[0].Receipts[0].TxID
expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[0].LeafCID
expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[0].LeafMhKey
Expect(cids2[0].Receipts[0]).To(Equal(expectedReceiptCID))
cids3, empty, err := retriever.Retrieve(rctTopicsAndAddressFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
Expect(len(cids3)).To(Equal(1))
Expect(cids3[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
Expect(cids3[0].Header).To(Equal(models.HeaderModel{}))
Expect(len(cids3[0].Transactions)).To(Equal(0))
Expect(len(cids3[0].StateNodes)).To(Equal(0))
Expect(len(cids3[0].StorageNodes)).To(Equal(0))
Expect(len(cids3[0].Receipts)).To(Equal(1))
expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[0]
expectedReceiptCID.TxID = cids3[0].Receipts[0].TxID
expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[0].LeafCID
expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[0].LeafMhKey
Expect(cids3[0].Receipts[0]).To(Equal(expectedReceiptCID))
cids4, empty, err := retriever.Retrieve(rctAddressesAndTopicFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
Expect(len(cids4)).To(Equal(1))
Expect(cids4[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
Expect(cids4[0].Header).To(Equal(models.HeaderModel{}))
Expect(len(cids4[0].Transactions)).To(Equal(0))
Expect(len(cids4[0].StateNodes)).To(Equal(0))
Expect(len(cids4[0].StorageNodes)).To(Equal(0))
Expect(len(cids4[0].Receipts)).To(Equal(1))
expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[1]
expectedReceiptCID.TxID = cids4[0].Receipts[0].TxID
expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[1].LeafCID
expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[1].LeafMhKey
Expect(cids4[0].Receipts[0]).To(Equal(expectedReceiptCID))
cids5, empty, err := retriever.Retrieve(rctsForAllCollectedTrxs, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
Expect(len(cids5)).To(Equal(1))
Expect(cids5[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
Expect(cids5[0].Header).To(Equal(models.HeaderModel{}))
Expect(len(cids5[0].Transactions)).To(Equal(4))
Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx1CID.String())).To(BeTrue())
Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx2CID.String())).To(BeTrue())
Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx3CID.String())).To(BeTrue())
Expect(len(cids5[0].StateNodes)).To(Equal(0))
Expect(len(cids5[0].StorageNodes)).To(Equal(0))
Expect(len(cids5[0].Receipts)).To(Equal(4))
Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, expectedRctCIDsAndLeafNodes[0].LeafCID)).To(BeTrue())
Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, expectedRctCIDsAndLeafNodes[1].LeafCID)).To(BeTrue())
Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, expectedRctCIDsAndLeafNodes[2].LeafCID)).To(BeTrue())
cids6, empty, err := retriever.Retrieve(rctsForSelectCollectedTrxs, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
Expect(len(cids6)).To(Equal(1))
Expect(cids6[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
Expect(cids6[0].Header).To(Equal(models.HeaderModel{}))
Expect(len(cids6[0].Transactions)).To(Equal(1))
expectedTxCID := test_helpers.MockCIDWrapper.Transactions[1]
expectedTxCID.TxHash = cids6[0].Transactions[0].TxHash
expectedTxCID.HeaderID = cids6[0].Transactions[0].HeaderID
Expect(cids6[0].Transactions[0]).To(Equal(expectedTxCID))
Expect(len(cids6[0].StateNodes)).To(Equal(0))
Expect(len(cids6[0].StorageNodes)).To(Equal(0))
Expect(len(cids6[0].Receipts)).To(Equal(1))
expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[1]
expectedReceiptCID.TxID = cids6[0].Receipts[0].TxID
expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[1].LeafCID
expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[1].LeafMhKey
Expect(cids6[0].Receipts[0]).To(Equal(expectedReceiptCID))
cids7, empty, err := retriever.Retrieve(stateFilter, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue())
Expect(len(cids7)).To(Equal(1))
Expect(cids7[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
Expect(cids7[0].Header).To(Equal(models.HeaderModel{}))
Expect(len(cids7[0].Transactions)).To(Equal(0))
Expect(len(cids7[0].Receipts)).To(Equal(0))
Expect(len(cids7[0].StorageNodes)).To(Equal(0))
Expect(len(cids7[0].StateNodes)).To(Equal(1))
Expect(cids7[0].StateNodes[0]).To(Equal(models.StateNodeModel{
BlockNumber: "1",
HeaderID: cids7[0].StateNodes[0].HeaderID,
NodeType: 2,
StateKey: common.BytesToHash(test_helpers.AccountLeafKey).Hex(),
CID: test_helpers.State2CID.String(),
MhKey: test_helpers.State2MhKey,
Path: []byte{'\x0c'},
}))
_, empty, err = retriever.Retrieve(rctTopicsAndAddressFilterFail, 1)
Expect(err).ToNot(HaveOccurred())
Expect(empty).To(BeTrue())
})
})
Describe("RetrieveFirstBlockNumber", func() {
It("Throws an error if there are no blocks in the database", func() {
_, err := retriever.RetrieveFirstBlockNumber()
Expect(err).To(HaveOccurred())
})
It("Gets the number of the first block that has data in the database", func() {
tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1)))
})
It("Gets the number of the first block that has data in the database", func() {
payload := test_helpers.MockConvertedPayload
payload.Block = newMockBlock(1010101)
tx, err := diffIndexer.PushBlock(payload.Block, payload.Receipts, payload.Block.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101)))
})
It("Gets the number of the first block that has data in the database", func() {
payload1 := test_helpers.MockConvertedPayload
payload1.Block = newMockBlock(1010101)
payload2 := payload1
payload2.Block = newMockBlock(5)
tx, err := diffIndexer.PushBlock(payload1.Block, payload1.Receipts, payload1.Block.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
tx, err = diffIndexer.PushBlock(payload2.Block, payload2.Receipts, payload2.Block.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(5)))
})
})
Describe("RetrieveLastBlockNumber", func() {
It("Throws an error if there are no blocks in the database", func() {
_, err := retriever.RetrieveLastBlockNumber()
Expect(err).To(HaveOccurred())
})
It("Gets the number of the latest block that has data in the database", func() {
tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1)))
})
It("Gets the number of the latest block that has data in the database", func() {
payload := test_helpers.MockConvertedPayload
payload.Block = newMockBlock(1010101)
tx, err := diffIndexer.PushBlock(payload.Block, payload.Receipts, payload.Block.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101)))
})
It("Gets the number of the latest block that has data in the database", func() {
payload1 := test_helpers.MockConvertedPayload
payload1.Block = newMockBlock(1010101)
payload2 := payload1
payload2.Block = newMockBlock(5)
tx, err := diffIndexer.PushBlock(payload1.Block, payload1.Receipts, payload1.Block.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
tx, err = diffIndexer.PushBlock(payload2.Block, payload2.Receipts, payload2.Block.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101)))
})
})
})
func newMockBlock(blockNumber uint64) *types.Block {
header := test_helpers.MockHeader
header.Number.SetUint64(blockNumber)
return types.NewBlock(&test_helpers.MockHeader, test_helpers.MockTransactions, nil, test_helpers.MockReceipts, new(trie.Trie))
}

View File

@ -1,17 +0,0 @@
package eth
import "fmt"
type RequiresProxyError struct {
method string
}
var _ error = RequiresProxyError{}
func (e RequiresProxyError) SetMethod(method string) {
e.method = method
}
func (e RequiresProxyError) Error() string {
return fmt.Sprintf("%s requires a configured proxy geth node", e.method)
}

532
pkg/eth/eth_state_test.go Normal file
View File

@ -0,0 +1,532 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_test
import (
"bytes"
"context"
"io/ioutil"
"math/big"
"time"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth/test_helpers"
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
ethServerShared "github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/jmoiron/sqlx"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var (
parsedABI abi.ABI
)
func init() {
// load abi
abiBytes, err := ioutil.ReadFile("./test_helpers/abi.json")
if err != nil {
panic(err)
}
parsedABI, err = abi.JSON(bytes.NewReader(abiBytes))
if err != nil {
panic(err)
}
}
var _ = Describe("eth state reading tests", func() {
const chainLength = 5
var (
blocks []*types.Block
receipts []types.Receipts
chain *core.BlockChain
db *sqlx.DB
api *eth.PublicEthAPI
backend *eth.Backend
chainConfig = params.TestChainConfig
mockTD = big.NewInt(1337)
expectedCanonicalHeader map[string]interface{}
)
It("test init", func() {
// db and type initializations
var err error
db = shared.SetupDB()
transformer := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
backend, err = eth.NewEthBackend(db, &eth.Config{
ChainConfig: chainConfig,
VMConfig: vm.Config{},
RPCGasCap: big.NewInt(10000000000), // Max gas capacity for a rpc call.
GroupCacheConfig: &ethServerShared.GroupCacheConfig{
StateDB: ethServerShared.GroupConfig{
Name: "eth_state_test",
CacheSizeInMB: 8,
CacheExpiryInMins: 60,
LogStatsIntervalInSecs: 0,
},
},
})
Expect(err).ToNot(HaveOccurred())
api, _ = eth.NewPublicEthAPI(backend, nil, false, false, false)
// make the test blockchain (and state)
blocks, receipts, chain = test_helpers.MakeChain(chainLength, test_helpers.Genesis, test_helpers.TestChainGen)
params := statediff.Params{
IntermediateStateNodes: true,
IntermediateStorageNodes: true,
}
canonicalHeader := blocks[1].Header()
expectedCanonicalHeader = map[string]interface{}{
"number": (*hexutil.Big)(canonicalHeader.Number),
"hash": canonicalHeader.Hash(),
"parentHash": canonicalHeader.ParentHash,
"nonce": canonicalHeader.Nonce,
"mixHash": canonicalHeader.MixDigest,
"sha3Uncles": canonicalHeader.UncleHash,
"logsBloom": canonicalHeader.Bloom,
"stateRoot": canonicalHeader.Root,
"miner": canonicalHeader.Coinbase,
"difficulty": (*hexutil.Big)(canonicalHeader.Difficulty),
"extraData": hexutil.Bytes([]byte{}),
"size": hexutil.Uint64(canonicalHeader.Size()),
"gasLimit": hexutil.Uint64(canonicalHeader.GasLimit),
"gasUsed": hexutil.Uint64(canonicalHeader.GasUsed),
"timestamp": hexutil.Uint64(canonicalHeader.Time),
"transactionsRoot": canonicalHeader.TxHash,
"receiptsRoot": canonicalHeader.ReceiptHash,
"totalDifficulty": (*hexutil.Big)(mockTD),
}
// iterate over the blocks, generating statediff payloads, and transforming the data into Postgres
builder := statediff.NewBuilder(chain.StateCache())
for i, block := range blocks {
var args statediff.Args
var rcts types.Receipts
if i == 0 {
args = statediff.Args{
OldStateRoot: common.Hash{},
NewStateRoot: block.Root(),
BlockNumber: block.Number(),
BlockHash: block.Hash(),
}
} else {
args = statediff.Args{
OldStateRoot: blocks[i-1].Root(),
NewStateRoot: block.Root(),
BlockNumber: block.Number(),
BlockHash: block.Hash(),
}
rcts = receipts[i-1]
}
diff, err := builder.BuildStateDiffObject(args, params)
Expect(err).ToNot(HaveOccurred())
tx, err := transformer.PushBlock(block, rcts, mockTD)
Expect(err).ToNot(HaveOccurred())
for _, node := range diff.Nodes {
err = transformer.PushStateNode(tx, node, block.Hash().String())
Expect(err).ToNot(HaveOccurred())
}
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
}
// Insert some non-canonical data into the database so that we test our ability to discern canonicity
indexAndPublisher := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
tx, err := indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
// The non-canonical header has a child
tx, err = indexAndPublisher.PushBlock(test_helpers.MockChild, test_helpers.MockReceipts, test_helpers.MockChild.Difficulty())
Expect(err).ToNot(HaveOccurred())
hash := sdtypes.CodeAndCodeHash{
Hash: test_helpers.CodeHash,
Code: test_helpers.ContractCode,
}
err = indexAndPublisher.PushCodeAndCodeHash(tx, hash)
Expect(err).ToNot(HaveOccurred())
// wait for tx batch process to complete.
time.Sleep(10000 * time.Millisecond)
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
})
defer It("test teardown", func() {
shared.TearDownDB(db)
chain.Stop()
})
Describe("eth_call", func() {
It("Applies call args (tx data) on top of state, returning the result (e.g. a Getter method call)", func() {
data, err := parsedABI.Pack("data")
Expect(err).ToNot(HaveOccurred())
bdata := hexutil.Bytes(data)
callArgs := eth.CallArgs{
To: &test_helpers.ContractAddr,
Data: &bdata,
}
// Before contract deployment, returns nil
res, err := api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(0), nil)
Expect(err).ToNot(HaveOccurred())
Expect(res).To(BeNil())
res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(1), nil)
Expect(err).ToNot(HaveOccurred())
Expect(res).To(BeNil())
// After deployment
res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(2), nil)
Expect(err).ToNot(HaveOccurred())
expectedRes := hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
Expect(res).To(Equal(expectedRes))
res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(3), nil)
Expect(err).ToNot(HaveOccurred())
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003"))
Expect(res).To(Equal(expectedRes))
res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(4), nil)
Expect(err).ToNot(HaveOccurred())
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000009"))
Expect(res).To(Equal(expectedRes))
res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(5), nil)
Expect(err).ToNot(HaveOccurred())
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"))
Expect(res).To(Equal(expectedRes))
})
})
var (
expectedContractBalance = (*hexutil.Big)(common.Big0)
expectedBankBalanceBlock0 = (*hexutil.Big)(test_helpers.TestBankFunds)
expectedAcct1BalanceBlock1 = (*hexutil.Big)(big.NewInt(10000))
expectedBankBalanceBlock1 = (*hexutil.Big)(new(big.Int).Sub(test_helpers.TestBankFunds, big.NewInt(10000)))
expectedAcct2BalanceBlock2 = (*hexutil.Big)(big.NewInt(1000))
expectedBankBalanceBlock2 = (*hexutil.Big)(new(big.Int).Sub(expectedBankBalanceBlock1.ToInt(), big.NewInt(1000)))
expectedAcct2BalanceBlock3 = (*hexutil.Big)(new(big.Int).Add(expectedAcct2BalanceBlock2.ToInt(), test_helpers.MiningReward))
expectedAcct2BalanceBlock4 = (*hexutil.Big)(new(big.Int).Add(expectedAcct2BalanceBlock3.ToInt(), test_helpers.MiningReward))
expectedAcct1BalanceBlock5 = (*hexutil.Big)(new(big.Int).Add(expectedAcct1BalanceBlock1.ToInt(), test_helpers.MiningReward))
)
Describe("eth_getBalance", func() {
It("Retrieves the eth balance for the provided account address at the block with the provided number", func() {
bal, err := api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(0))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock0))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(1))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(1))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(1))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(1))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(2))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(2))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(2))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(2))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock3))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(4))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(4))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(4))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(4))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock5))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2))
})
It("Retrieves the eth balance for the provided account address at the block with the provided hash", func() {
bal, err := api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock0))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock3))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock5))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2))
})
It("Returns `0` for an account it cannot find the balance for an account at the provided block number", func() {
bal, err := api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(0))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(0))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(0))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
})
It("Returns `0` for an error for an account it cannot find the balance for an account at the provided block hash", func() {
bal, err := api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
})
})
Describe("eth_getCode", func() {
It("Retrieves the code for the provided contract address at the block with the provided number", func() {
code, err := api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred())
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
code, err = api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred())
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
})
It("Retrieves the code for the provided contract address at the block with the provided hash", func() {
code, err := api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
code, err = api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
})
It("Returns `nil` for an account it cannot find the code for", func() {
code, err := api.GetCode(ctx, randomAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(code).To(BeEmpty())
})
It("Returns `nil` for a contract that doesn't exist at this height", func() {
code, err := api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(0))
Expect(err).ToNot(HaveOccurred())
Expect(code).To(BeEmpty())
})
})
Describe("eth_getStorageAt", func() {
It("Returns empty slice if it tries to access a contract which does not exist", func() {
storage, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(0))
Expect(err).NotTo(HaveOccurred())
Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
storage, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(1))
Expect(err).NotTo(HaveOccurred())
Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
})
It("Returns empty slice if it tries to access a contract slot which does not exist", func() {
storage, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, randomHash.Hex(), rpc.BlockNumberOrHashWithNumber(2))
Expect(err).NotTo(HaveOccurred())
Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
})
It("Retrieves the storage value at the provided contract address and storage leaf key at the block with the provided hash or number", func() {
// After deployment
val, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(2))
Expect(err).ToNot(HaveOccurred())
expectedRes := hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
Expect(val).To(Equal(expectedRes))
val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred())
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003"))
Expect(val).To(Equal(expectedRes))
val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(4))
Expect(err).ToNot(HaveOccurred())
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000009"))
Expect(val).To(Equal(expectedRes))
val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred())
Expect(val).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
})
It("Throws an error for a non-existing block hash", func() {
_, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithHash(randomHash, true))
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError("header for hash not found"))
})
It("Throws an error for a non-existing block number", func() {
_, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(chainLength+1))
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError("header not found"))
})
})
Describe("eth_getHeaderByNumber", func() {
It("Finds the canonical header based on the header's weight relative to others at the provided height", func() {
header, err := api.GetHeaderByNumber(ctx, number)
Expect(err).ToNot(HaveOccurred())
Expect(header).To(Equal(expectedCanonicalHeader))
})
})
})

View File

@ -17,13 +17,19 @@
package eth_test
import (
"io/ioutil"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
)
func TestETHSuite(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "ipld-eth-server/pkg/eth")
RunSpecs(t, "eth ipld server eth suite test")
}
var _ = BeforeSuite(func() {
logrus.SetOutput(ioutil.Discard)
})

369
pkg/eth/filterer.go Normal file
View File

@ -0,0 +1,369 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"bytes"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
)
// Filterer interface for substituting mocks in tests
type Filterer interface {
Filter(filter SubscriptionSettings, payload ConvertedPayload) (*IPLDs, error)
}
// ResponseFilterer satisfies the Filterer interface for Ethereum
type ResponseFilterer struct{}
// NewResponseFilterer creates a new ResponseFilterer satisfying the Filterer interface
func NewResponseFilterer() *ResponseFilterer {
return &ResponseFilterer{}
}
// Filter is used to filter through eth data to extract and package requested data into a Payload
func (s *ResponseFilterer) Filter(filter SubscriptionSettings, payload ConvertedPayload) (*IPLDs, error) {
if checkRange(filter.Start.Int64(), filter.End.Int64(), payload.Block.Number().Int64()) {
response := new(IPLDs)
response.TotalDifficulty = payload.TotalDifficulty
if err := s.filterHeaders(filter.HeaderFilter, response, payload); err != nil {
return nil, err
}
txHashes, err := s.filterTransactions(filter.TxFilter, response, payload)
if err != nil {
return nil, err
}
var filterTxs []common.Hash
if filter.ReceiptFilter.MatchTxs {
filterTxs = txHashes
}
if err := s.filerReceipts(filter.ReceiptFilter, response, payload, filterTxs); err != nil {
return nil, err
}
if err := s.filterStateAndStorage(filter.StateFilter, filter.StorageFilter, response, payload); err != nil {
return nil, err
}
response.BlockNumber = payload.Block.Number()
return response, nil
}
return nil, nil
}
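// Note: Filter returns a nil response with a nil error when the payload's block number
// falls outside the subscription's [Start, End] range, so callers should treat
// (nil, nil) as "no data for this block" rather than as a failure.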
func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IPLDs, payload ConvertedPayload) error {
if !headerFilter.Off {
headerRLP, err := rlp.EncodeToBytes(payload.Block.Header())
if err != nil {
return err
}
cid, err := ipld.RawdataToCid(ipld.MEthHeader, headerRLP, multihash.KECCAK_256)
if err != nil {
return err
}
response.Header = models.IPLDModel{
BlockNumber: payload.Block.Number().String(),
Data: headerRLP,
Key: cid.String(),
}
if headerFilter.Uncles {
response.Uncles = make([]models.IPLDModel, len(payload.Block.Body().Uncles))
for i, uncle := range payload.Block.Body().Uncles {
uncleRlp, err := rlp.EncodeToBytes(uncle)
if err != nil {
return err
}
cid, err := ipld.RawdataToCid(ipld.MEthHeader, uncleRlp, multihash.KECCAK_256)
if err != nil {
return err
}
response.Uncles[i] = models.IPLDModel{
BlockNumber: uncle.Number.String(),
Data: uncleRlp,
Key: cid.String(),
}
}
}
}
return nil
}
func checkRange(start, end, actual int64) bool {
if (end <= 0 || end >= actual) && start <= actual {
return true
}
return false
}
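// For illustration: checkRange(0, 0, 5) is true because a non-positive end makes the
// range open-ended, while checkRange(2, 4, 5) is false because block 5 lies beyond
// the requested end of 4.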
func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLDs, payload ConvertedPayload) ([]common.Hash, error) {
var trxHashes []common.Hash
if !trxFilter.Off {
trxLen := len(payload.Block.Body().Transactions)
trxHashes = make([]common.Hash, 0, trxLen)
response.Transactions = make([]models.IPLDModel, 0, trxLen)
for i, trx := range payload.Block.Body().Transactions {
// TODO: check whether we want the corresponding receipt; if we do, we must include this transaction
if checkTransactionAddrs(trxFilter.Src, trxFilter.Dst, payload.TxMetaData[i].Src, payload.TxMetaData[i].Dst) {
trxBuffer := new(bytes.Buffer)
if err := trx.EncodeRLP(trxBuffer); err != nil {
return nil, err
}
data := trxBuffer.Bytes()
cid, err := ipld.RawdataToCid(ipld.MEthTx, data, multihash.KECCAK_256)
if err != nil {
return nil, err
}
response.Transactions = append(response.Transactions, models.IPLDModel{
Data: data,
Key: cid.String(),
})
trxHashes = append(trxHashes, trx.Hash())
}
}
}
return trxHashes, nil
}
// checkTransactionAddrs returns true if either the transaction src or dst is one of the wanted src or dst addresses
func checkTransactionAddrs(wantedSrc, wantedDst []string, actualSrc, actualDst string) bool {
// If we aren't filtering for any addresses, every transaction is a go
if len(wantedDst) == 0 && len(wantedSrc) == 0 {
return true
}
for _, src := range wantedSrc {
if src == actualSrc {
return true
}
}
for _, dst := range wantedDst {
if dst == actualDst {
return true
}
}
return false
}
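// For illustration: with both wantedSrc and wantedDst empty, every transaction is kept;
// with wantedDst set to a single address, only transactions sent to that address (or
// originating from one of the wantedSrc addresses, if any are given) are kept.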
func (s *ResponseFilterer) filerReceipts(receiptFilter ReceiptFilter, response *IPLDs, payload ConvertedPayload, trxHashes []common.Hash) error {
if !receiptFilter.Off {
response.Receipts = make([]models.IPLDModel, 0, len(payload.Receipts))
rctLeafCID, rctIPLDData, err := GetRctLeafNodeData(payload.Receipts)
if err != nil {
return err
}
for idx, receipt := range payload.Receipts {
// topics is always length 4
topics := make([][]string, 4)
contracts := make([]string, 0, len(receipt.Logs))
for _, l := range receipt.Logs {
contracts = append(contracts, l.Address.String())
for idx, t := range l.Topics {
topics[idx] = append(topics[idx], t.String())
}
}
// TODO: Verify this filter logic.
if checkReceipts(receipt, receiptFilter.Topics, topics, receiptFilter.LogAddresses, contracts, trxHashes) {
response.Receipts = append(response.Receipts, models.IPLDModel{
BlockNumber: payload.Block.Number().String(),
Data: rctIPLDData[idx],
Key: rctLeafCID[idx].String(),
})
}
}
}
return nil
}
func checkReceipts(rct *types.Receipt, wantedTopics, actualTopics [][]string, wantedAddresses []string, actualAddresses []string, wantedTrxHashes []common.Hash) bool {
// If we aren't filtering for any topics, contracts, or corresponding trxs then all receipts are a go
if len(wantedTopics) == 0 && len(wantedAddresses) == 0 && len(wantedTrxHashes) == 0 {
return true
}
// Keep receipts that are from watched txs
for _, wantedTrxHash := range wantedTrxHashes {
if bytes.Equal(wantedTrxHash.Bytes(), rct.TxHash.Bytes()) {
return true
}
}
// If there are no wanted contract addresses, we keep all receipts that match the topic filter
if len(wantedAddresses) == 0 {
if filterMatch(wantedTopics, actualTopics) {
return true
}
}
// If there are wanted contract addresses to filter on
for _, wantedAddr := range wantedAddresses {
// and this is an address of interest
for _, actualAddr := range actualAddresses {
if wantedAddr == actualAddr {
// we keep the receipt if it matches on the topic filter
if filterMatch(wantedTopics, actualTopics) {
return true
}
}
}
}
return false
}
// filterMatch returns true if the actualTopics conform to the wantedTopics filter
func filterMatch(wantedTopics, actualTopics [][]string) bool {
// actualTopics should always be length 4, but the members can be nil slices
matches := 0
for i, actualTopicSet := range actualTopics {
if i < len(wantedTopics) && len(wantedTopics[i]) > 0 {
// If we have topics in this filter slot, count as a match if one of the topics matches
matches += slicesShareString(actualTopicSet, wantedTopics[i])
} else {
// Filter slot is either empty or doesn't exist => not matching any topics at this slot => counts as a match
matches++
}
}
return matches == 4
}
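// For illustration: with wantedTopics = [][]string{{"0xaa"}} and
// actualTopics = [][]string{{"0xaa", "0xbb"}, nil, nil, nil}, slot 0 matches on "0xaa"
// and the three remaining slots count as matches because no filter is set for them,
// so filterMatch returns true; if wantedTopics[0] were {"0xcc"} instead, slot 0 would
// not match and the result would be false.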
// slicesShareString returns 1 if the two slices have a string in common, 0 if they do not
func slicesShareString(slice1, slice2 []string) int {
for _, str1 := range slice1 {
for _, str2 := range slice2 {
if str1 == str2 {
return 1
}
}
}
return 0
}
// filterStateAndStorage filters state and storage nodes into the response according to the provided filters
func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storageFilter StorageFilter, response *IPLDs, payload ConvertedPayload) error {
response.StateNodes = make([]StateNode, 0, len(payload.StateNodes))
response.StorageNodes = make([]StorageNode, 0)
stateAddressFilters := make([]common.Hash, len(stateFilter.Addresses))
for i, addr := range stateFilter.Addresses {
stateAddressFilters[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes())
}
storageAddressFilters := make([]common.Hash, len(storageFilter.Addresses))
for i, addr := range storageFilter.Addresses {
storageAddressFilters[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes())
}
storageKeyFilters := make([]common.Hash, len(storageFilter.StorageKeys))
for i, store := range storageFilter.StorageKeys {
storageKeyFilters[i] = common.HexToHash(store)
}
for _, stateNode := range payload.StateNodes {
if !stateFilter.Off && checkNodeKeys(stateAddressFilters, stateNode.LeafKey) {
if stateNode.NodeType == sdtypes.Leaf || stateFilter.IntermediateNodes {
cid, err := ipld.RawdataToCid(ipld.MEthStateTrie, stateNode.NodeValue, multihash.KECCAK_256)
if err != nil {
return err
}
response.StateNodes = append(response.StateNodes, StateNode{
StateLeafKey: common.BytesToHash(stateNode.LeafKey),
Path: stateNode.Path,
IPLD: models.IPLDModel{
BlockNumber: payload.Block.Number().String(),
Data: stateNode.NodeValue,
Key: cid.String(),
},
Type: stateNode.NodeType,
})
}
}
if !storageFilter.Off && checkNodeKeys(storageAddressFilters, stateNode.LeafKey) {
for _, storageNode := range payload.StorageNodes[common.Bytes2Hex(stateNode.Path)] {
if checkNodeKeys(storageKeyFilters, storageNode.LeafKey) {
cid, err := ipld.RawdataToCid(ipld.MEthStorageTrie, storageNode.NodeValue, multihash.KECCAK_256)
if err != nil {
return err
}
response.StorageNodes = append(response.StorageNodes, StorageNode{
StateLeafKey: common.BytesToHash(stateNode.LeafKey),
StorageLeafKey: common.BytesToHash(storageNode.LeafKey),
IPLD: models.IPLDModel{
BlockNumber: payload.Block.Number().String(),
Data: storageNode.NodeValue,
Key: cid.String(),
},
Type: storageNode.NodeType,
Path: storageNode.Path,
})
}
}
}
}
return nil
}
func checkNodeKeys(wantedKeys []common.Hash, actualKey []byte) bool {
// If we aren't filtering for any specific keys, all nodes are a go
if len(wantedKeys) == 0 {
return true
}
for _, key := range wantedKeys {
if bytes.Equal(key.Bytes(), actualKey) {
return true
}
}
return false
}
// GetRctLeafNodeData converts the receipts into a receipt trie and returns the receipt leaf node IPLD data and
// the corresponding CIDs
func GetRctLeafNodeData(rcts types.Receipts) ([]cid.Cid, [][]byte, error) {
receiptTrie := ipld.NewRctTrie()
for idx, rct := range rcts {
ethRct, err := ipld.NewReceipt(rct)
if err != nil {
return nil, nil, err
}
if err = receiptTrie.Add(idx, ethRct.RawData()); err != nil {
return nil, nil, err
}
}
rctLeafNodes, keys, err := receiptTrie.GetLeafNodes()
if err != nil {
return nil, nil, err
}
ethRctleafNodeCids := make([]cid.Cid, len(rctLeafNodes))
ethRctleafNodeData := make([][]byte, len(rctLeafNodes))
for i, rln := range rctLeafNodes {
var idx uint
r := bytes.NewReader(keys[i].TrieKey)
err = rlp.Decode(r, &idx)
if err != nil {
return nil, nil, err
}
ethRctleafNodeCids[idx] = rln.Cid()
ethRctleafNodeData[idx] = rln.RawData()
}
return ethRctleafNodeCids, ethRctleafNodeData, nil
}
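// Note: the returned CID and data slices are ordered by receipt index (decoded from each
// leaf node's trie key), not by the order in which GetLeafNodes yields the nodes, so
// index i in both slices corresponds to the i-th receipt in the block.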

208
pkg/eth/filterer_test.go Normal file
View File

@ -0,0 +1,208 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_test
import (
"bytes"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth/test_helpers"
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
)
var (
filterer *eth.ResponseFilterer
)
var _ = Describe("Filterer", func() {
Describe("FilterResponse", func() {
BeforeEach(func() {
filterer = eth.NewResponseFilterer()
})
It("Transcribes all the data from the IPLDPayload into the StreamPayload if given an open filter", func() {
iplds, err := filterer.Filter(openFilter, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
Expect(iplds).ToNot(BeNil())
Expect(iplds.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds.Header).To(Equal(test_helpers.MockIPLDs.Header))
var expectedEmptyUncles []models.IPLDModel
Expect(iplds.Uncles).To(Equal(expectedEmptyUncles))
Expect(len(iplds.Transactions)).To(Equal(4))
Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.Tx1)).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.Tx2)).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.Tx3)).To(BeTrue())
Expect(len(iplds.Receipts)).To(Equal(4))
Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct1IPLD)).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct2IPLD)).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct3IPLD)).To(BeTrue())
Expect(len(iplds.StateNodes)).To(Equal(2))
for _, stateNode := range iplds.StateNodes {
Expect(stateNode.Type).To(Equal(sdtypes.Leaf))
if bytes.Equal(stateNode.StateLeafKey.Bytes(), test_helpers.AccountLeafKey) {
Expect(stateNode.IPLD).To(Equal(models.IPLDModel{
BlockNumber: test_helpers.BlockNumber.String(),
Data: test_helpers.State2IPLD.RawData(),
Key: test_helpers.State2IPLD.Cid().String(),
}))
}
if bytes.Equal(stateNode.StateLeafKey.Bytes(), test_helpers.ContractLeafKey) {
Expect(stateNode.IPLD).To(Equal(models.IPLDModel{
BlockNumber: test_helpers.BlockNumber.String(),
Data: test_helpers.State1IPLD.RawData(),
Key: test_helpers.State1IPLD.Cid().String(),
}))
}
}
Expect(iplds.StorageNodes).To(Equal(test_helpers.MockIPLDs.StorageNodes))
})
It("Applies filters from the provided config.Subscription", func() {
iplds1, err := filterer.Filter(rctAddressFilter, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
Expect(iplds1).ToNot(BeNil())
Expect(iplds1.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds1.Header).To(Equal(models.IPLDModel{}))
Expect(len(iplds1.Uncles)).To(Equal(0))
Expect(len(iplds1.Transactions)).To(Equal(0))
Expect(len(iplds1.StorageNodes)).To(Equal(0))
Expect(len(iplds1.StateNodes)).To(Equal(0))
Expect(len(iplds1.Receipts)).To(Equal(1))
Expect(iplds1.Receipts[0]).To(Equal(models.IPLDModel{
BlockNumber: test_helpers.BlockNumber.String(),
Data: test_helpers.Rct1IPLD,
Key: test_helpers.Rct1CID.String(),
}))
iplds2, err := filterer.Filter(rctTopicsFilter, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
Expect(iplds2).ToNot(BeNil())
Expect(iplds2.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds2.Header).To(Equal(models.IPLDModel{}))
Expect(len(iplds2.Uncles)).To(Equal(0))
Expect(len(iplds2.Transactions)).To(Equal(0))
Expect(len(iplds2.StorageNodes)).To(Equal(0))
Expect(len(iplds2.StateNodes)).To(Equal(0))
Expect(len(iplds2.Receipts)).To(Equal(1))
Expect(iplds2.Receipts[0]).To(Equal(models.IPLDModel{
BlockNumber: test_helpers.BlockNumber.String(),
Data: test_helpers.Rct1IPLD,
Key: test_helpers.Rct1CID.String(),
}))
iplds3, err := filterer.Filter(rctTopicsAndAddressFilter, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
Expect(iplds3).ToNot(BeNil())
Expect(iplds3.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds3.Header).To(Equal(models.IPLDModel{}))
Expect(len(iplds3.Uncles)).To(Equal(0))
Expect(len(iplds3.Transactions)).To(Equal(0))
Expect(len(iplds3.StorageNodes)).To(Equal(0))
Expect(len(iplds3.StateNodes)).To(Equal(0))
Expect(len(iplds3.Receipts)).To(Equal(1))
Expect(iplds3.Receipts[0]).To(Equal(models.IPLDModel{
BlockNumber: test_helpers.BlockNumber.String(),
Data: test_helpers.Rct1IPLD,
Key: test_helpers.Rct1CID.String(),
}))
iplds4, err := filterer.Filter(rctAddressesAndTopicFilter, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
Expect(iplds4).ToNot(BeNil())
Expect(iplds4.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds4.Header).To(Equal(models.IPLDModel{}))
Expect(len(iplds4.Uncles)).To(Equal(0))
Expect(len(iplds4.Transactions)).To(Equal(0))
Expect(len(iplds4.StorageNodes)).To(Equal(0))
Expect(len(iplds4.StateNodes)).To(Equal(0))
Expect(len(iplds4.Receipts)).To(Equal(1))
Expect(iplds4.Receipts[0]).To(Equal(models.IPLDModel{
BlockNumber: test_helpers.BlockNumber.String(),
Data: test_helpers.Rct2IPLD,
Key: test_helpers.Rct2CID.String(),
}))
iplds5, err := filterer.Filter(rctsForAllCollectedTrxs, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
Expect(iplds5).ToNot(BeNil())
Expect(iplds5.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds5.Header).To(Equal(models.IPLDModel{}))
Expect(len(iplds5.Uncles)).To(Equal(0))
Expect(len(iplds5.Transactions)).To(Equal(4))
Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx1)).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx2)).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx3)).To(BeTrue())
Expect(len(iplds5.StorageNodes)).To(Equal(0))
Expect(len(iplds5.StateNodes)).To(Equal(0))
Expect(len(iplds5.Receipts)).To(Equal(4))
Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct1IPLD)).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct2IPLD)).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct3IPLD)).To(BeTrue())
iplds6, err := filterer.Filter(rctsForSelectCollectedTrxs, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
Expect(iplds6).ToNot(BeNil())
Expect(iplds6.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds6.Header).To(Equal(models.IPLDModel{}))
Expect(len(iplds6.Uncles)).To(Equal(0))
Expect(len(iplds6.Transactions)).To(Equal(1))
Expect(shared.IPLDsContainBytes(iplds6.Transactions, test_helpers.Tx2)).To(BeTrue())
Expect(len(iplds6.StorageNodes)).To(Equal(0))
Expect(len(iplds6.StateNodes)).To(Equal(0))
Expect(len(iplds6.Receipts)).To(Equal(1))
Expect(iplds6.Receipts[0]).To(Equal(models.IPLDModel{
BlockNumber: test_helpers.BlockNumber.String(),
Data: test_helpers.Rct2IPLD,
Key: test_helpers.Rct2CID.String(),
}))
iplds7, err := filterer.Filter(stateFilter, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
Expect(iplds7).ToNot(BeNil())
Expect(iplds7.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds7.Header).To(Equal(models.IPLDModel{}))
Expect(len(iplds7.Uncles)).To(Equal(0))
Expect(len(iplds7.Transactions)).To(Equal(0))
Expect(len(iplds7.StorageNodes)).To(Equal(0))
Expect(len(iplds7.Receipts)).To(Equal(0))
Expect(len(iplds7.StateNodes)).To(Equal(1))
Expect(iplds7.StateNodes[0].StateLeafKey.Bytes()).To(Equal(test_helpers.AccountLeafKey))
Expect(iplds7.StateNodes[0].IPLD).To(Equal(models.IPLDModel{
BlockNumber: test_helpers.BlockNumber.String(),
Data: test_helpers.State2IPLD.RawData(),
Key: test_helpers.State2IPLD.Cid().String(),
}))
iplds8, err := filterer.Filter(rctTopicsAndAddressFilterFail, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
Expect(iplds8).ToNot(BeNil())
Expect(iplds8.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds8.Header).To(Equal(models.IPLDModel{}))
Expect(len(iplds8.Uncles)).To(Equal(0))
Expect(len(iplds8.Transactions)).To(Equal(0))
Expect(len(iplds8.StorageNodes)).To(Equal(0))
Expect(len(iplds8.StateNodes)).To(Equal(0))
Expect(len(iplds8.Receipts)).To(Equal(0))
})
})
})

View File

@ -17,10 +17,20 @@
package eth
import (
"time"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)
// Timestamp in milliseconds
func makeTimestamp() int64 {
return time.Now().UnixNano() / int64(time.Millisecond)
func ResolveToNodeType(nodeType int) sdtypes.NodeType {
switch nodeType {
case 0:
return sdtypes.Branch
case 1:
return sdtypes.Extension
case 2:
return sdtypes.Leaf
case 3:
return sdtypes.Removed
default:
return sdtypes.Unknown
}
}
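As a quick illustration of the mapping above (the integers mirror the statediff indexer's node_type column encoding); these lines are illustrative and not part of this diff:
_ = ResolveToNodeType(2) == sdtypes.Leaf    // true
_ = ResolveToNodeType(3) == sdtypes.Removed // true
_ = ResolveToNodeType(7) == sdtypes.Unknown // true: anything outside 0-3 resolves to Unknown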

47
pkg/eth/interfaces.go Normal file
View File

@ -0,0 +1,47 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/rpc"
)
// FilterBackend is the geth interface we need to satisfy to use their filters
type FilterBackend interface {
ChainDb() ethdb.Database
HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)
HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error)
GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error)
SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription
SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription
BloomStatus() (uint64, uint64)
ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)
}
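A common companion to an interface like FilterBackend is a compile-time assertion that a concrete backend satisfies it; the Backend type named below is an assumption for illustration and does not appear in this diff.
// Hypothetical compile-time check: the build fails if Backend drifts from the geth filter surface.
var _ FilterBackend = (*Backend)(nil)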

248
pkg/eth/ipld_fetcher.go Normal file
View File

@ -0,0 +1,248 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"errors"
"fmt"
"math/big"
"strconv"
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/jmoiron/sqlx"
log "github.com/sirupsen/logrus"
)
// Fetcher interface for substituting mocks in tests
type Fetcher interface {
Fetch(cids CIDWrapper) (*IPLDs, error)
}
// IPLDFetcher satisfies the Fetcher interface for ethereum
// It interfaces directly with PG-IPFS
type IPLDFetcher struct {
db *sqlx.DB
}
// NewIPLDFetcher creates a pointer to a new IPLDFetcher
func NewIPLDFetcher(db *sqlx.DB) *IPLDFetcher {
return &IPLDFetcher{
db: db,
}
}
// Fetch is the exported method for fetching and returning all the IPLDs specified in the CIDWrapper
func (f *IPLDFetcher) Fetch(cids CIDWrapper) (*IPLDs, error) {
log.Debug("fetching iplds")
iplds := new(IPLDs)
var ok bool
iplds.TotalDifficulty, ok = new(big.Int).SetString(cids.Header.TotalDifficulty, 10)
if !ok {
return nil, errors.New("eth fetcher: unable to set total difficulty")
}
iplds.BlockNumber = cids.BlockNumber
tx, err := f.db.Beginx()
if err != nil {
return nil, err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
iplds.Header, err = f.FetchHeader(tx, cids.Header)
if err != nil {
return nil, fmt.Errorf("eth pg fetcher: header fetching error: %s", err.Error())
}
iplds.Uncles, err = f.FetchUncles(tx, cids.Uncles)
if err != nil {
return nil, fmt.Errorf("eth pg fetcher: uncle fetching error: %s", err.Error())
}
iplds.Transactions, err = f.FetchTrxs(tx, cids.Transactions)
if err != nil {
return nil, fmt.Errorf("eth pg fetcher: transaction fetching error: %s", err.Error())
}
iplds.Receipts, err = f.FetchRcts(tx, cids.Receipts)
if err != nil {
return nil, fmt.Errorf("eth pg fetcher: receipt fetching error: %s", err.Error())
}
iplds.StateNodes, err = f.FetchState(tx, cids.StateNodes)
if err != nil {
return nil, fmt.Errorf("eth pg fetcher: state fetching error: %s", err.Error())
}
iplds.StorageNodes, err = f.FetchStorage(tx, cids.StorageNodes)
if err != nil {
return nil, fmt.Errorf("eth pg fetcher: storage fetching error: %s", err.Error())
}
return iplds, err
}
// FetchHeader fetches header
func (f *IPLDFetcher) FetchHeader(tx *sqlx.Tx, c models.HeaderModel) (models.IPLDModel, error) {
log.Debug("fetching header ipld")
blockNumber, err := strconv.ParseUint(c.BlockNumber, 10, 64)
if err != nil {
return models.IPLDModel{}, err
}
headerBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, c.MhKey, blockNumber)
if err != nil {
return models.IPLDModel{}, err
}
return models.IPLDModel{
BlockNumber: c.BlockNumber,
Data: headerBytes,
Key: c.CID,
}, nil
}
// FetchUncles fetches uncles
func (f *IPLDFetcher) FetchUncles(tx *sqlx.Tx, cids []models.UncleModel) ([]models.IPLDModel, error) {
log.Debug("fetching uncle iplds")
uncleIPLDs := make([]models.IPLDModel, len(cids))
for i, c := range cids {
blockNumber, err := strconv.ParseUint(c.BlockNumber, 10, 64)
if err != nil {
return nil, err
}
uncleBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, c.MhKey, blockNumber)
if err != nil {
return nil, err
}
uncleIPLDs[i] = models.IPLDModel{
BlockNumber: c.BlockNumber,
Data: uncleBytes,
Key: c.CID,
}
}
return uncleIPLDs, nil
}
// FetchTrxs fetches transactions
func (f *IPLDFetcher) FetchTrxs(tx *sqlx.Tx, cids []models.TxModel) ([]models.IPLDModel, error) {
log.Debug("fetching transaction iplds")
trxIPLDs := make([]models.IPLDModel, len(cids))
for i, c := range cids {
blockNumber, err := strconv.ParseUint(c.BlockNumber, 10, 64)
if err != nil {
return nil, err
}
txBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, c.MhKey, blockNumber)
if err != nil {
return nil, err
}
trxIPLDs[i] = models.IPLDModel{
BlockNumber: c.BlockNumber,
Data: txBytes,
Key: c.CID,
}
}
return trxIPLDs, nil
}
// FetchRcts fetches receipts
func (f *IPLDFetcher) FetchRcts(tx *sqlx.Tx, cids []models.ReceiptModel) ([]models.IPLDModel, error) {
log.Debug("fetching receipt iplds")
rctIPLDs := make([]models.IPLDModel, len(cids))
for i, c := range cids {
blockNumber, err := strconv.ParseUint(c.BlockNumber, 10, 64)
if err != nil {
return nil, err
}
rctBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, c.LeafMhKey, blockNumber)
if err != nil {
return nil, err
}
//nodeVal, err := DecodeLeafNode(rctBytes)
rctIPLDs[i] = models.IPLDModel{
BlockNumber: c.BlockNumber,
Data: rctBytes,
Key: c.LeafCID,
}
}
return rctIPLDs, nil
}
// FetchState fetches state nodes
func (f *IPLDFetcher) FetchState(tx *sqlx.Tx, cids []models.StateNodeModel) ([]StateNode, error) {
log.Debug("fetching state iplds")
stateNodes := make([]StateNode, 0, len(cids))
for _, stateNode := range cids {
if stateNode.CID == "" {
continue
}
blockNumber, err := strconv.ParseUint(stateNode.BlockNumber, 10, 64)
if err != nil {
return nil, err
}
stateBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, stateNode.MhKey, blockNumber)
if err != nil {
return nil, err
}
stateNodes = append(stateNodes, StateNode{
IPLD: models.IPLDModel{
BlockNumber: stateNode.BlockNumber,
Data: stateBytes,
Key: stateNode.CID,
},
StateLeafKey: common.HexToHash(stateNode.StateKey),
Type: ResolveToNodeType(stateNode.NodeType),
Path: stateNode.Path,
})
}
return stateNodes, nil
}
// FetchStorage fetches storage nodes
func (f *IPLDFetcher) FetchStorage(tx *sqlx.Tx, cids []models.StorageNodeWithStateKeyModel) ([]StorageNode, error) {
log.Debug("fetching storage iplds")
storageNodes := make([]StorageNode, 0, len(cids))
for _, storageNode := range cids {
if storageNode.CID == "" || storageNode.StateKey == "" {
continue
}
blockNumber, err := strconv.ParseUint(storageNode.BlockNumber, 10, 64)
if err != nil {
return nil, err
}
storageBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, storageNode.MhKey, blockNumber)
if err != nil {
return nil, err
}
storageNodes = append(storageNodes, StorageNode{
IPLD: models.IPLDModel{
BlockNumber: storageNode.BlockNumber,
Data: storageBytes,
Key: storageNode.CID,
},
StateLeafKey: common.HexToHash(storageNode.StateKey),
StorageLeafKey: common.HexToHash(storageNode.StorageKey),
Type: ResolveToNodeType(storageNode.NodeType),
Path: storageNode.Path,
})
}
return storageNodes, nil
}
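A hedged sketch of how the fetcher might be wired up end to end; dsn and cids are assumed inputs, registration of a postgres driver (e.g. via lib/pq) is assumed to happen elsewhere, and error handling is abbreviated.
// Sketch only: dsn and cids are assumed inputs; sqlx and logrus (as log) are already imported in this file.
func fetchBlockIPLDs(dsn string, cids CIDWrapper) (*IPLDs, error) {
	db, err := sqlx.Connect("postgres", dsn)
	if err != nil {
		return nil, err
	}
	fetcher := NewIPLDFetcher(db)
	iplds, err := fetcher.Fetch(cids)
	if err != nil {
		return nil, err
	}
	log.Debugf("fetched %d txs, %d receipts, %d state nodes for block %s",
		len(iplds.Transactions), len(iplds.Receipts), len(iplds.StateNodes), iplds.BlockNumber.String())
	return iplds, nil
}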

View File

@ -0,0 +1,75 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_test
import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/jmoiron/sqlx"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth/test_helpers"
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
)
var _ = Describe("IPLDFetcher", func() {
var (
db *sqlx.DB
pubAndIndexer interfaces.StateDiffIndexer
fetcher *eth.IPLDFetcher
)
Describe("Fetch", func() {
BeforeEach(func() {
var (
err error
tx interfaces.Batch
)
db = shared.SetupDB()
pubAndIndexer = shared.SetupTestStateDiffIndexer(ctx, params.TestChainConfig, test_helpers.Genesis.Hash())
tx, err = pubAndIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
for _, node := range test_helpers.MockStateNodes {
err = pubAndIndexer.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
Expect(err).ToNot(HaveOccurred())
}
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
fetcher = eth.NewIPLDFetcher(db)
})
AfterEach(func() {
shared.TearDownDB(db)
})
It("Fetches and returns IPLDs for the CIDs provided in the CIDWrapper", func() {
iplds, err := fetcher.Fetch(*test_helpers.MockCIDWrapper)
Expect(err).ToNot(HaveOccurred())
Expect(iplds).ToNot(BeNil())
Expect(iplds.TotalDifficulty).To(Equal(test_helpers.MockConvertedPayload.TotalDifficulty))
Expect(iplds.BlockNumber).To(Equal(test_helpers.MockConvertedPayload.Block.Number()))
Expect(iplds.Header).To(Equal(test_helpers.MockIPLDs.Header))
Expect(len(iplds.Uncles)).To(Equal(0))
Expect(iplds.Transactions).To(Equal(test_helpers.MockIPLDs.Transactions))
Expect(iplds.Receipts).To(Equal(test_helpers.MockIPLDs.Receipts))
Expect(iplds.StateNodes).To(Equal(test_helpers.MockIPLDs.StateNodes))
Expect(iplds.StorageNodes).To(Equal(test_helpers.MockIPLDs.StorageNodes))
})
})
})

660
pkg/eth/ipld_retriever.go Normal file
View File

@ -0,0 +1,660 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"fmt"
"strconv"
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
"github.com/ethereum/go-ethereum/statediff/trie_helpers"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/jmoiron/sqlx"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/lib/pq"
)
const (
// node type removed value.
// https://github.com/cerc-io/go-ethereum/blob/271f4d01e7e2767ffd8e0cd469bf545be96f2a84/statediff/indexer/helpers.go#L34
removedNode = 3
RetrieveHeadersByHashesPgStr = `SELECT cid, data
FROM eth.header_cids
INNER JOIN public.blocks ON (
header_cids.mh_key = blocks.key
AND header_cids.block_number = blocks.block_number
)
WHERE block_hash = ANY($1::VARCHAR(66)[])`
RetrieveHeadersByBlockNumberPgStr = `SELECT cid, data
FROM eth.header_cids
INNER JOIN public.blocks ON (
header_cids.mh_key = blocks.key
AND header_cids.block_number = blocks.block_number
)
WHERE header_cids.block_number = $1`
RetrieveHeaderByHashPgStr = `SELECT cid, data
FROM eth.header_cids
INNER JOIN public.blocks ON (
header_cids.mh_key = blocks.key
AND header_cids.block_number = blocks.block_number
)
WHERE block_hash = $1`
RetrieveUnclesByHashesPgStr = `SELECT cid, data
FROM eth.uncle_cids
INNER JOIN public.blocks ON (
uncle_cids.mh_key = blocks.key
AND uncle_cids.block_number = blocks.block_number
)
WHERE block_hash = ANY($1::VARCHAR(66)[])`
RetrieveUnclesByBlockHashPgStr = `SELECT uncle_cids.cid, data
FROM eth.uncle_cids
INNER JOIN eth.header_cids ON (
uncle_cids.header_id = header_cids.block_hash
AND uncle_cids.block_number = header_cids.block_number
)
INNER JOIN public.blocks ON (
uncle_cids.mh_key = blocks.key
AND uncle_cids.block_number = blocks.block_number
)
WHERE header_cids.block_hash = $1
ORDER BY uncle_cids.parent_hash`
RetrieveUnclesByBlockNumberPgStr = `SELECT uncle_cids.cid, data
FROM eth.uncle_cids
INNER JOIN eth.header_cids ON (
uncle_cids.header_id = header_cids.block_hash
AND uncle_cids.block_number = header_cids.block_number
)
INNER JOIN public.blocks ON (
uncle_cids.mh_key = blocks.key
AND uncle_cids.block_number = blocks.block_number
)
WHERE header_cids.block_number = $1`
RetrieveUncleByHashPgStr = `SELECT cid, data
FROM eth.uncle_cids
INNER JOIN public.blocks ON (
uncle_cids.mh_key = blocks.key
AND uncle_cids.block_number = blocks.block_number
)
WHERE block_hash = $1`
RetrieveTransactionsByHashesPgStr = `SELECT DISTINCT ON (tx_hash) cid, data
FROM eth.transaction_cids
INNER JOIN public.blocks ON (
transaction_cids.mh_key = blocks.key
AND transaction_cids.block_number = blocks.block_number
)
WHERE tx_hash = ANY($1::VARCHAR(66)[])`
RetrieveTransactionsByBlockHashPgStr = `SELECT transaction_cids.cid, data
FROM eth.transaction_cids
INNER JOIN eth.header_cids ON (
transaction_cids.header_id = header_cids.block_hash
AND transaction_cids.block_number = header_cids.block_number
)
INNER JOIN public.blocks ON (
transaction_cids.mh_key = blocks.key
AND transaction_cids.block_number = blocks.block_number
)
WHERE block_hash = $1
ORDER BY eth.transaction_cids.index ASC`
RetrieveTransactionsByBlockNumberPgStr = `SELECT transaction_cids.cid, data
FROM eth.transaction_cids
INNER JOIN eth.header_cids ON (
transaction_cids.header_id = header_cids.block_hash
AND transaction_cids.block_number = header_cids.block_number
)
INNER JOIN public.blocks ON (
transaction_cids.mh_key = blocks.key
AND transaction_cids.block_number = blocks.block_number
)
WHERE header_cids.block_number = $1
AND block_hash = (SELECT canonical_header_hash(header_cids.block_number))
ORDER BY eth.transaction_cids.index ASC`
RetrieveTransactionByHashPgStr = `SELECT DISTINCT ON (tx_hash) cid, data
FROM eth.transaction_cids
INNER JOIN public.blocks ON (
transaction_cids.mh_key = blocks.key
AND transaction_cids.block_number = blocks.block_number
)
WHERE tx_hash = $1`
RetrieveReceiptsByTxHashesPgStr = `SELECT receipt_cids.leaf_cid, data
FROM eth.receipt_cids
INNER JOIN eth.transaction_cids ON (
receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.header_id = transaction_cids.header_id
AND receipt_cids.block_number = transaction_cids.block_number
)
INNER JOIN public.blocks ON (
receipt_cids.leaf_mh_key = blocks.key
AND receipt_cids.block_number = blocks.block_number
)
WHERE tx_hash = ANY($1::VARCHAR(66)[])
AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))`
RetrieveReceiptsByBlockHashPgStr = `SELECT receipt_cids.leaf_cid, data, eth.transaction_cids.tx_hash
FROM eth.receipt_cids
INNER JOIN eth.transaction_cids ON (
receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.header_id = transaction_cids.header_id
AND receipt_cids.block_number = transaction_cids.block_number
)
INNER JOIN eth.header_cids ON (
transaction_cids.header_id = header_cids.block_hash
AND transaction_cids.block_number = header_cids.block_number
)
INNER JOIN public.blocks ON (
receipt_cids.leaf_mh_key = blocks.key
AND receipt_cids.block_number = blocks.block_number
)
WHERE block_hash = $1
ORDER BY eth.transaction_cids.index ASC`
RetrieveReceiptsByBlockNumberPgStr = `SELECT receipt_cids.leaf_cid, data
FROM eth.receipt_cids
INNER JOIN eth.transaction_cids ON (
receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.header_id = transaction_cids.header_id
AND receipt_cids.block_number = transaction_cids.block_number
)
INNER JOIN eth.header_cids ON (
transaction_cids.header_id = header_cids.block_hash
AND transaction_cids.block_number = header_cids.block_number
)
INNER JOIN public.blocks ON (
receipt_cids.leaf_mh_key = blocks.key
AND receipt_cids.block_number = blocks.block_number
)
WHERE header_cids.block_number = $1
AND block_hash = (SELECT canonical_header_hash(header_cids.block_number))
ORDER BY eth.transaction_cids.index ASC`
RetrieveReceiptByTxHashPgStr = `SELECT receipt_cids.leaf_cid, data
FROM eth.receipt_cids
INNER JOIN eth.transaction_cids ON (
receipt_cids.tx_id = transaction_cids.tx_hash
AND receipt_cids.header_id = transaction_cids.header_id
AND receipt_cids.block_number = transaction_cids.block_number
)
INNER JOIN public.blocks ON (
receipt_cids.leaf_mh_key = blocks.key
AND receipt_cids.block_number = blocks.block_number
)
WHERE tx_hash = $1
AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))`
RetrieveAccountByLeafKeyAndBlockHashPgStr = `SELECT state_cids.cid, state_cids.mh_key, state_cids.block_number, state_cids.node_type
FROM eth.state_cids
INNER JOIN eth.header_cids ON (
state_cids.header_id = header_cids.block_hash
AND state_cids.block_number = header_cids.block_number
)
WHERE state_leaf_key = $1
AND header_cids.block_number <= (SELECT block_number
FROM eth.header_cids
WHERE block_hash = $2)
AND header_cids.block_hash = (SELECT canonical_header_hash(header_cids.block_number))
ORDER BY header_cids.block_number DESC
LIMIT 1`
RetrieveAccountByLeafKeyAndBlockNumberPgStr = `SELECT state_cids.cid, state_cids.mh_key, state_cids.node_type
FROM eth.state_cids
INNER JOIN eth.header_cids ON (
state_cids.header_id = header_cids.block_hash
AND state_cids.block_number = header_cids.block_number
)
WHERE state_leaf_key = $1
AND header_cids.block_number <= $2
ORDER BY header_cids.block_number DESC
LIMIT 1`
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr = `SELECT storage_cids.cid, storage_cids.mh_key, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
FROM eth.storage_cids
INNER JOIN eth.state_cids ON (
storage_cids.header_id = state_cids.header_id
AND storage_cids.state_path = state_cids.state_path
AND storage_cids.block_number = state_cids.block_number
)
INNER JOIN eth.header_cids ON (
state_cids.header_id = header_cids.block_hash
AND state_cids.block_number = header_cids.block_number
)
WHERE state_leaf_key = $1
AND storage_leaf_key = $2
AND header_cids.block_number <= $3
ORDER BY header_cids.block_number DESC
LIMIT 1`
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `SELECT storage_cids.cid, storage_cids.mh_key, storage_cids.block_number, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
FROM eth.storage_cids
INNER JOIN eth.state_cids ON (
storage_cids.header_id = state_cids.header_id
AND storage_cids.state_path = state_cids.state_path
AND storage_cids.block_number = state_cids.block_number
)
INNER JOIN eth.header_cids ON (
state_cids.header_id = header_cids.block_hash
AND state_cids.block_number = header_cids.block_number
)
WHERE state_leaf_key = $1
AND storage_leaf_key = $2
AND header_cids.block_number <= (SELECT block_number
FROM eth.header_cids
WHERE block_hash = $3)
AND header_cids.block_hash = (SELECT canonical_header_hash(header_cids.block_number))
ORDER BY header_cids.block_number DESC
LIMIT 1`
)
var EmptyNodeValue = make([]byte, common.HashLength)
type rctIpldResult struct {
LeafCID string `db:"leaf_cid"`
Data []byte `db:"data"`
TxHash string `db:"tx_hash"`
}
type ipldResult struct {
CID string `db:"cid"`
Data []byte `db:"data"`
TxHash string `db:"tx_hash"`
}
type IPLDRetriever struct {
db *sqlx.DB
}
func NewIPLDRetriever(db *sqlx.DB) *IPLDRetriever {
return &IPLDRetriever{
db: db,
}
}
// RetrieveHeadersByHashes returns the cids and rlp bytes for the headers corresponding to the provided block hashes
func (r *IPLDRetriever) RetrieveHeadersByHashes(hashes []common.Hash) ([]string, [][]byte, error) {
headerResults := make([]ipldResult, 0)
hashStrs := make([]string, len(hashes))
for i, hash := range hashes {
hashStrs[i] = hash.Hex()
}
if err := r.db.Select(&headerResults, RetrieveHeadersByHashesPgStr, pq.Array(hashStrs)); err != nil {
return nil, nil, err
}
cids := make([]string, len(headerResults))
headers := make([][]byte, len(headerResults))
for i, res := range headerResults {
cids[i] = res.CID
headers[i] = res.Data
}
return cids, headers, nil
}
// RetrieveHeadersByBlockNumber returns the cids and rlp bytes for the headers corresponding to the provided block number
// This can return more than one result since there can be more than one header (non-canonical headers)
func (r *IPLDRetriever) RetrieveHeadersByBlockNumber(number uint64) ([]string, [][]byte, error) {
headerResults := make([]ipldResult, 0)
if err := r.db.Select(&headerResults, RetrieveHeadersByBlockNumberPgStr, number); err != nil {
return nil, nil, err
}
cids := make([]string, len(headerResults))
headers := make([][]byte, len(headerResults))
for i, res := range headerResults {
cids[i] = res.CID
headers[i] = res.Data
}
return cids, headers, nil
}
// RetrieveHeaderByHash returns the cid and rlp bytes for the header corresponding to the provided block hash
func (r *IPLDRetriever) RetrieveHeaderByHash(tx *sqlx.Tx, hash common.Hash) (string, []byte, error) {
headerResult := new(ipldResult)
return headerResult.CID, headerResult.Data, tx.Get(headerResult, RetrieveHeaderByHashPgStr, hash.Hex())
}
// RetrieveUnclesByHashes returns the cids and rlp bytes for the uncles corresponding to the provided uncle hashes
func (r *IPLDRetriever) RetrieveUnclesByHashes(hashes []common.Hash) ([]string, [][]byte, error) {
uncleResults := make([]ipldResult, 0)
hashStrs := make([]string, len(hashes))
for i, hash := range hashes {
hashStrs[i] = hash.Hex()
}
if err := r.db.Select(&uncleResults, RetrieveUnclesByHashesPgStr, pq.Array(hashStrs)); err != nil {
return nil, nil, err
}
cids := make([]string, len(uncleResults))
uncles := make([][]byte, len(uncleResults))
for i, res := range uncleResults {
cids[i] = res.CID
uncles[i] = res.Data
}
return cids, uncles, nil
}
// RetrieveUnclesByBlockHash returns the cids and rlp bytes for the uncles corresponding to the provided block hash (of non-ommer root block)
func (r *IPLDRetriever) RetrieveUnclesByBlockHash(tx *sqlx.Tx, hash common.Hash) ([]string, [][]byte, error) {
uncleResults := make([]ipldResult, 0)
if err := tx.Select(&uncleResults, RetrieveUnclesByBlockHashPgStr, hash.Hex()); err != nil {
return nil, nil, err
}
cids := make([]string, len(uncleResults))
uncles := make([][]byte, len(uncleResults))
for i, res := range uncleResults {
cids[i] = res.CID
uncles[i] = res.Data
}
return cids, uncles, nil
}
// RetrieveUnclesByBlockNumber returns the cids and rlp bytes for the uncles corresponding to the provided block number (of non-ommer root block)
func (r *IPLDRetriever) RetrieveUnclesByBlockNumber(number uint64) ([]string, [][]byte, error) {
uncleResults := make([]ipldResult, 0)
if err := r.db.Select(&uncleResults, RetrieveUnclesByBlockNumberPgStr, number); err != nil {
return nil, nil, err
}
cids := make([]string, len(uncleResults))
uncles := make([][]byte, len(uncleResults))
for i, res := range uncleResults {
cids[i] = res.CID
uncles[i] = res.Data
}
return cids, uncles, nil
}
// RetrieveUncleByHash returns the cid and rlp bytes for the uncle corresponding to the provided uncle hash
func (r *IPLDRetriever) RetrieveUncleByHash(hash common.Hash) (string, []byte, error) {
uncleResult := new(ipldResult)
return uncleResult.CID, uncleResult.Data, r.db.Get(uncleResult, RetrieveUncleByHashPgStr, hash.Hex())
}
// RetrieveTransactionsByHashes returns the cids and rlp bytes for the transactions corresponding to the provided tx hashes
func (r *IPLDRetriever) RetrieveTransactionsByHashes(hashes []common.Hash) ([]string, [][]byte, error) {
txResults := make([]ipldResult, 0)
hashStrs := make([]string, len(hashes))
for i, hash := range hashes {
hashStrs[i] = hash.Hex()
}
if err := r.db.Select(&txResults, RetrieveTransactionsByHashesPgStr, pq.Array(hashStrs)); err != nil {
return nil, nil, err
}
cids := make([]string, len(txResults))
txs := make([][]byte, len(txResults))
for i, res := range txResults {
cids[i] = res.CID
txs[i] = res.Data
}
return cids, txs, nil
}
// RetrieveTransactionsByBlockHash returns the cids and rlp bytes for the transactions corresponding to the provided block hash
func (r *IPLDRetriever) RetrieveTransactionsByBlockHash(tx *sqlx.Tx, hash common.Hash) ([]string, [][]byte, error) {
txResults := make([]ipldResult, 0)
if err := tx.Select(&txResults, RetrieveTransactionsByBlockHashPgStr, hash.Hex()); err != nil {
return nil, nil, err
}
cids := make([]string, len(txResults))
txs := make([][]byte, len(txResults))
for i, res := range txResults {
cids[i] = res.CID
txs[i] = res.Data
}
return cids, txs, nil
}
// RetrieveTransactionsByBlockNumber returns the cids and rlp bytes for the transactions corresponding to the provided block number
func (r *IPLDRetriever) RetrieveTransactionsByBlockNumber(number uint64) ([]string, [][]byte, error) {
txResults := make([]ipldResult, 0)
if err := r.db.Select(&txResults, RetrieveTransactionsByBlockNumberPgStr, number); err != nil {
return nil, nil, err
}
cids := make([]string, len(txResults))
txs := make([][]byte, len(txResults))
for i, res := range txResults {
cids[i] = res.CID
txs[i] = res.Data
}
return cids, txs, nil
}
// RetrieveTransactionByTxHash returns the cid and rlp bytes for the transaction corresponding to the provided tx hash
func (r *IPLDRetriever) RetrieveTransactionByTxHash(hash common.Hash) (string, []byte, error) {
txResult := new(ipldResult)
return txResult.CID, txResult.Data, r.db.Get(txResult, RetrieveTransactionByHashPgStr, hash.Hex())
}
// DecodeLeafNode decodes the leaf node data
func DecodeLeafNode(node []byte) ([]byte, error) {
var nodeElements []interface{}
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
return nil, err
}
ty, err := trie_helpers.CheckKeyType(nodeElements)
if err != nil {
return nil, err
}
if ty != sdtypes.Leaf {
return nil, fmt.Errorf("expected leaf node but found %s", ty)
}
return nodeElements[1].([]byte), nil
}
// RetrieveReceiptsByTxHashes returns the cids and rlp bytes for the receipts corresponding to the provided tx hashes
func (r *IPLDRetriever) RetrieveReceiptsByTxHashes(hashes []common.Hash) ([]string, [][]byte, error) {
rctResults := make([]rctIpldResult, 0)
hashStrs := make([]string, len(hashes))
for i, hash := range hashes {
hashStrs[i] = hash.Hex()
}
if err := r.db.Select(&rctResults, RetrieveReceiptsByTxHashesPgStr, pq.Array(hashStrs)); err != nil {
return nil, nil, err
}
cids := make([]string, len(rctResults))
rcts := make([][]byte, len(rctResults))
for i, res := range rctResults {
cids[i] = res.LeafCID
nodeVal, err := DecodeLeafNode(res.Data)
if err != nil {
return nil, nil, err
}
rcts[i] = nodeVal
}
return cids, rcts, nil
}
// RetrieveReceiptsByBlockHash returns the cids and rlp bytes for the receipts corresponding to the provided block hash.
// cid returned corresponds to the leaf node data which contains the receipt.
func (r *IPLDRetriever) RetrieveReceiptsByBlockHash(tx *sqlx.Tx, hash common.Hash) ([]string, [][]byte, []common.Hash, error) {
rctResults := make([]rctIpldResult, 0)
if err := tx.Select(&rctResults, RetrieveReceiptsByBlockHashPgStr, hash.Hex()); err != nil {
return nil, nil, nil, err
}
cids := make([]string, len(rctResults))
rcts := make([][]byte, len(rctResults))
txs := make([]common.Hash, len(rctResults))
for i, res := range rctResults {
cids[i] = res.LeafCID
nodeVal, err := DecodeLeafNode(res.Data)
if err != nil {
return nil, nil, nil, err
}
rcts[i] = nodeVal
txs[i] = common.HexToHash(res.TxHash)
}
return cids, rcts, txs, nil
}
// RetrieveReceiptsByBlockNumber returns the cids and rlp bytes for the receipts corresponding to the provided block number.
// cid returned corresponds to the leaf node data which contains the receipt.
func (r *IPLDRetriever) RetrieveReceiptsByBlockNumber(number uint64) ([]string, [][]byte, error) {
rctResults := make([]rctIpldResult, 0)
if err := r.db.Select(&rctResults, RetrieveReceiptsByBlockNumberPgStr, number); err != nil {
return nil, nil, err
}
cids := make([]string, len(rctResults))
rcts := make([][]byte, len(rctResults))
for i, res := range rctResults {
cids[i] = res.LeafCID
nodeVal, err := DecodeLeafNode(res.Data)
if err != nil {
return nil, nil, err
}
rcts[i] = nodeVal
}
return cids, rcts, nil
}
// RetrieveReceiptByHash returns the cid and rlp bytes for the receipt corresponding to the provided tx hash.
// cid returned corresponds to the leaf node data which contains the receipt.
func (r *IPLDRetriever) RetrieveReceiptByHash(hash common.Hash) (string, []byte, error) {
rctResult := new(rctIpldResult)
if err := r.db.Get(rctResult, RetrieveReceiptByTxHashPgStr, hash.Hex()); err != nil {
return "", nil, err
}
nodeVal, err := DecodeLeafNode(rctResult.Data)
if err != nil {
return "", nil, err
}
return rctResult.LeafCID, nodeVal, nil
}
type nodeInfo struct {
CID string `db:"cid"`
MhKey string `db:"mh_key"`
BlockNumber string `db:"block_number"`
Data []byte `db:"data"`
NodeType int `db:"node_type"`
StateLeafRemoved bool `db:"state_leaf_removed"`
}
// RetrieveAccountByAddressAndBlockHash returns the cid and rlp bytes for the account corresponding to the provided address and block hash
// TODO: ensure this handles deleted accounts appropriately
func (r *IPLDRetriever) RetrieveAccountByAddressAndBlockHash(address common.Address, hash common.Hash) (string, []byte, error) {
accountResult := new(nodeInfo)
leafKey := crypto.Keccak256Hash(address.Bytes())
if err := r.db.Get(accountResult, RetrieveAccountByLeafKeyAndBlockHashPgStr, leafKey.Hex(), hash.Hex()); err != nil {
return "", nil, err
}
if accountResult.NodeType == removedNode {
return "", EmptyNodeValue, nil
}
blockNumber, err := strconv.ParseUint(accountResult.BlockNumber, 10, 64)
if err != nil {
return "", nil, err
}
accountResult.Data, err = shared.FetchIPLD(r.db, accountResult.MhKey, blockNumber)
if err != nil {
return "", nil, err
}
var i []interface{}
if err := rlp.DecodeBytes(accountResult.Data, &i); err != nil {
return "", nil, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
}
if len(i) != 2 {
return "", nil, fmt.Errorf("eth IPLDRetriever expected state leaf node rlp to decode into two elements")
}
return accountResult.CID, i[1].([]byte), nil
}
// RetrieveAccountByAddressAndBlockNumber returns the cid and rlp bytes for the account corresponding to the provided address and block number
// This can return a non-canonical account
func (r *IPLDRetriever) RetrieveAccountByAddressAndBlockNumber(address common.Address, number uint64) (string, []byte, error) {
accountResult := new(nodeInfo)
leafKey := crypto.Keccak256Hash(address.Bytes())
if err := r.db.Get(accountResult, RetrieveAccountByLeafKeyAndBlockNumberPgStr, leafKey.Hex(), number); err != nil {
return "", nil, err
}
if accountResult.NodeType == removedNode {
return "", EmptyNodeValue, nil
}
var err error
accountResult.Data, err = shared.FetchIPLD(r.db, accountResult.MhKey, number)
if err != nil {
return "", nil, err
}
var i []interface{}
if err := rlp.DecodeBytes(accountResult.Data, &i); err != nil {
return "", nil, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
}
if len(i) != 2 {
return "", nil, fmt.Errorf("eth IPLDRetriever expected state leaf node rlp to decode into two elements")
}
return accountResult.CID, i[1].([]byte), nil
}
// RetrieveStorageAtByAddressAndStorageSlotAndBlockHash returns the cid and rlp bytes for the storage value corresponding to the provided address, storage slot, and block hash
func (r *IPLDRetriever) RetrieveStorageAtByAddressAndStorageSlotAndBlockHash(address common.Address, key, hash common.Hash) (string, []byte, []byte, error) {
storageResult := new(nodeInfo)
stateLeafKey := crypto.Keccak256Hash(address.Bytes())
storageHash := crypto.Keccak256Hash(key.Bytes())
if err := r.db.Get(storageResult, RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr, stateLeafKey.Hex(), storageHash.Hex(), hash.Hex()); err != nil {
return "", nil, nil, err
}
if storageResult.StateLeafRemoved || storageResult.NodeType == removedNode {
return "", EmptyNodeValue, EmptyNodeValue, nil
}
blockNumber, err := strconv.ParseUint(storageResult.BlockNumber, 10, 64)
if err != nil {
return "", nil, nil, err
}
storageResult.Data, err = shared.FetchIPLD(r.db, storageResult.MhKey, blockNumber)
if err != nil {
return "", nil, nil, err
}
var i []interface{}
if err := rlp.DecodeBytes(storageResult.Data, &i); err != nil {
err = fmt.Errorf("error decoding storage leaf node rlp: %s", err.Error())
return "", nil, nil, err
}
if len(i) != 2 {
return "", nil, nil, fmt.Errorf("eth IPLDRetriever expected storage leaf node rlp to decode into two elements")
}
return storageResult.CID, storageResult.Data, i[1].([]byte), nil
}
// RetrieveStorageAtByAddressAndStorageKeyAndBlockNumber returns the cid and rlp bytes for the storage value corresponding to the provided address, storage key, and block number
// This can return a non-canonical value
func (r *IPLDRetriever) RetrieveStorageAtByAddressAndStorageKeyAndBlockNumber(address common.Address, storageLeafKey common.Hash, number uint64) (string, []byte, error) {
storageResult := new(nodeInfo)
stateLeafKey := crypto.Keccak256Hash(address.Bytes())
if err := r.db.Get(storageResult, RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr, stateLeafKey.Hex(), storageLeafKey.Hex(), number); err != nil {
return "", nil, err
}
if storageResult.StateLeafRemoved || storageResult.NodeType == removedNode {
return "", EmptyNodeValue, nil
}
var err error
storageResult.Data, err = shared.FetchIPLD(r.db, storageResult.MhKey, number)
if err != nil {
return "", nil, err
}
var i []interface{}
if err := rlp.DecodeBytes(storageResult.Data, &i); err != nil {
return "", nil, fmt.Errorf("error decoding storage leaf node rlp: %s", err.Error())
}
if len(i) != 2 {
return "", nil, fmt.Errorf("eth IPLDRetriever expected storage leaf node rlp to decode into two elements")
}
return storageResult.CID, i[1].([]byte), nil
}
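A hedged sketch of reading an account back through the retriever; db, addr, and height are assumed inputs, and decoding into types.StateAccount (which would require importing core/types and bytes in this file) is an assumption about the account encoding.
// Sketch only: not part of this diff.
func accountAt(db *sqlx.DB, addr common.Address, height uint64) (*types.StateAccount, error) {
	retriever := NewIPLDRetriever(db)
	_, accountRLP, err := retriever.RetrieveAccountByAddressAndBlockNumber(addr, height)
	if err != nil {
		return nil, err
	}
	// A removed account is returned as EmptyNodeValue with an empty CID and will not decode as an account.
	if bytes.Equal(accountRLP, EmptyNodeValue) {
		return nil, nil
	}
	var acct types.StateAccount
	if err := rlp.DecodeBytes(accountRLP, &acct); err != nil {
		return nil, err
	}
	return &acct, nil
}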

View File

@ -1,102 +0,0 @@
package eth
import (
"fmt"
"github.com/ethereum/go-ethereum/rlp"
"github.com/cerc-io/ipld-eth-statedb/trie_by_cid/trie"
)
// NodeType is used for explicitly setting the type of a node
type NodeType string
const (
Unknown NodeType = "Unknown"
Branch NodeType = "Branch"
Extension NodeType = "Extension"
Leaf NodeType = "Leaf"
Removed NodeType = "Removed" // used to represent paths which have been emptied
)
func (n NodeType) Int() int {
switch n {
case Branch:
return 0
case Extension:
return 1
case Leaf:
return 2
case Removed:
return 3
default:
return -1
}
}
// CheckKeyType checks what type of key we have
func CheckKeyType(elements []interface{}) (NodeType, error) {
if len(elements) > 2 {
return Branch, nil
}
if len(elements) < 2 {
return Unknown, fmt.Errorf("node cannot be less than two elements in length")
}
switch elements[0].([]byte)[0] / 16 {
case '\x00':
return Extension, nil
case '\x01':
return Extension, nil
case '\x02':
return Leaf, nil
case '\x03':
return Leaf, nil
default:
return Unknown, fmt.Errorf("unknown hex prefix")
}
}
// StateNode holds the data for a single state diff node
type StateNode struct {
NodeType NodeType `json:"nodeType" gencodec:"required"`
Path []byte `json:"path" gencodec:"required"`
NodeValue []byte `json:"value" gencodec:"required"`
StorageNodes []StorageNode `json:"storage"`
LeafKey []byte `json:"leafKey"`
}
// StorageNode holds the data for a single storage diff node
type StorageNode struct {
NodeType NodeType `json:"nodeType" gencodec:"required"`
Path []byte `json:"path" gencodec:"required"`
NodeValue []byte `json:"value" gencodec:"required"`
LeafKey []byte `json:"leafKey"`
}
func ResolveNode(path []byte, node []byte, trieDB *trie.Database) (StateNode, []interface{}, error) {
var nodeElements []interface{}
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
return StateNode{}, nil, err
}
ty, err := CheckKeyType(nodeElements)
if err != nil {
return StateNode{}, nil, err
}
nodePath := make([]byte, len(path))
copy(nodePath, path)
return StateNode{
NodeType: ty,
Path: nodePath,
NodeValue: node,
}, nodeElements, nil
}
// ResolveNodeIt returns the state diff node pointed to by the iterator.
func ResolveNodeIt(it trie.NodeIterator, trieDB *trie.Database) (StateNode, []interface{}, error) {
node, err := it.NodeBlob(), it.Error()
if err != nil {
return StateNode{}, nil, err
}
return ResolveNode(it.Path(), node, trieDB)
}

View File

@ -1,555 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"bytes"
"fmt"
"math/big"
"strconv"
"github.com/cerc-io/plugeth-statediff/indexer/models"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
"gorm.io/driver/postgres"
"gorm.io/gorm"
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
)
// Retriever is used for fetching
type Retriever struct {
db *sqlx.DB
gormDB *gorm.DB
}
type IPLDModelRecord struct {
models.IPLDModel
}
// TableName overrides the table name used by IPLDModelRecord
func (IPLDModelRecord) TableName() string {
return "ipld.blocks"
}
type HeaderCIDRecord struct {
CID string `gorm:"column:cid"`
BlockHash string `gorm:"primaryKey"`
BlockNumber string `gorm:"primaryKey"`
ParentHash string
Timestamp uint64
StateRoot string
TotalDifficulty string `gorm:"column:td"`
TxRoot string
RctRoot string `gorm:"column:receipt_root"`
UnclesHash string
Bloom []byte
// gorm doesn't check whether the foreign key exists in the database,
// so relations must be eager loaded using Preload.
TransactionCIDs []TransactionCIDRecord `gorm:"foreignKey:HeaderID,BlockNumber;references:BlockHash,BlockNumber"`
IPLD IPLDModelRecord `gorm:"foreignKey:CID,BlockNumber;references:Key,BlockNumber"`
}
// TableName overrides the table name used by HeaderCIDRecord
func (HeaderCIDRecord) TableName() string {
return "eth.header_cids"
}
type TransactionCIDRecord struct {
CID string `gorm:"column:cid"`
TxHash string `gorm:"primaryKey"`
BlockNumber string `gorm:"primaryKey"`
HeaderID string `gorm:"column:header_id"`
Index int64
Src string
Dst string
IPLD IPLDModelRecord `gorm:"foreignKey:CID,BlockNumber;references:Key,BlockNumber"`
}
type StateAccountRecord struct {
Nonce uint64 `db:"nonce"`
Balance string `db:"balance"`
Root string `db:"storage_root"`
CodeHash []byte `db:"code_hash"`
Removed bool `db:"removed"`
}
// TableName overrides the table name used by TransactionCIDRecord
func (TransactionCIDRecord) TableName() string {
return "eth.transaction_cids"
}
// NewRetriever returns a pointer to a new Retriever which supports the Retriever interface
func NewRetriever(db *sqlx.DB) *Retriever {
gormDB, err := gorm.Open(postgres.New(postgres.Config{
Conn: db,
}), &gorm.Config{})
if err != nil {
log.Error(err)
return nil
}
return &Retriever{
db: db,
gormDB: gormDB,
}
}
// RetrieveFirstBlockNumber is used to retrieve the first block number in the db
func (r *Retriever) RetrieveFirstBlockNumber() (int64, error) {
var blockNumber int64
err := r.db.Get(&blockNumber, "SELECT block_number FROM ipld.blocks ORDER BY block_number ASC LIMIT 1")
return blockNumber, err
}
// RetrieveLastBlockNumber is used to retrieve the latest block number in the db
func (r *Retriever) RetrieveLastBlockNumber() (int64, error) {
var blockNumber int64
err := r.db.Get(&blockNumber, "SELECT block_number FROM ipld.blocks ORDER BY block_number DESC LIMIT 1")
return blockNumber, err
}
func topicFilterCondition(id *int, topics [][]string, args []interface{}, pgStr string, first bool) (string, []interface{}) {
for i, topicSet := range topics {
if len(topicSet) == 0 {
continue
}
if !first {
pgStr += " AND"
} else {
first = false
}
pgStr += fmt.Sprintf(` eth.log_cids.topic%d = ANY ($%d)`, i, *id)
args = append(args, pq.Array(topicSet))
*id++
}
return pgStr, args
}
func logFilterCondition(id *int, pgStr string, args []interface{}, rctFilter ReceiptFilter) (string, []interface{}) {
if len(rctFilter.LogAddresses) > 0 {
pgStr += fmt.Sprintf(` AND eth.log_cids.address = ANY ($%d)`, *id)
args = append(args, pq.Array(rctFilter.LogAddresses))
*id++
}
// Filter on topics if there are any
if hasTopics(rctFilter.Topics) {
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, false)
}
return pgStr, args
}
func receiptFilterConditions(id *int, pgStr string, args []interface{}, rctFilter ReceiptFilter, txHashes []string) (string, []interface{}) {
rctCond := " AND (receipt_cids.tx_id = ANY ( "
logQuery := "SELECT rct_id FROM eth.log_cids WHERE"
if len(rctFilter.LogAddresses) > 0 {
// Filter on log contract addresses if there are any
pgStr += fmt.Sprintf(`%s %s eth.log_cids.address = ANY ($%d)`, rctCond, logQuery, *id)
args = append(args, pq.Array(rctFilter.LogAddresses))
*id++
// Filter on topics if there are any
if hasTopics(rctFilter.Topics) {
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, false)
}
pgStr += ")"
// Filter on txHashes if there are any, and we are matching txs
if rctFilter.MatchTxs && len(txHashes) > 0 {
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d)`, *id)
args = append(args, pq.Array(txHashes))
}
pgStr += ")"
} else { // If there are no contract addresses to filter on
// Filter on topics if there are any
if hasTopics(rctFilter.Topics) {
pgStr += rctCond + logQuery
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, true)
pgStr += ")"
// Filter on txHashes if there are any, and we are matching txs
if rctFilter.MatchTxs && len(txHashes) > 0 {
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d)`, *id)
args = append(args, pq.Array(txHashes))
}
pgStr += ")"
} else if rctFilter.MatchTxs && len(txHashes) > 0 {
// If there are no contract addresses or topics to filter on,
// Filter on txHashes if there are any, and we are matching txs
pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d)`, *id)
args = append(args, pq.Array(txHashes))
}
}
return pgStr, args
}
// RetrieveFilteredGQLLogs retrieves and returns all the log CIDs for the provided blockHash that conform to the provided
// filter parameters.
func (r *Retriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockHash *common.Hash, blockNumber *big.Int) ([]LogResult, error) {
log.Debug("retrieving log cids for receipt ids with block hash", blockHash.String())
args := make([]interface{}, 0, 4)
id := 1
pgStr := RetrieveFilteredGQLLogs
args = append(args, blockHash.String())
id++
if blockNumber != nil {
pgStr += ` AND receipt_cids.block_number = $2`
id++
args = append(args, blockNumber.Int64())
}
pgStr, args = logFilterCondition(&id, pgStr, args, rctFilter)
pgStr += ` ORDER BY log_cids.block_number, log_cids.index`
logs := make([]LogResult, 0)
err := tx.Select(&logs, pgStr, args...)
if err != nil {
return nil, err
}
return logs, nil
}
// RetrieveFilteredLogsForBlock retrieves and returns all the log CIDs for the block that conform to the provided filter parameters.
func (r *Retriever) RetrieveFilteredLogsForBlock(db *sqlx.DB, rctFilter ReceiptFilter, blockHash *common.Hash) ([]LogResult, error) {
return r.retrieveFilteredLogs(db, rctFilter, -1, -1, blockHash)
}
// RetrieveFilteredLogsForBlockRange retrieves and returns all the log CIDs for the blocks in the range that conform
// to the provided filter parameters.
func (r *Retriever) RetrieveFilteredLogsForBlockRange(db *sqlx.DB, rctFilter ReceiptFilter, startBlockNumber int64, stopBlockNumber int64) ([]LogResult, error) {
return r.retrieveFilteredLogs(db, rctFilter, startBlockNumber, stopBlockNumber, nil)
}
// retrieveFilteredLogs retrieves all the log CIDs either for a single block (by hash) or range of blocks (by number) which
// conform to the provided filter parameters.
func (r *Retriever) retrieveFilteredLogs(db *sqlx.DB, rctFilter ReceiptFilter, startBlockNumber int64, stopBlockNumber int64, blockHash *common.Hash) ([]LogResult, error) {
log.Debug("retrieving log cids for receipt ids")
args := make([]interface{}, 0, 4)
var pgStr string
id := 1
if blockHash != nil {
pgStr = RetrieveFilteredLogsSingle
args = append(args, blockHash.String())
id++
} else {
pgStr = RetrieveFilteredLogsRange
args = append(args, startBlockNumber)
id++
args = append(args, stopBlockNumber)
id++
}
pgStr, args = logFilterCondition(&id, pgStr, args, rctFilter)
pgStr += ` ORDER BY eth.log_cids.block_number, eth.log_cids.index`
logs := make([]LogResult, 0)
err := db.Select(&logs, pgStr, args...)
if err != nil {
return nil, err
}
// decode logs and extract original contract Data
for i, log := range logs {
var buf []interface{}
r := bytes.NewReader(log.LogLeafData)
if err := rlp.Decode(r, &buf); err != nil {
return nil, err
}
logs[i].Data = buf[2].([]byte)
}
return logs, nil
}
func hasTopics(topics [][]string) bool {
for _, topicSet := range topics {
if len(topicSet) > 0 {
return true
}
}
return false
}
// RetrieveBlockNumberByHash returns the block number for the given block hash
func (r *Retriever) RetrieveBlockNumberByHash(tx *sqlx.Tx, blockHash common.Hash) (uint64, error) {
log.Debug("retrieving block number for block hash ", blockHash.String())
pgStr := `SELECT CAST(block_number as TEXT) FROM eth.header_cids WHERE block_hash = $1`
var blockNumberStr string
if err := tx.Get(&blockNumberStr, pgStr, blockHash.String()); err != nil {
return 0, err
}
return strconv.ParseUint(blockNumberStr, 10, 64)
}
// RetrieveHeaderAndTxCIDsByBlockNumber retrieves header CIDs and their associated tx CIDs by block number
func (r *Retriever) RetrieveHeaderAndTxCIDsByBlockNumber(blockNumber int64) ([]HeaderCIDRecord, error) {
log.Debug("retrieving header cids and tx cids for block number ", blockNumber)
var headerCIDs []HeaderCIDRecord
// https://github.com/go-gorm/gorm/issues/4083#issuecomment-778883283
// Will use join for TransactionCIDs once preload for 1:N is supported.
err := r.gormDB.Preload("TransactionCIDs", func(tx *gorm.DB) *gorm.DB {
return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id", "block_number")
}).Joins("IPLD").Find(&headerCIDs, "header_cids.block_number = ?", blockNumber).Error
if err != nil {
log.Error("header cid retrieval error")
return nil, err
}
return headerCIDs, nil
}
// RetrieveHeaderAndTxCIDsByBlockHash retrieves header CID and their associated tx CIDs by block hash (and optionally block number)
func (r *Retriever) RetrieveHeaderAndTxCIDsByBlockHash(blockHash common.Hash, blockNumber *big.Int) (HeaderCIDRecord, error) {
log.Debug("retrieving header cid and tx cids for block hash ", blockHash.String())
var headerCIDs []HeaderCIDRecord
conditions := map[string]interface{}{"block_hash": blockHash.String()}
if blockNumber != nil {
conditions["header_cids.block_number"] = blockNumber.Int64()
}
// https://github.com/go-gorm/gorm/issues/4083#issuecomment-778883283
// Will use join for TransactionCIDs once preload for 1:N is supported.
err := r.gormDB.Preload("TransactionCIDs", func(tx *gorm.DB) *gorm.DB {
return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id", "block_number")
}).Joins("IPLD").Find(&headerCIDs, conditions).Error
if err != nil {
log.Error("header cid retrieval error")
return HeaderCIDRecord{}, err
}
if len(headerCIDs) == 0 {
return HeaderCIDRecord{}, errHeaderHashNotFound
} else if len(headerCIDs) > 1 {
return HeaderCIDRecord{}, errMultipleHeadersForHash
}
return headerCIDs[0], nil
}
// RetrieveTxCIDByHash returns the tx for the given tx hash (and optionally block number)
func (r *Retriever) RetrieveTxCIDByHash(txHash string, blockNumber *big.Int) (TransactionCIDRecord, error) {
log.Debug("retrieving tx cid for tx hash ", txHash)
var txCIDs []TransactionCIDRecord
var err error
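// both query branches below restrict results to the canonical chain: canonical_header_hash
// is assumed to be a helper function defined by the ipld-eth-db schema that returns the
// canonical header hash at the given block number, so transactions indexed only under
// non-canonical headers are excluded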
if blockNumber != nil {
err = r.gormDB.Joins("IPLD").Find(&txCIDs, "tx_hash = ? AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number)) AND transaction_cids.block_number = ?", txHash, blockNumber.Int64()).Error
} else {
err = r.gormDB.Joins("IPLD").Find(&txCIDs, "tx_hash = ? AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))", txHash).Error
}
if err != nil {
log.Error("tx retrieval error")
return TransactionCIDRecord{}, err
}
if len(txCIDs) == 0 {
return TransactionCIDRecord{}, errTxHashNotFound
} else if len(txCIDs) > 1 {
// a transaction can be part of only one canonical block
return TransactionCIDRecord{}, errTxHashInMultipleBlocks
}
return txCIDs[0], nil
}
var EmptyNodeValue = make([]byte, common.HashLength)
// RetrieveHeaderByHash returns the cid and rlp bytes for the header corresponding to the provided block hash
func (r *Retriever) RetrieveHeaderByHash(hash common.Hash) (string, []byte, error) {
headerResult := new(ipldResult)
return headerResult.CID, headerResult.Data, r.db.Get(headerResult, RetrieveHeaderByHashPgStr, hash.Hex())
}
// RetrieveHeaderByHash2 returns the cid and rlp bytes for the header corresponding to the provided block hash
// using a sqlx.Tx
func (r *Retriever) RetrieveHeaderByHash2(tx *sqlx.Tx, hash common.Hash) (string, []byte, error) {
headerResult := new(ipldResult)
return headerResult.CID, headerResult.Data, tx.Get(headerResult, RetrieveHeaderByHashPgStr, hash.Hex())
}
// RetrieveUncles returns the cid and rlp bytes for the uncle list corresponding to the provided block hash, number (of the non-ommer root block)
func (r *Retriever) RetrieveUncles(tx *sqlx.Tx, hash common.Hash, number uint64) (string, []byte, error) {
uncleResult := new(ipldResult)
if err := tx.Get(uncleResult, RetrieveUnclesPgStr, hash.Hex(), number); err != nil {
return "", nil, err
}
return uncleResult.CID, uncleResult.Data, nil
}
// RetrieveUnclesByBlockHash returns the cid and rlp bytes for the uncle list corresponding to the provided block hash (of the non-ommer root block)
func (r *Retriever) RetrieveUnclesByBlockHash(tx *sqlx.Tx, hash common.Hash) (string, []byte, error) {
uncleResult := new(ipldResult)
if err := tx.Get(uncleResult, RetrieveUnclesByBlockHashPgStr, hash.Hex()); err != nil {
return "", nil, err
}
return uncleResult.CID, uncleResult.Data, nil
}
// RetrieveTransactions returns the cids and rlp bytes for the transactions corresponding to the provided block hash, number
func (r *Retriever) RetrieveTransactions(tx *sqlx.Tx, hash common.Hash, number uint64) ([]string, [][]byte, error) {
txResults := make([]ipldResult, 0)
if err := tx.Select(&txResults, RetrieveTransactionsPgStr, hash.Hex(), number); err != nil {
return nil, nil, err
}
cids := make([]string, len(txResults))
txs := make([][]byte, len(txResults))
for i, res := range txResults {
cids[i] = res.CID
txs[i] = res.Data
}
return cids, txs, nil
}
// RetrieveTransactionsByBlockHash returns the cids and rlp bytes for the transactions corresponding to the provided block hash
func (r *Retriever) RetrieveTransactionsByBlockHash(tx *sqlx.Tx, hash common.Hash) ([]string, [][]byte, error) {
txResults := make([]ipldResult, 0)
if err := tx.Select(&txResults, RetrieveTransactionsByBlockHashPgStr, hash.Hex()); err != nil {
return nil, nil, err
}
cids := make([]string, len(txResults))
txs := make([][]byte, len(txResults))
for i, res := range txResults {
cids[i] = res.CID
txs[i] = res.Data
}
return cids, txs, nil
}
// DecodeLeafNode decodes the leaf node data
func DecodeLeafNode(node []byte) ([]byte, error) {
var nodeElements []interface{}
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
return nil, err
}
ok, err := IsLeaf(nodeElements)
if err != nil {
return nil, err
}
if !ok {
return nil, fmt.Errorf("expected leaf node but found %v", nodeElements)
}
return nodeElements[1].([]byte), nil
}
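// encodeLeafNodeExample is an illustrative sketch (not part of the original source) of the
// structure DecodeLeafNode expects: a trie leaf node is the RLP encoding of a two-element
// list [encodedPath, value], and DecodeLeafNode returns element index 1 once IsLeaf has
// accepted the path prefix. The 0x20 prefix below assumes the hex-prefix encoding of an
// even-length leaf path.
func encodeLeafNodeExample(packedPathBytes, value []byte) ([]byte, error) {
encodedPath := append([]byte{0x20}, packedPathBytes...)
return rlp.EncodeToBytes([]interface{}{encodedPath, value})
}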
// RetrieveReceipts returns the cids and rlp bytes for the receipts corresponding to the provided block hash, number.
// The returned cid corresponds to the leaf node data which contains the receipt.
func (r *Retriever) RetrieveReceipts(tx *sqlx.Tx, hash common.Hash, number uint64) ([]string, [][]byte, []common.Hash, error) {
rctResults := make([]ipldResult, 0)
if err := tx.Select(&rctResults, RetrieveReceiptsPgStr, hash.Hex(), number); err != nil {
return nil, nil, nil, err
}
cids := make([]string, len(rctResults))
rcts := make([][]byte, len(rctResults))
txs := make([]common.Hash, len(rctResults))
for i, res := range rctResults {
cids[i] = res.CID
rcts[i] = res.Data
txs[i] = common.HexToHash(res.TxHash)
}
return cids, rcts, txs, nil
}
// RetrieveReceiptsByBlockHash returns the cids and rlp bytes for the receipts corresponding to the provided block hash.
// The returned cid corresponds to the leaf node data which contains the receipt.
func (r *Retriever) RetrieveReceiptsByBlockHash(tx *sqlx.Tx, hash common.Hash) ([]string, [][]byte, []common.Hash, error) {
rctResults := make([]ipldResult, 0)
if err := tx.Select(&rctResults, RetrieveReceiptsByBlockHashPgStr, hash.Hex()); err != nil {
return nil, nil, nil, err
}
cids := make([]string, len(rctResults))
rcts := make([][]byte, len(rctResults))
txs := make([]common.Hash, len(rctResults))
for i, res := range rctResults {
cids[i] = res.CID
nodeVal, err := DecodeLeafNode(res.Data)
if err != nil {
return nil, nil, nil, err
}
rcts[i] = nodeVal
txs[i] = common.HexToHash(res.TxHash)
}
return cids, rcts, txs, nil
}
// RetrieveAccountByAddressAndBlockHash returns the cid and rlp bytes for the account corresponding to the provided address and block hash
// TODO: ensure this handles deleted accounts appropriately
func (r *Retriever) RetrieveAccountByAddressAndBlockHash(address common.Address, hash common.Hash) (StateAccountRecord, error) {
var accountResult StateAccountRecord
leafKey := crypto.Keccak256Hash(address.Bytes())
if err := r.db.Get(&accountResult, RetrieveAccountByLeafKeyAndBlockHashPgStr, leafKey.Hex(), hash.Hex()); err != nil {
return StateAccountRecord{}, err
}
if accountResult.Removed {
return StateAccountRecord{}, nil
}
return accountResult, nil
}
// RetrieveStorageAtByAddressAndStorageSlotAndBlockHash returns the cid and rlp bytes for the storage value corresponding to the provided address, storage slot, and block hash
func (r *Retriever) RetrieveStorageAtByAddressAndStorageSlotAndBlockHash(address common.Address, key, hash common.Hash) ([]byte, error) {
var storageResult nodeInfo
stateLeafKey := crypto.Keccak256Hash(address.Bytes())
storageHash := crypto.Keccak256Hash(key.Bytes())
if err := r.db.Get(&storageResult,
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr,
stateLeafKey.Hex(), storageHash.Hex(), hash.Hex()); err != nil {
return nil, err
}
if storageResult.StateLeafRemoved || storageResult.Removed {
return EmptyNodeValue, nil
}
return storageResult.Value, nil
}
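// storageSlotKeyExample is an illustrative sketch (not part of the original source): for a
// simple (non-mapping) contract variable, the storage slot key passed to the method above is
// the 32-byte big-endian slot index; the method then hashes it with Keccak256 to match
// storage_cids.storage_leaf_key, just as it hashes the address to match the state leaf key.
func storageSlotKeyExample(slotIndex uint64) common.Hash {
return common.BigToHash(new(big.Int).SetUint64(slotIndex))
}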
// RetrieveStorageAndRLP returns the cid and rlp bytes for the storage value corresponding to the
// provided address, storage slot, and block hash
func (r *Retriever) RetrieveStorageAndRLP(address common.Address, key, hash common.Hash) (string, []byte, error) {
var storageResult nodeInfo
stateLeafKey := crypto.Keccak256Hash(address.Bytes())
storageHash := crypto.Keccak256Hash(key.Bytes())
if err := r.db.Get(&storageResult,
RetrieveStorageAndRLPByAddressHashAndLeafKeyAndBlockHashPgStr,
stateLeafKey.Hex(), storageHash.Hex(), hash.Hex()); err != nil {
return "", nil, err
}
if storageResult.StateLeafRemoved || storageResult.Removed {
return "", EmptyNodeValue, nil
}
return storageResult.CID, storageResult.Data, nil
}

View File

@ -1,182 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_test
import (
"context"
"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
"github.com/jmoiron/sqlx"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth/test_helpers"
"github.com/cerc-io/ipld-eth-server/v5/pkg/shared"
)
var _ = Describe("Retriever", func() {
var (
db *sqlx.DB
diffIndexer interfaces.StateDiffIndexer
retriever *eth.Retriever
ctx = context.Background()
)
BeforeEach(func() {
db = shared.SetupDB()
diffIndexer = shared.SetupTestStateDiffIndexer(ctx, params.TestChainConfig, test_helpers.Genesis.Hash())
retriever = eth.NewRetriever(db)
})
AfterEach(func() {
shared.TearDownDB(db)
db.Close()
})
It("Retrieve", func() {
tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
for _, node := range test_helpers.MockStateNodes {
err = diffIndexer.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
Expect(err).ToNot(HaveOccurred())
}
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
})
Describe("RetrieveFirstBlockNumber", func() {
It("Throws an error if there are no blocks in the database", func() {
_, err := retriever.RetrieveFirstBlockNumber()
Expect(err).To(HaveOccurred())
})
It("Gets the number of the first block that has data in the database", func() {
tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1)))
})
It("Gets the number of the first block that has data in the database", func() {
payload := test_helpers.MockConvertedPayload
payload.Block = newMockBlock(1010101)
tx, err := diffIndexer.PushBlock(payload.Block, payload.Receipts, payload.Block.Difficulty())
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101)))
})
It("Gets the number of the first block that has data in the database", func() {
payload1 := test_helpers.MockConvertedPayload
payload1.Block = newMockBlock(1010101)
payload2 := payload1
payload2.Block = newMockBlock(5)
tx, err := diffIndexer.PushBlock(payload1.Block, payload1.Receipts, payload1.Block.Difficulty())
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
tx, err = diffIndexer.PushBlock(payload2.Block, payload2.Receipts, payload2.Block.Difficulty())
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(5)))
})
})
Describe("RetrieveLastBlockNumber", func() {
It("Throws an error if there are no blocks in the database", func() {
_, err := retriever.RetrieveLastBlockNumber()
Expect(err).To(HaveOccurred())
})
It("Gets the number of the latest block that has data in the database", func() {
tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1)))
})
It("Gets the number of the latest block that has data in the database", func() {
payload := test_helpers.MockConvertedPayload
payload.Block = newMockBlock(1010101)
tx, err := diffIndexer.PushBlock(payload.Block, payload.Receipts, payload.Block.Difficulty())
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101)))
})
It("Gets the number of the latest block that has data in the database", func() {
payload1 := test_helpers.MockConvertedPayload
payload1.Block = newMockBlock(1010101)
payload2 := payload1
payload2.Block = newMockBlock(5)
tx, err := diffIndexer.PushBlock(payload1.Block, payload1.Receipts, payload1.Block.Difficulty())
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
tx, err = diffIndexer.PushBlock(payload2.Block, payload2.Receipts, payload2.Block.Difficulty())
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101)))
})
})
})
func newMockBlock(blockNumber uint64) *types.Block {
header := test_helpers.MockHeader
header.Number.SetUint64(blockNumber)
return types.NewBlock(&header, test_helpers.MockTransactions, nil, test_helpers.MockReceipts, trie.NewEmpty(nil))
}

View File

@ -1,277 +0,0 @@
package eth
const (
RetrieveHeaderByHashPgStr = `
SELECT header_cids.cid,
blocks.data
FROM ipld.blocks,
eth.header_cids
WHERE header_cids.block_hash = $1
AND header_cids.canonical
AND blocks.key = header_cids.cid
AND blocks.block_number = header_cids.block_number
LIMIT 1
`
RetrieveUnclesPgStr = `SELECT uncle_cids.cid, data
FROM eth.uncle_cids
INNER JOIN eth.header_cids ON (
uncle_cids.header_id = header_cids.block_hash
AND uncle_cids.block_number = header_cids.block_number
)
INNER JOIN ipld.blocks ON (
uncle_cids.cid = blocks.key
AND uncle_cids.block_number = blocks.block_number
)
WHERE header_cids.block_hash = $1
AND header_cids.block_number = $2
ORDER BY uncle_cids.parent_hash
LIMIT 1`
RetrieveUnclesByBlockHashPgStr = `SELECT uncle_cids.cid, data
FROM eth.uncle_cids
INNER JOIN eth.header_cids ON (
uncle_cids.header_id = header_cids.block_hash
AND uncle_cids.block_number = header_cids.block_number
)
INNER JOIN ipld.blocks ON (
uncle_cids.cid = blocks.key
AND uncle_cids.block_number = blocks.block_number
)
WHERE header_cids.block_hash = $1
ORDER BY uncle_cids.parent_hash
LIMIT 1`
RetrieveTransactionsPgStr = `
SELECT transaction_cids.cid,
blocks.data
FROM eth.transaction_cids,
eth.header_cids,
ipld.blocks
WHERE header_cids.block_hash = $1
AND header_cids.block_number = $2
AND header_cids.canonical
AND transaction_cids.block_number = header_cids.block_number
AND transaction_cids.header_id = header_cids.block_hash
AND blocks.block_number = header_cids.block_number
AND blocks.key = transaction_cids.cid
ORDER BY eth.transaction_cids.index ASC
`
RetrieveTransactionsByBlockHashPgStr = `
SELECT transaction_cids.cid,
blocks.data
FROM eth.transaction_cids,
eth.header_cids,
ipld.blocks
WHERE header_cids.block_hash = $1
AND header_cids.canonical
AND transaction_cids.block_number = header_cids.block_number
AND transaction_cids.header_id = header_cids.block_hash
AND blocks.block_number = header_cids.block_number
AND blocks.key = transaction_cids.cid
ORDER BY eth.transaction_cids.index ASC
`
RetrieveReceiptsPgStr = `
SELECT receipt_cids.cid,
blocks.data,
eth.transaction_cids.tx_hash
FROM eth.receipt_cids,
eth.transaction_cids,
eth.header_cids,
ipld.blocks
WHERE header_cids.block_hash = $1
AND header_cids.block_number = $2
AND header_cids.canonical
AND receipt_cids.block_number = header_cids.block_number
AND receipt_cids.header_id = header_cids.block_hash
AND receipt_cids.TX_ID = transaction_cids.TX_HASH
AND transaction_cids.block_number = header_cids.block_number
AND transaction_cids.header_id = header_cids.block_hash
AND blocks.block_number = header_cids.block_number
AND blocks.key = receipt_cids.cid
ORDER BY eth.transaction_cids.index ASC
`
RetrieveReceiptsByBlockHashPgStr = `
SELECT receipt_cids.cid,
blocks.data,
eth.transaction_cids.tx_hash
FROM eth.receipt_cids,
eth.transaction_cids,
eth.header_cids,
ipld.blocks
WHERE header_cids.block_hash = $1
AND header_cids.canonical
AND receipt_cids.block_number = header_cids.block_number
AND receipt_cids.header_id = header_cids.block_hash
AND receipt_cids.TX_ID = transaction_cids.TX_HASH
AND transaction_cids.block_number = header_cids.block_number
AND transaction_cids.header_id = header_cids.block_hash
AND blocks.block_number = header_cids.block_number
AND blocks.key = receipt_cids.cid
ORDER BY eth.transaction_cids.index ASC
`
RetrieveAccountByLeafKeyAndBlockHashPgStr = `
SELECT state_cids.nonce,
state_cids.balance,
state_cids.storage_root,
state_cids.code_hash,
state_cids.removed
FROM eth.state_cids,
eth.header_cids
WHERE state_cids.state_leaf_key = $1
AND state_cids.block_number <=
(SELECT block_number
FROM eth.header_cids
WHERE block_hash = $2
LIMIT 1)
AND header_cids.canonical
AND state_cids.header_id = header_cids.block_hash
AND state_cids.block_number = header_cids.block_number
ORDER BY state_cids.block_number DESC
LIMIT 1
`
RetrieveFilteredGQLLogs = `SELECT CAST(eth.log_cids.block_number as TEXT), eth.log_cids.header_id as block_hash,
eth.log_cids.cid, eth.log_cids.index, eth.log_cids.rct_id, eth.log_cids.address,
eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
data, eth.receipt_cids.cid AS rct_cid, eth.receipt_cids.post_status, eth.receipt_cids.tx_id AS tx_hash
FROM eth.log_cids, eth.receipt_cids, ipld.blocks
WHERE eth.log_cids.rct_id = receipt_cids.tx_id
AND eth.log_cids.header_id = receipt_cids.header_id
AND eth.log_cids.block_number = receipt_cids.block_number
AND log_cids.cid = blocks.key
AND log_cids.block_number = blocks.block_number
AND receipt_cids.header_id = $1`
RetrieveFilteredLogsRange = `SELECT CAST(eth.log_cids.block_number as TEXT), eth.log_cids.cid, eth.log_cids.index, eth.log_cids.rct_id,
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index,
ipld.blocks.data, eth.receipt_cids.cid AS rct_cid, eth.receipt_cids.post_status, log_cids.header_id AS block_hash
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, ipld.blocks
WHERE eth.log_cids.block_number >= $1 AND eth.log_cids.block_number <= $2
AND eth.log_cids.header_id IN (SELECT block_hash from eth.header_cids where eth.header_cids.block_number >= $1 AND eth.header_cids.block_number <= $2 and eth.header_cids.canonical)
AND eth.transaction_cids.block_number = eth.log_cids.block_number
AND eth.transaction_cids.header_id = eth.log_cids.header_id
AND eth.receipt_cids.block_number = eth.log_cids.block_number
AND eth.receipt_cids.header_id = eth.log_cids.header_id
AND eth.receipt_cids.tx_id = eth.log_cids.rct_id
AND eth.receipt_cids.tx_id = eth.transaction_cids.tx_hash
AND ipld.blocks.block_number = eth.log_cids.block_number
AND ipld.blocks.key = eth.log_cids.cid`
RetrieveFilteredLogsSingle = `SELECT CAST(eth.log_cids.block_number as TEXT), eth.log_cids.cid, eth.log_cids.index, eth.log_cids.rct_id,
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index,
ipld.blocks.data, eth.receipt_cids.cid AS rct_cid, eth.receipt_cids.post_status, log_cids.header_id AS block_hash
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, ipld.blocks
WHERE eth.log_cids.header_id = $1
AND eth.transaction_cids.block_number = eth.log_cids.block_number
AND eth.transaction_cids.header_id = eth.log_cids.header_id
AND eth.receipt_cids.block_number = eth.log_cids.block_number
AND eth.receipt_cids.header_id = eth.log_cids.header_id
AND eth.receipt_cids.tx_id = eth.log_cids.rct_id
AND eth.receipt_cids.tx_id = eth.transaction_cids.tx_hash
AND ipld.blocks.block_number = eth.log_cids.block_number
AND ipld.blocks.key = eth.log_cids.cid`
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `
SELECT storage_cids.cid,
storage_cids.val,
storage_cids.block_number,
storage_cids.removed,
was_state_leaf_removed_by_number(storage_cids.state_leaf_key, storage_cids.block_number) AS state_leaf_removed
FROM eth.storage_cids, eth.header_cids
WHERE header_cids.block_number <= (SELECT block_number from eth.header_cids where block_hash = $3 LIMIT 1)
AND header_cids.canonical
AND storage_cids.block_number = header_cids.block_number
AND storage_cids.header_id = header_cids.block_hash
AND storage_cids.storage_leaf_key = $2
AND storage_cids.state_leaf_key = $1
ORDER BY storage_cids.block_number DESC LIMIT 1
`
RetrieveStorageAndRLPByAddressHashAndLeafKeyAndBlockHashPgStr = `
SELECT storage_cids.cid,
storage_cids.val,
storage_cids.block_number,
storage_cids.removed,
was_state_leaf_removed_by_number(storage_cids.state_leaf_key, storage_cids.block_number) AS state_leaf_removed,
blocks.data
FROM eth.storage_cids, eth.header_cids, ipld.blocks
WHERE header_cids.block_number <= (SELECT block_number from eth.header_cids where block_hash = $3 LIMIT 1)
AND header_cids.canonical
AND storage_cids.block_number = header_cids.block_number
AND storage_cids.header_id = header_cids.block_hash
AND storage_cids.storage_leaf_key = $2
AND storage_cids.state_leaf_key = $1
AND blocks.key = storage_cids.cid
AND blocks.block_number = storage_cids.block_number
ORDER BY storage_cids.block_number DESC LIMIT 1
`
RetrieveCanonicalBlockHashByNumber = `SELECT block_hash FROM eth.header_cids WHERE block_number = $1 AND canonical`
RetrieveCanonicalHeaderByNumber = `
SELECT header_cids.cid,
blocks.data
FROM ipld.blocks,
eth.header_cids
WHERE header_cids.block_number = $1
AND header_cids.canonical
AND blocks.key = header_cids.cid
AND blocks.block_number = header_cids.block_number
LIMIT 1
`
RetrieveCanonicalHeaderAndHashByNumber = `
SELECT blocks.data,
header_cids.block_hash
FROM ipld.blocks,
eth.header_cids
WHERE header_cids.block_number = $1
AND header_cids.canonical
AND blocks.key = header_cids.cid
AND blocks.block_number = header_cids.block_number
LIMIT 1
`
RetrieveTD = `SELECT CAST(td as TEXT) FROM eth.header_cids
WHERE header_cids.block_hash = $1`
RetrieveRPCTransaction = `
SELECT blocks.data,
transaction_cids.header_id,
transaction_cids.block_number,
transaction_cids.index
FROM eth.transaction_cids,
ipld.blocks,
eth.header_cids
WHERE transaction_cids.TX_HASH = $1
AND header_cids.block_hash = transaction_cids.header_id
AND header_cids.block_number = transaction_cids.block_number
AND header_cids.canonical
AND blocks.key = transaction_cids.cid
AND blocks.block_number = transaction_cids.block_number
`
RetrieveCodeHashByLeafKeyAndBlockHash = `
SELECT state_cids.code_hash
FROM eth.state_cids,
eth.header_cids
WHERE
state_cids.state_leaf_key = $1
AND state_cids.block_number <=
(SELECT block_number
FROM eth.header_cids
WHERE block_hash = $2
LIMIT 1)
AND header_cids.canonical
AND state_cids.header_id = header_cids.block_hash
AND state_cids.block_number = header_cids.block_number
ORDER BY state_cids.block_number DESC
LIMIT 1
`
RetrieveCodeByKey = `SELECT data FROM ipld.blocks WHERE key = $1`
)
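// ipldResult and nodeInfo below are scanned directly from the query results above via sqlx
// struct tags: for example, RetrieveHeaderByHashPgStr selects header_cids.cid and blocks.data,
// which populate the CID and Data fields of ipldResult in Retriever.RetrieveHeaderByHash.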
type ipldResult struct {
CID string `db:"cid"`
Data []byte `db:"data"`
TxHash string `db:"tx_hash"`
}
type nodeInfo struct {
CID string `db:"cid"`
Value []byte `db:"val"`
BlockNumber string `db:"block_number"`
Data []byte `db:"data"`
Removed bool `db:"removed"`
StateLeafRemoved bool `db:"state_leaf_removed"`
}

View File

@ -1,13 +0,0 @@
package eth_state_test
import (
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
. "github.com/onsi/gomega"
)
func CheckGetSliceResponse(sliceResponse eth.GetSliceResponse, expectedResponse eth.GetSliceResponse) {
Expect(sliceResponse.SliceID).To(Equal(expectedResponse.SliceID))
Expect(sliceResponse.TrieNodes).To(Equal(expectedResponse.TrieNodes))
Expect(sliceResponse.Leaves).To(Equal(expectedResponse.Leaves))
Expect(sliceResponse.MetaData.NodeStats).To(Equal(expectedResponse.MetaData.NodeStats))
}

View File

@ -1,837 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_state_test
import (
"bytes"
"context"
"fmt"
"math/big"
"os"
"github.com/cerc-io/plugeth-statediff"
"github.com/cerc-io/plugeth-statediff/adapt"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/jmoiron/sqlx"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth/test_helpers"
"github.com/cerc-io/ipld-eth-server/v5/pkg/shared"
)
var (
parsedABI abi.ABI
randomAddress = common.HexToAddress("0x9F4203bd7a11aCB94882050E6f1C3ab14BBaD3D9")
randomHash = crypto.Keccak256Hash(randomAddress.Bytes())
number = rpc.BlockNumber(test_helpers.BlockNumber.Int64())
block1StateRoot = common.HexToHash("0xa1f614839ebdd58677df2c9d66a3e0acc9462acc49fad6006d0b6e5d2b98ed21")
rootDataHashBlock1 = "a1f614839ebdd58677df2c9d66a3e0acc9462acc49fad6006d0b6e5d2b98ed21"
rootDataBlock1 = "f871a0577652b625b77bdb5bf77bc43f3125cad7464d679d1575565277d3611b8053e780808080a0fe889f10e5db8f2c2bf355928152a17f6e3bb99a9241ac6d84c77e6264509c798080808080808080a011db0cda34a896dabeb6839bb06a38f49514cfa486435984eb013b7df9ee85c58080"
block5StateRoot = common.HexToHash("0x572ef3b6b3d5164ed9d83341073f13af4d60a3aab38989b6c03917544f186a43")
rootDataHashBlock5 = "572ef3b6b3d5164ed9d83341073f13af4d60a3aab38989b6c03917544f186a43"
rootDataBlock5 = "f8b1a0408dd81f6cd5c614f91ecd9faa01d5feba936e0314ba04f99c74069ba819e0f280808080a0b356351d60bc9894cf1f1d6cb68c815f0131d50f1da83c4023a09ec855cfff91a0180d554b171f6acf8295e376266df2311f68975d74c02753b85707d308f703e48080808080a0422c7cc4fa407603f0879a0ecaa809682ce98dbef30551a34bcce09fa3ac995180a02d264f591aa3fa9df3cbeea190a4fd8d5483ddfb1b85603b2a006d179f79ba358080"
account1DataHash = "180d554b171f6acf8295e376266df2311f68975d74c02753b85707d308f703e4"
account1Data = "f869a03114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45b846f8440180a04bd45c41d863f1bcf5da53364387fcdd64f77924d388a4df47e64132273fb4c0a0ba79854f3dbf6505fdbb085888e25fae8fa97288c5ce8fcd39aa589290d9a659"
account1StateLeafKey = "0x6114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45"
account1Code = "608060405234801561001057600080fd5b50600436106100415760003560e01c806343d726d61461004657806365f3c31a1461005057806373d4a13a1461007e575b600080fd5b61004e61009c565b005b61007c6004803603602081101561006657600080fd5b810190808035906020019092919050505061017b565b005b610086610185565b6040518082815260200191505060405180910390f35b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610141576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602281526020018061018c6022913960400191505060405180910390fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b8060018190555050565b6001548156fe4f6e6c79206f776e65722063616e2063616c6c20746869732066756e6374696f6e2ea265627a7a723158205ba91466129f45285f53176d805117208c231ec6343d7896790e6fc4165b802b64736f6c63430005110032"
account2DataHash = "2d264f591aa3fa9df3cbeea190a4fd8d5483ddfb1b85603b2a006d179f79ba35"
account2Data = "f871a03926db69aaced518e9b9f0f434a473e7174109c943548bb8f23be41ca76d9ad2b84ef84c02881bc16d674ec82710a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
account3DataHash = "408dd81f6cd5c614f91ecd9faa01d5feba936e0314ba04f99c74069ba819e0f2"
account3Data = "f86da030bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2ab84af848058405f5b608a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
account4DataHash = "422c7cc4fa407603f0879a0ecaa809682ce98dbef30551a34bcce09fa3ac9951"
account4Data = "f871a03957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45b84ef84c80883782dace9d9003e8a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
account5DataHash = "b356351d60bc9894cf1f1d6cb68c815f0131d50f1da83c4023a09ec855cfff91"
account5Data = "f871a03380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312ab84ef84c80883782dace9d900000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
contractStorageRootBlock5 = common.HexToHash("0x4bd45c41d863f1bcf5da53364387fcdd64f77924d388a4df47e64132273fb4c0")
storageRootDataHashBlock5 = "4bd45c41d863f1bcf5da53364387fcdd64f77924d388a4df47e64132273fb4c0"
storageRootDataBlock5 = "f838a120290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5639594703c4b2bd70c169f5717101caee543299fc946c7"
contractStorageRootBlock4 = common.HexToHash("0x64ad893aa7937d05983daa8b7d221acdf1c116433f29dcd1ea69f16fa96fce68")
storageRootDataHashBlock4 = "64ad893aa7937d05983daa8b7d221acdf1c116433f29dcd1ea69f16fa96fce68"
storageRootDataBlock4 = "f8518080a08e8ada45207a7d2f19dd6f0ee4955cec64fa5ebef29568b5c449a4c4dd361d558080808080808080a07b58866e3801680bea90c82a80eb08889ececef107b8b504ae1d1a1e1e17b7af8080808080"
storageNode1DataHash = "7b58866e3801680bea90c82a80eb08889ececef107b8b504ae1d1a1e1e17b7af"
storageNode1Data = "e2a0310e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf609"
storageNode2DataHash = "8e8ada45207a7d2f19dd6f0ee4955cec64fa5ebef29568b5c449a4c4dd361d55"
storageNode2Data = "f7a0390decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5639594703c4b2bd70c169f5717101caee543299fc946c7"
)
func init() {
// load abi
abiBytes, err := os.ReadFile("../test_helpers/abi.json")
if err != nil {
panic(err)
}
parsedABI, err = abi.JSON(bytes.NewReader(abiBytes))
if err != nil {
panic(err)
}
}
const chainLength = 5
var (
blocks []*types.Block
receipts []types.Receipts
chain *core.BlockChain
db *sqlx.DB
api *eth.PublicEthAPI
backend *eth.Backend
chainConfig = &*params.TestChainConfig
mockTD = big.NewInt(1337)
expectedCanonicalHeader map[string]interface{}
ctx = context.Background()
)
var _ = BeforeSuite(func() {
chainConfig.LondonBlock = big.NewInt(100)
// db and type initializations
var err error
db = shared.SetupDB()
backend, err = eth.NewEthBackend(db, &eth.Config{
ChainConfig: chainConfig,
VMConfig: vm.Config{},
RPCGasCap: big.NewInt(10000000000), // Max gas cap for an RPC call.
GroupCacheConfig: &shared.GroupCacheConfig{
StateDB: shared.GroupConfig{
Name: "eth_state_test",
CacheSizeInMB: 8,
CacheExpiryInMins: 60,
LogStatsIntervalInSecs: 0,
},
},
})
Expect(err).ToNot(HaveOccurred())
api, _ = eth.NewPublicEthAPI(backend, nil, eth.APIConfig{StateDiffTimeout: shared.DefaultStateDiffTimeout})
// make the test blockchain (and state)
blocks, receipts, chain = test_helpers.MakeChain(chainLength, test_helpers.Genesis, test_helpers.TestChainGen, chainConfig)
transformer := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
params := statediff.Params{}
canonicalHeader := blocks[1].Header()
expectedCanonicalHeader = map[string]interface{}{
"number": (*hexutil.Big)(canonicalHeader.Number),
"hash": canonicalHeader.Hash(),
"parentHash": canonicalHeader.ParentHash,
"nonce": canonicalHeader.Nonce,
"mixHash": canonicalHeader.MixDigest,
"sha3Uncles": canonicalHeader.UncleHash,
"logsBloom": canonicalHeader.Bloom,
"stateRoot": canonicalHeader.Root,
"miner": canonicalHeader.Coinbase,
"difficulty": (*hexutil.Big)(canonicalHeader.Difficulty),
"extraData": hexutil.Bytes([]byte{}),
"size": hexutil.Uint64(canonicalHeader.Size()),
"gasLimit": hexutil.Uint64(canonicalHeader.GasLimit),
"gasUsed": hexutil.Uint64(canonicalHeader.GasUsed),
"timestamp": hexutil.Uint64(canonicalHeader.Time),
"transactionsRoot": canonicalHeader.TxHash,
"receiptsRoot": canonicalHeader.ReceiptHash,
"totalDifficulty": (*hexutil.Big)(mockTD),
}
// Insert some non-canonical data into the database so that we test our ability to discern canonicity
// NOTE: Non-canonical blocks must come first, because the statediffer will assume the most recent block it is
// provided at a certain height is canonical. This is true inside geth, but not necessarily inside this test.
indexAndPublisher := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
tx, err := indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
// The non-canonical header has a child
tx, err = indexAndPublisher.PushBlock(test_helpers.MockChild, test_helpers.MockReceipts, test_helpers.MockChild.Difficulty())
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
// iterate over the blocks, generating statediff payloads and transforming the data into Postgres
builder := statediff.NewBuilder(adapt.GethStateView(chain.StateCache()))
for i, block := range blocks {
var args statediff.Args
var rcts types.Receipts
if i == 0 {
args = statediff.Args{
OldStateRoot: common.Hash{},
NewStateRoot: block.Root(),
BlockNumber: block.Number(),
BlockHash: block.Hash(),
}
} else {
args = statediff.Args{
OldStateRoot: blocks[i-1].Root(),
NewStateRoot: block.Root(),
BlockNumber: block.Number(),
BlockHash: block.Hash(),
}
rcts = receipts[i-1]
}
diff, err := builder.BuildStateDiffObject(args, params)
Expect(err).ToNot(HaveOccurred())
tx, err := transformer.PushBlock(block, rcts, mockTD)
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
for _, node := range diff.Nodes {
err = transformer.PushStateNode(tx, node, block.Hash().String())
Expect(err).ToNot(HaveOccurred())
}
for _, ipld := range diff.IPLDs {
err = transformer.PushIPLD(tx, ipld)
Expect(err).ToNot(HaveOccurred())
}
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
}
})
var _ = AfterSuite(func() {
shared.TearDownDB(db)
chain.Stop()
})
var _ = Describe("eth state reading tests", func() {
Describe("eth_call", func() {
It("Applies call args (tx data) on top of state, returning the result (e.g. a Getter method call)", func() {
data, err := parsedABI.Pack("data")
Expect(err).ToNot(HaveOccurred())
bdata := hexutil.Bytes(data)
callArgs := eth.CallArgs{
To: &test_helpers.ContractAddr,
Data: &bdata,
}
// Before contract deployment, returns nil
res, err := api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(0), nil)
Expect(err).ToNot(HaveOccurred())
Expect(res).To(BeNil())
res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(1), nil)
Expect(err).ToNot(HaveOccurred())
Expect(res).To(BeNil())
// After deployment
res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(2), nil)
Expect(err).ToNot(HaveOccurred())
expectedRes := hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
Expect(res).To(Equal(expectedRes))
res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(3), nil)
Expect(err).ToNot(HaveOccurred())
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003"))
Expect(res).To(Equal(expectedRes))
res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(4), nil)
Expect(err).ToNot(HaveOccurred())
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000009"))
Expect(res).To(Equal(expectedRes))
res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(5), nil)
Expect(err).ToNot(HaveOccurred())
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"))
Expect(res).To(Equal(expectedRes))
})
})
Describe("eth_getBalance", func() {
var (
expectedContractBalance = (*hexutil.Big)(common.Big0)
expectedBankBalanceBlock0 = (*hexutil.Big)(test_helpers.TestBankFunds)
expectedAcct1BalanceBlock1 = (*hexutil.Big)(big.NewInt(10000))
expectedBankBalanceBlock1 = (*hexutil.Big)(new(big.Int).Sub(test_helpers.TestBankFunds, big.NewInt(10000)))
expectedAcct2BalanceBlock2 = (*hexutil.Big)(big.NewInt(1000))
expectedBankBalanceBlock2 = (*hexutil.Big)(new(big.Int).Sub(expectedBankBalanceBlock1.ToInt(), big.NewInt(1000)))
expectedAcct2BalanceBlock3 = (*hexutil.Big)(new(big.Int).Add(expectedAcct2BalanceBlock2.ToInt(), test_helpers.MiningReward))
expectedAcct2BalanceBlock4 = (*hexutil.Big)(new(big.Int).Add(expectedAcct2BalanceBlock3.ToInt(), test_helpers.MiningReward))
expectedAcct1BalanceBlock5 = (*hexutil.Big)(new(big.Int).Add(expectedAcct1BalanceBlock1.ToInt(), test_helpers.MiningReward))
)
It("Retrieves account balance by block number", func() {
bal, err := api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(0))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock0))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(1))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(1))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(1))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(1))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(2))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(2))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(2))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(2))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock3))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(4))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(4))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(4))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(4))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock5))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2))
})
It("Retrieves account balance by block hash", func() {
bal, err := api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock0))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock3))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock5))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2))
})
It("Returns 0 if account balance not found by block number", func() {
bal, err := api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(0))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(0))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(0))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
})
It("Returns 0 if account balance not found by block hash", func() {
bal, err := api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
})
})
Describe("eth_getCode", func() {
It("Retrieves the code for the provided contract address at the block with the provided number", func() {
code, err := api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred())
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
code, err = api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred())
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
})
It("Retrieves the code for the provided contract address at the block with the provided hash", func() {
code, err := api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
code, err = api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
})
It("Returns `nil` for an account it cannot find the code for", func() {
code, err := api.GetCode(ctx, randomAddress, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(code).To(BeEmpty())
})
It("Returns `nil` for a contract that doesn't exist at this height", func() {
code, err := api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(0))
Expect(err).ToNot(HaveOccurred())
Expect(code).To(BeEmpty())
})
})
Describe("eth_getStorageAt", func() {
It("Returns empty slice if it tries to access a contract which does not exist", func() {
storage, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(0))
Expect(err).NotTo(HaveOccurred())
Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
storage, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(1))
Expect(err).NotTo(HaveOccurred())
Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
})
It("Returns empty slice if it tries to access a contract slot which does not exist", func() {
storage, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, randomHash.Hex(), rpc.BlockNumberOrHashWithNumber(2))
Expect(err).NotTo(HaveOccurred())
Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
})
It("Retrieves the storage value at the provided contract address and storage leaf key at the block with the provided hash or number", func() {
// After deployment
val, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(2))
Expect(err).ToNot(HaveOccurred())
expectedRes := hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
Expect(val).To(Equal(expectedRes))
val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred())
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003"))
Expect(val).To(Equal(expectedRes))
val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(4))
Expect(err).ToNot(HaveOccurred())
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000009"))
Expect(val).To(Equal(expectedRes))
val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred())
Expect(val).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
})
It("Throws an error for a non-existing block hash", func() {
_, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithHash(randomHash, true))
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError("header for hash not found"))
})
It("Throws an error for a non-existing block number", func() {
_, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(chainLength+1))
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError("header not found"))
})
})
Describe("eth_getHeaderByNumber", func() {
It("Finds the canonical header based on the header's weight relative to others at the provided height", func() {
header, err := api.GetHeaderByNumber(ctx, number)
Expect(err).ToNot(HaveOccurred())
Expect(header).To(Equal(expectedCanonicalHeader))
})
})
Describe("eth_getSlice", func() {
It("Retrieves the state slice for root path", func() {
path := "0x"
depth := 3
sliceResponse, err := api.GetSlice(ctx, path, depth, block5StateRoot, false)
Expect(err).ToNot(HaveOccurred())
expectedResponse := eth.GetSliceResponse{
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, block5StateRoot.String()),
MetaData: eth.GetSliceResponseMetadata{
NodeStats: map[string]string{
"00-stem-and-head-nodes": "1",
"01-max-depth": "1",
"02-total-trie-nodes": "6",
"03-leaves": "5",
"04-smart-contracts": "1",
},
},
TrieNodes: eth.GetSliceResponseTrieNodes{
Stem: map[string]string{},
Head: map[string]string{
rootDataHashBlock5: rootDataBlock5,
},
Slice: map[string]string{
account1DataHash: account1Data,
account2DataHash: account2Data,
account3DataHash: account3Data,
account4DataHash: account4Data,
account5DataHash: account5Data,
},
},
Leaves: map[string]eth.GetSliceResponseAccount{
account1StateLeafKey: {
StorageRoot: contractStorageRootBlock5.Hex(),
EVMCode: account1Code,
},
},
}
CheckGetSliceResponse(*sliceResponse, expectedResponse)
})
It("Retrieves the state slice for root path with 0 depth", func() {
path := "0x"
depth := 0
sliceResponse, err := api.GetSlice(ctx, path, depth, block5StateRoot, false)
Expect(err).ToNot(HaveOccurred())
expectedResponse := eth.GetSliceResponse{
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, block5StateRoot.String()),
MetaData: eth.GetSliceResponseMetadata{
NodeStats: map[string]string{
"00-stem-and-head-nodes": "1",
"01-max-depth": "0",
"02-total-trie-nodes": "1",
"03-leaves": "0",
"04-smart-contracts": "0",
},
},
TrieNodes: eth.GetSliceResponseTrieNodes{
Stem: map[string]string{},
Head: map[string]string{
rootDataHashBlock5: rootDataBlock5,
},
Slice: map[string]string{},
},
Leaves: map[string]eth.GetSliceResponseAccount{},
}
CheckGetSliceResponse(*sliceResponse, expectedResponse)
})
It("Retrieves the state slice for a path to an account", func() {
path := "0x06"
depth := 2
sliceResponse, err := api.GetSlice(ctx, path, depth, block5StateRoot, false)
Expect(err).ToNot(HaveOccurred())
expectedResponse := eth.GetSliceResponse{
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, block5StateRoot.String()),
MetaData: eth.GetSliceResponseMetadata{
NodeStats: map[string]string{
"00-stem-and-head-nodes": "2",
"01-max-depth": "0",
"02-total-trie-nodes": "2",
"03-leaves": "1",
"04-smart-contracts": "1",
},
},
TrieNodes: eth.GetSliceResponseTrieNodes{
Stem: map[string]string{
rootDataHashBlock5: rootDataBlock5,
},
Head: map[string]string{
account1DataHash: account1Data,
},
Slice: map[string]string{},
},
Leaves: map[string]eth.GetSliceResponseAccount{
account1StateLeafKey: {
StorageRoot: contractStorageRootBlock5.Hex(),
EVMCode: account1Code,
},
},
}
CheckGetSliceResponse(*sliceResponse, expectedResponse)
})
It("Retrieves the state slice for a path to a non-existing account", func() {
path := "0x06"
depth := 2
sliceResponse, err := api.GetSlice(ctx, path, depth, block1StateRoot, false)
Expect(err).ToNot(HaveOccurred())
expectedResponse := eth.GetSliceResponse{
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, block1StateRoot.String()),
MetaData: eth.GetSliceResponseMetadata{
NodeStats: map[string]string{
"00-stem-and-head-nodes": "1",
"01-max-depth": "0",
"02-total-trie-nodes": "1",
"03-leaves": "0",
"04-smart-contracts": "0",
},
},
TrieNodes: eth.GetSliceResponseTrieNodes{
Stem: map[string]string{
rootDataHashBlock1: rootDataBlock1,
},
Head: map[string]string{},
Slice: map[string]string{},
},
Leaves: map[string]eth.GetSliceResponseAccount{},
}
CheckGetSliceResponse(*sliceResponse, expectedResponse)
})
It("Retrieves the storage slice for root path", func() {
path := "0x"
depth := 2
sliceResponse, err := api.GetSlice(ctx, path, depth, contractStorageRootBlock4, true)
Expect(err).ToNot(HaveOccurred())
expectedResponse := eth.GetSliceResponse{
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, contractStorageRootBlock4.String()),
MetaData: eth.GetSliceResponseMetadata{
NodeStats: map[string]string{
"00-stem-and-head-nodes": "1",
"01-max-depth": "1",
"02-total-trie-nodes": "3",
"03-leaves": "2",
"04-smart-contracts": "0",
},
},
TrieNodes: eth.GetSliceResponseTrieNodes{
Stem: map[string]string{},
Head: map[string]string{
storageRootDataHashBlock4: storageRootDataBlock4,
},
Slice: map[string]string{
storageNode1DataHash: storageNode1Data,
storageNode2DataHash: storageNode2Data,
},
},
Leaves: map[string]eth.GetSliceResponseAccount{},
}
CheckGetSliceResponse(*sliceResponse, expectedResponse)
})
It("Retrieves the storage slice for root path with 0 depth", func() {
path := "0x"
depth := 0
sliceResponse, err := api.GetSlice(ctx, path, depth, contractStorageRootBlock4, true)
Expect(err).ToNot(HaveOccurred())
expectedResponse := eth.GetSliceResponse{
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, contractStorageRootBlock4.String()),
MetaData: eth.GetSliceResponseMetadata{
NodeStats: map[string]string{
"00-stem-and-head-nodes": "1",
"01-max-depth": "0",
"02-total-trie-nodes": "1",
"03-leaves": "0",
"04-smart-contracts": "0",
},
},
TrieNodes: eth.GetSliceResponseTrieNodes{
Stem: map[string]string{},
Head: map[string]string{
storageRootDataHashBlock4: storageRootDataBlock4,
},
Slice: map[string]string{},
},
Leaves: map[string]eth.GetSliceResponseAccount{},
}
CheckGetSliceResponse(*sliceResponse, expectedResponse)
})
It("Retrieves the storage slice for root path with deleted nodes", func() {
path := "0x"
depth := 2
sliceResponse, err := api.GetSlice(ctx, path, depth, contractStorageRootBlock5, true)
Expect(err).ToNot(HaveOccurred())
expectedResponse := eth.GetSliceResponse{
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, contractStorageRootBlock5.String()),
MetaData: eth.GetSliceResponseMetadata{
NodeStats: map[string]string{
"00-stem-and-head-nodes": "1",
"01-max-depth": "0",
"02-total-trie-nodes": "1",
"03-leaves": "1",
"04-smart-contracts": "0",
},
},
TrieNodes: eth.GetSliceResponseTrieNodes{
Stem: map[string]string{},
Head: map[string]string{
storageRootDataHashBlock5: storageRootDataBlock5,
},
Slice: map[string]string{},
},
Leaves: map[string]eth.GetSliceResponseAccount{},
}
CheckGetSliceResponse(*sliceResponse, expectedResponse)
})
It("Retrieves the storage slice for a path to a storage node", func() {
path := "0x0b"
depth := 2
sliceResponse, err := api.GetSlice(ctx, path, depth, contractStorageRootBlock4, true)
Expect(err).ToNot(HaveOccurred())
expectedResponse := eth.GetSliceResponse{
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, contractStorageRootBlock4.String()),
MetaData: eth.GetSliceResponseMetadata{
NodeStats: map[string]string{
"00-stem-and-head-nodes": "2",
"01-max-depth": "0",
"02-total-trie-nodes": "2",
"03-leaves": "1",
"04-smart-contracts": "0",
},
},
TrieNodes: eth.GetSliceResponseTrieNodes{
Stem: map[string]string{
storageRootDataHashBlock4: storageRootDataBlock4,
},
Head: map[string]string{
storageNode1DataHash: storageNode1Data,
},
Slice: map[string]string{},
},
Leaves: map[string]eth.GetSliceResponseAccount{},
}
CheckGetSliceResponse(*sliceResponse, expectedResponse)
})
})
})
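These specs call GetSlice directly on the API object; over JSON-RPC the same method is exposed as eth_getSlice. A minimal client-side sketch in Go, assuming a locally running ipld-eth-server endpoint (the URL, port, and state root value are placeholders, not taken from the test suite):

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"

	"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
)

func main() {
	// Dial a running ipld-eth-server JSON-RPC endpoint (address is an assumption)
	client, err := rpc.Dial("http://localhost:8081")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// state root of the block of interest (placeholder value)
	stateRoot := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")

	// path "0x", depth 3, storage=false mirrors the root-path state-slice case above
	var res eth.GetSliceResponse
	if err := client.CallContext(context.Background(), &res, "eth_getSlice", "0x", 3, stateRoot, false); err != nil {
		panic(err)
	}
	fmt.Println(res.SliceID, res.MetaData.NodeStats)
}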

View File

@ -16,6 +16,38 @@
package eth
import (
"math/big"
"github.com/spf13/viper"
)
// SubscriptionSettings config is used by a subscriber to specify what eth data to stream from the watcher
type SubscriptionSettings struct {
BackFill bool
BackFillOnly bool
Start *big.Int
End *big.Int // set to 0 or a negative value to have no ending block
HeaderFilter HeaderFilter
TxFilter TxFilter
ReceiptFilter ReceiptFilter
StateFilter StateFilter
StorageFilter StorageFilter
}
// HeaderFilter contains filter settings for headers
type HeaderFilter struct {
Off bool
Uncles bool
}
// TxFilter contains filter settings for txs
type TxFilter struct {
Off bool
Src []string
Dst []string
}
// ReceiptFilter contains filter settings for receipts
type ReceiptFilter struct {
Off bool
@ -24,3 +56,70 @@ type ReceiptFilter struct {
LogAddresses []string // receipt contains logs from the provided addresses
Topics [][]string
}
// StateFilter contains filter settings for state
type StateFilter struct {
Off bool
Addresses []string // is converted to state key by taking its keccak256 hash
IntermediateNodes bool
}
// StorageFilter contains filter settings for storage
type StorageFilter struct {
Off bool
Addresses []string
StorageKeys []string // need to be the hashed keys themselves, not the slot positions
IntermediateNodes bool
}
// NewEthSubscriptionConfig initializes a SubscriptionSettings struct from viper/env variables
func NewEthSubscriptionConfig() (*SubscriptionSettings, error) {
sc := new(SubscriptionSettings)
// Below default to false, which means we do not backfill by default
sc.BackFill = viper.GetBool("watcher.ethSubscription.historicalData")
sc.BackFillOnly = viper.GetBool("watcher.ethSubscription.historicalDataOnly")
// Below default to 0
// 0 start means we start at the beginning and 0 end means we continue indefinitely
sc.Start = big.NewInt(viper.GetInt64("watcher.ethSubscription.startingBlock"))
sc.End = big.NewInt(viper.GetInt64("watcher.ethSubscription.endingBlock"))
// Below default to false, which means we get all headers and no uncles by default
sc.HeaderFilter = HeaderFilter{
Off: viper.GetBool("watcher.ethSubscription.headerFilter.off"),
Uncles: viper.GetBool("watcher.ethSubscription.headerFilter.uncles"),
}
// Below default to false and two empty slices,
// which means we get all transactions by default
sc.TxFilter = TxFilter{
Off: viper.GetBool("watcher.ethSubscription.txFilter.off"),
Src: viper.GetStringSlice("watcher.ethSubscription.txFilter.src"),
Dst: viper.GetStringSlice("watcher.ethSubscription.txFilter.dst"),
}
// By default all of the topic slices will be empty => match on any/all topics
topics := make([][]string, 4)
topics[0] = viper.GetStringSlice("watcher.ethSubscription.receiptFilter.topic0s")
topics[1] = viper.GetStringSlice("watcher.ethSubscription.receiptFilter.topic1s")
topics[2] = viper.GetStringSlice("watcher.ethSubscription.receiptFilter.topic2s")
topics[3] = viper.GetStringSlice("watcher.ethSubscription.receiptFilter.topic3s")
sc.ReceiptFilter = ReceiptFilter{
Off: viper.GetBool("watcher.ethSubscription.receiptFilter.off"),
MatchTxs: viper.GetBool("watcher.ethSubscription.receiptFilter.matchTxs"),
LogAddresses: viper.GetStringSlice("watcher.ethSubscription.receiptFilter.contracts"),
Topics: topics,
}
// Below default to false (twice) and an empty slice,
// which means we get all state leaves by default, but no intermediate nodes
sc.StateFilter = StateFilter{
Off: viper.GetBool("watcher.ethSubscription.stateFilter.off"),
IntermediateNodes: viper.GetBool("watcher.ethSubscription.stateFilter.intermediateNodes"),
Addresses: viper.GetStringSlice("watcher.ethSubscription.stateFilter.addresses"),
}
// Below default to false (twice) and two empty slices,
// which means we get all storage leaves by default, but no intermediate nodes
sc.StorageFilter = StorageFilter{
Off: viper.GetBool("watcher.ethSubscription.storageFilter.off"),
IntermediateNodes: viper.GetBool("watcher.ethSubscription.storageFilter.intermediateNodes"),
Addresses: viper.GetStringSlice("watcher.ethSubscription.storageFilter.addresses"),
StorageKeys: viper.GetStringSlice("watcher.ethSubscription.storageFilter.storageKeys"),
}
return sc, nil
}
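NewEthSubscriptionConfig pulls every filter setting from viper keys under watcher.ethSubscription, so the same struct can be driven from a TOML file or set programmatically in tests. A minimal sketch of the latter, assuming it lives in this eth package (the concrete values are illustrative, not defaults):

// assumes the same eth package as SubscriptionSettings; viper is already imported above
func exampleSubscriptionSettings() (*SubscriptionSettings, error) {
	viper.Set("watcher.ethSubscription.historicalData", true)
	viper.Set("watcher.ethSubscription.startingBlock", 0)
	viper.Set("watcher.ethSubscription.endingBlock", 0) // 0 => stream indefinitely
	viper.Set("watcher.ethSubscription.headerFilter.uncles", true)
	viper.Set("watcher.ethSubscription.receiptFilter.contracts", []string{"0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592"})
	viper.Set("watcher.ethSubscription.stateFilter.addresses", []string{"0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592"})
	return NewEthSubscriptionConfig()
}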

View File

@ -14,16 +14,28 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_api_test
package eth
import (
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
)
func TestETHSuite(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "ipld-eth-server/pkg/eth/api_test")
// TxModelsContainsCID is used to check whether a list of TxModels contains a specific cid string
func TxModelsContainsCID(txs []models.TxModel, cid string) bool {
for _, tx := range txs {
if tx.CID == cid {
return true
}
}
return false
}
// ReceiptModelsContainsCID is used to check whether a list of ReceiptModels contains a specific cid string
func ReceiptModelsContainsCID(rcts []models.ReceiptModel, cid string) bool {
for _, rct := range rcts {
if rct.LeafCID == cid {
return true
}
}
return false
}
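In the specs these helpers are typically used inside Gomega assertions to confirm that a retrieved CID set includes an expected transaction or receipt CID. An illustrative fragment, assuming a test package that imports eth and the post-publish mocks from test_helpers shown further down:

// e.g. inside an It(...) block in an eth test suite
Expect(eth.TxModelsContainsCID(test_helpers.MockCIDWrapper.Transactions, test_helpers.Trx1CID.String())).To(BeTrue())
Expect(eth.ReceiptModelsContainsCID(test_helpers.MockCIDWrapper.Receipts, test_helpers.Rct1CID.String())).To(BeTrue())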

View File

@ -1,84 +0,0 @@
package test_helpers
import (
"context"
"math/big"
"github.com/cerc-io/plugeth-statediff"
"github.com/cerc-io/plugeth-statediff/adapt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/cerc-io/ipld-eth-server/v5/pkg/shared"
)
type IndexChainParams struct {
Blocks []*types.Block
Receipts []types.Receipts
StateCache state.Database
ChainConfig *params.ChainConfig
StateDiffParams statediff.Params
TotalDifficulty *big.Int
// Whether to skip indexing state nodes (state_cids, storage_cids)
SkipStateNodes bool
// Whether to skip indexing IPLD blocks
SkipIPLDs bool
}
func IndexChain(params IndexChainParams) error {
indexer := shared.SetupTestStateDiffIndexer(context.Background(), params.ChainConfig, Genesis.Hash())
builder := statediff.NewBuilder(adapt.GethStateView(params.StateCache))
// iterate over the blocks, generating statediff payloads, and transforming the data into Postgres
for i, block := range params.Blocks {
var args statediff.Args
var rcts types.Receipts
if i == 0 {
args = statediff.Args{
OldStateRoot: common.Hash{},
NewStateRoot: block.Root(),
BlockNumber: block.Number(),
BlockHash: block.Hash(),
}
} else {
args = statediff.Args{
OldStateRoot: params.Blocks[i-1].Root(),
NewStateRoot: block.Root(),
BlockNumber: block.Number(),
BlockHash: block.Hash(),
}
rcts = params.Receipts[i-1]
}
diff, err := builder.BuildStateDiffObject(args, params.StateDiffParams)
if err != nil {
return err
}
tx, err := indexer.PushBlock(block, rcts, params.TotalDifficulty)
if err != nil {
return err
}
defer tx.RollbackOnFailure(err)
if !params.SkipStateNodes {
for _, node := range diff.Nodes {
if err = indexer.PushStateNode(tx, node, block.Hash().String()); err != nil {
return err
}
}
}
if !params.SkipIPLDs {
for _, ipld := range diff.IPLDs {
if err := indexer.PushIPLD(tx, ipld); err != nil {
return err
}
}
}
if err = tx.Submit(); err != nil {
return err
}
}
return nil
}
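IndexChain wraps the per-block statediff/indexing loop that the suites otherwise write inline: build a diff between consecutive state roots, push the block, its state nodes and IPLDs, then submit the transaction. A call-site sketch, assuming blocks/receipts/chain/chainConfig/mockTD produced by a MakeChain-style test setup (these variable names are assumptions here):

// blocks, receipts, chain, chainConfig, mockTD come from test setup (assumed)
err := test_helpers.IndexChain(test_helpers.IndexChainParams{
	Blocks:          blocks,
	Receipts:        receipts,
	StateCache:      chain.StateCache(),
	ChainConfig:     chainConfig,
	StateDiffParams: statediff.Params{},
	TotalDifficulty: mockTD,
})
if err != nil {
	panic(err)
}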

View File

@ -19,7 +19,6 @@ package test_helpers
import (
"math/big"
"github.com/cerc-io/plugeth-statediff/test_helpers"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
@ -28,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/statediff/test_helpers"
)
// Test variables
@ -36,8 +36,7 @@ var (
TestBankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
TestBankAddress = crypto.PubkeyToAddress(TestBankKey.PublicKey) //0x71562b71999873DB5b286dF957af199Ec94617F7
TestBankFunds = big.NewInt(100000000)
Genesis = test_helpers.GenesisBlockForTesting(Testdb, TestBankAddress, TestBankFunds, big.NewInt(params.InitialBaseFee), params.MaxGasLimit)
Genesis = test_helpers.GenesisBlockForTesting(Testdb, TestBankAddress, TestBankFunds)
Account1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
Account2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
@ -62,12 +61,11 @@ data function sig: 73d4a13a
// MakeChain creates a chain of n blocks starting at and including parent.
// The returned hash chain is ordered head->parent.
func MakeChain(n int, parent *types.Block, chainGen func(int, *core.BlockGen), config *params.ChainConfig) ([]*types.Block, []types.Receipts, *core.BlockChain) {
func MakeChain(n int, parent *types.Block, chainGen func(int, *core.BlockGen)) ([]*types.Block, []types.Receipts, *core.BlockChain) {
config := params.TestChainConfig
config.LondonBlock = big.NewInt(100)
blocks, receipts := core.GenerateChain(config, parent, ethash.NewFaker(), Testdb, n, chainGen)
chain, err := core.NewBlockChain(Testdb, nil, nil, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
if err != nil {
panic(err)
}
chain, _ := core.NewBlockChain(Testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
return append([]*types.Block{parent}, blocks...), receipts, chain
}

View File

@ -17,24 +17,28 @@
package test_helpers
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"math/big"
"github.com/cerc-io/plugeth-statediff/indexer/ipld"
"github.com/cerc-io/plugeth-statediff/indexer/models"
sdtypes "github.com/cerc-io/plugeth-statediff/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
testhelpers "github.com/ethereum/go-ethereum/statediff/test_helpers"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ethereum/go-ethereum/trie"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-block-format"
"github.com/multiformats/go-multihash"
log "github.com/sirupsen/logrus"
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
)
// Test variables
@ -60,7 +64,6 @@ var (
ReceiptHash: common.HexToHash("0x1"),
Difficulty: big.NewInt(500001),
Extra: []byte{},
ParentHash: Genesis.Hash(),
},
{
Time: 2,
@ -70,11 +73,12 @@ var (
ReceiptHash: common.HexToHash("0x2"),
Difficulty: big.NewInt(500002),
Extra: []byte{},
ParentHash: Genesis.Hash(),
},
}
MockBlock = createNewBlock(&MockHeader, MockTransactions, MockUncles, MockReceipts, trie.NewEmpty(nil))
MockChildHeader = types.Header{
ReceiptsRlp, _ = rlp.EncodeToBytes(MockReceipts)
MockBlock = createNewBlock(&MockHeader, MockTransactions, MockUncles, MockReceipts, new(trie.Trie))
MockHeaderRlp, _ = rlp.EncodeToBytes(MockBlock.Header())
MockChildHeader = types.Header{
Time: 0,
Number: new(big.Int).Add(BlockNumber, common.Big1),
Root: common.HexToHash("0x0"),
@ -84,23 +88,26 @@ var (
Extra: []byte{},
ParentHash: MockBlock.Header().Hash(),
}
MockChild = types.NewBlock(&MockChildHeader, MockTransactions, MockUncles, MockReceipts, trie.NewEmpty(nil))
Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
AnotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
AnotherAddress1 = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476594")
AnotherAddress2 = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476596")
ContractAddress = crypto.CreateAddress(SenderAddr, MockTransactions[2].Nonce())
mockTopic11 = common.HexToHash("0x04")
mockTopic12 = common.HexToHash("0x06")
mockTopic21 = common.HexToHash("0x05")
mockTopic22 = common.HexToHash("0x07")
mockTopic31 = common.HexToHash("0x08")
mockTopic41 = common.HexToHash("0x09")
mockTopic42 = common.HexToHash("0x0a")
mockTopic43 = common.HexToHash("0x0b")
mockTopic51 = common.HexToHash("0x0c")
mockTopic61 = common.HexToHash("0x0d")
MockLog1 = &types.Log{
MockChild = types.NewBlock(&MockChildHeader, MockTransactions, MockUncles, MockReceipts, new(trie.Trie))
MockChildRlp, _ = rlp.EncodeToBytes(MockChild.Header())
Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
AnotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
AnotherAddress1 = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476594")
AnotherAddress2 = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476596")
ContractAddress = crypto.CreateAddress(SenderAddr, MockTransactions[2].Nonce())
ContractHash = crypto.Keccak256Hash(ContractAddress.Bytes()).String()
MockContractByteCode = []byte{0, 1, 2, 3, 4, 5}
mockTopic11 = common.HexToHash("0x04")
mockTopic12 = common.HexToHash("0x06")
mockTopic21 = common.HexToHash("0x05")
mockTopic22 = common.HexToHash("0x07")
mockTopic31 = common.HexToHash("0x08")
mockTopic41 = common.HexToHash("0x09")
mockTopic42 = common.HexToHash("0x0a")
mockTopic43 = common.HexToHash("0x0b")
mockTopic51 = common.HexToHash("0x0c")
mockTopic61 = common.HexToHash("0x0d")
MockLog1 = &types.Log{
Address: Address,
Topics: []common.Hash{mockTopic11, mockTopic12},
Data: []byte{},
@ -150,55 +157,179 @@ var (
Index: 5,
}
rctCIDs, _ = getReceiptCIDs(MockReceipts)
Rct1CID = rctCIDs[0]
Rct4CID = rctCIDs[3]
MockTrxMeta = []models.TxModel{
Tx1 = GetTxnRlp(0, MockTransactions)
Tx2 = GetTxnRlp(1, MockTransactions)
Tx3 = GetTxnRlp(2, MockTransactions)
Tx4 = GetTxnRlp(3, MockTransactions)
rctCIDs, rctIPLDData, _ = eth.GetRctLeafNodeData(MockReceipts)
HeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, MockHeaderRlp, multihash.KECCAK_256)
HeaderMhKey = shared.MultihashKeyFromCID(HeaderCID)
Trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, Tx1, multihash.KECCAK_256)
Trx1MhKey = shared.MultihashKeyFromCID(Trx1CID)
Trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, Tx2, multihash.KECCAK_256)
Trx2MhKey = shared.MultihashKeyFromCID(Trx2CID)
Trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, Tx3, multihash.KECCAK_256)
Trx3MhKey = shared.MultihashKeyFromCID(Trx3CID)
Trx4CID, _ = ipld.RawdataToCid(ipld.MEthTx, Tx4, multihash.KECCAK_256)
Trx4MhKey = shared.MultihashKeyFromCID(Trx4CID)
Rct1CID = rctCIDs[0]
Rct1MhKey = shared.MultihashKeyFromCID(Rct1CID)
Rct2CID = rctCIDs[1]
Rct2MhKey = shared.MultihashKeyFromCID(Rct2CID)
Rct3CID = rctCIDs[2]
Rct3MhKey = shared.MultihashKeyFromCID(Rct3CID)
Rct4CID = rctCIDs[3]
Rct4MhKey = shared.MultihashKeyFromCID(Rct4CID)
State1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, ContractLeafNode, multihash.KECCAK_256)
State1MhKey = shared.MultihashKeyFromCID(State1CID)
State2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, AccountLeafNode, multihash.KECCAK_256)
State2MhKey = shared.MultihashKeyFromCID(State2CID)
StorageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, StorageLeafNode, multihash.KECCAK_256)
StorageMhKey = shared.MultihashKeyFromCID(StorageCID)
Rct1IPLD = rctIPLDData[0]
Rct2IPLD = rctIPLDData[1]
Rct3IPLD = rctIPLDData[2]
Rct4IPLD = rctIPLDData[3]
MockTrxMeta = []models.TxModel{
{
CID: "", // This is empty until we go to publish to ipfs
MhKey: "",
Src: SenderAddr.Hex(),
Dst: Address.String(),
Index: 0,
TxHash: MockTransactions[0].Hash().String(),
Data: []byte{},
},
{
CID: "",
MhKey: "",
Src: SenderAddr.Hex(),
Dst: AnotherAddress.String(),
Index: 1,
TxHash: MockTransactions[1].Hash().String(),
Data: []byte{},
},
{
CID: "",
MhKey: "",
Src: SenderAddr.Hex(),
Dst: "",
Index: 2,
TxHash: MockTransactions[2].Hash().String(),
Data: MockContractByteCode,
},
{
CID: "",
MhKey: "",
Src: SenderAddr.Hex(),
Dst: "",
Index: 3,
TxHash: MockTransactions[3].Hash().String(),
Data: []byte{},
},
}
MockTrxMetaPostPublsh = []models.TxModel{
{
BlockNumber: "1",
CID: Trx1CID.String(),
MhKey: Trx1MhKey,
Src: SenderAddr.Hex(),
Dst: Address.String(),
Index: 0,
TxHash: MockTransactions[0].Hash().String(),
Data: []byte{},
},
{
BlockNumber: "1",
CID: Trx2CID.String(),
MhKey: Trx2MhKey,
Src: SenderAddr.Hex(),
Dst: AnotherAddress.String(),
Index: 1,
TxHash: MockTransactions[1].Hash().String(),
Data: []byte{},
},
{
BlockNumber: "1",
CID: Trx3CID.String(),
MhKey: Trx3MhKey,
Src: SenderAddr.Hex(),
Dst: "",
Index: 2,
TxHash: MockTransactions[2].Hash().String(),
Data: MockContractByteCode,
},
{
BlockNumber: "1",
CID: Trx4CID.String(),
MhKey: Trx4MhKey,
Src: SenderAddr.Hex(),
Dst: AnotherAddress1.String(),
Index: 3,
TxHash: MockTransactions[3].Hash().String(),
Data: []byte{},
},
}
MockRctMeta = []models.ReceiptModel{
{
CID: "",
Contract: "",
LeafCID: "",
LeafMhKey: "",
Contract: "",
ContractHash: "",
},
{
CID: "",
Contract: "",
LeafCID: "",
LeafMhKey: "",
Contract: "",
ContractHash: "",
},
{
CID: "",
Contract: ContractAddress.String(),
LeafCID: "",
LeafMhKey: "",
Contract: ContractAddress.String(),
ContractHash: ContractHash,
},
{
CID: "",
Contract: "",
LeafCID: "",
LeafMhKey: "",
Contract: "",
ContractHash: "",
},
}
MockRctMetaPostPublish = []models.ReceiptModel{
{
BlockNumber: "1",
HeaderID: MockBlock.Hash().String(),
LeafCID: Rct1CID.String(),
LeafMhKey: Rct1MhKey,
Contract: "",
ContractHash: "",
},
{
BlockNumber: "1",
HeaderID: MockBlock.Hash().String(),
LeafCID: Rct2CID.String(),
LeafMhKey: Rct2MhKey,
Contract: "",
ContractHash: "",
},
{
BlockNumber: "1",
HeaderID: MockBlock.Hash().String(),
LeafCID: Rct3CID.String(),
LeafMhKey: Rct3MhKey,
Contract: ContractAddress.String(),
ContractHash: ContractHash,
},
{
BlockNumber: "1",
HeaderID: MockBlock.Hash().String(),
LeafCID: Rct4CID.String(),
LeafMhKey: Rct4MhKey,
Contract: "",
ContractHash: "",
},
}
@ -212,20 +343,21 @@ var (
StorageValue,
})
ContractRoot = "0x821e2556a290c86405f8160a2d662042a431ba456b9db265c79bb837c04be5f0"
contractPath = common.Bytes2Hex([]byte{'\x06'})
ContractLeafKey = crypto.Keccak256(ContractAddress[:])
ContractAccount = types.StateAccount{
Nonce: uint64(1),
nonce1 = uint64(1)
ContractRoot = "0x821e2556a290c86405f8160a2d662042a431ba456b9db265c79bb837c04be5f0"
ContractCodeHash = crypto.Keccak256Hash(MockContractByteCode)
contractPath = common.Bytes2Hex([]byte{'\x06'})
ContractLeafKey = testhelpers.AddressToLeafKey(ContractAddress)
ContractAccount, _ = rlp.EncodeToBytes(&types.StateAccount{
Nonce: nonce1,
Balance: big.NewInt(0),
CodeHash: CodeHash.Bytes(),
CodeHash: ContractCodeHash.Bytes(),
Root: common.HexToHash(ContractRoot),
}
ContractAccountRLP, _ = rlp.EncodeToBytes(&ContractAccount)
ContractPartialPath = common.Hex2Bytes("3114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45")
ContractLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
})
ContractPartialPath = common.Hex2Bytes("3114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45")
ContractLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
ContractPartialPath,
ContractAccountRLP,
ContractAccount,
})
nonce0 = uint64(0)
@ -233,49 +365,67 @@ var (
AccountRoot = "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
AccountCodeHash = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
AccountAddresss = common.HexToAddress("0x0D3ab14BBaD3D99F4203bd7a11aCB94882050E7e")
AccountLeafKey = crypto.Keccak256(AccountAddresss[:])
Account = types.StateAccount{
AccountLeafKey = testhelpers.Account2LeafKey
Account, _ = rlp.EncodeToBytes(&types.StateAccount{
Nonce: nonce0,
Balance: AccountBalance,
CodeHash: AccountCodeHash.Bytes(),
Root: common.HexToHash(AccountRoot),
}
AccountRLP, _ = rlp.EncodeToBytes(&Account)
})
AccountPartialPath = common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45")
AccountLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
AccountPartialPath,
AccountRLP,
Account,
})
MockStateNodes = []sdtypes.StateLeafNode{
MockStateNodes = []sdtypes.StateNode{
{
AccountWrapper: sdtypes.AccountWrapper{
Account: &ContractAccount,
LeafKey: ContractLeafKey,
CID: ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ContractLeafNode)).String(),
},
StorageDiff: []sdtypes.StorageLeafNode{
LeafKey: ContractLeafKey,
Path: []byte{'\x06'},
NodeValue: ContractLeafNode,
NodeType: sdtypes.Leaf,
StorageNodes: []sdtypes.StorageNode{
{
LeafKey: StorageLeafKey,
Value: StorageValue,
CID: ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(StorageLeafNode)).String(),
Path: []byte{},
NodeType: sdtypes.Leaf,
LeafKey: StorageLeafKey,
NodeValue: StorageLeafNode,
},
},
},
{
AccountWrapper: sdtypes.AccountWrapper{
Account: &Account,
LeafKey: AccountLeafKey,
CID: ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(AccountLeafNode)).String(),
},
LeafKey: AccountLeafKey,
Path: []byte{'\x0c'},
NodeValue: AccountLeafNode,
NodeType: sdtypes.Leaf,
StorageNodes: []sdtypes.StorageNode{},
},
}
MockStorageNodes = map[string][]sdtypes.StorageLeafNode{
MockStateMetaPostPublish = []models.StateNodeModel{
{
BlockNumber: "1",
CID: State1CID.String(),
MhKey: State1MhKey,
Path: []byte{'\x06'},
NodeType: 2,
StateKey: common.BytesToHash(ContractLeafKey).Hex(),
},
{
BlockNumber: "1",
CID: State2CID.String(),
MhKey: State2MhKey,
Path: []byte{'\x0c'},
NodeType: 2,
StateKey: common.BytesToHash(AccountLeafKey).Hex(),
},
}
MockStorageNodes = map[string][]sdtypes.StorageNode{
contractPath: {
{
LeafKey: StorageLeafKey,
Value: StorageValue,
CID: ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(StorageLeafNode)).String(),
LeafKey: StorageLeafKey,
NodeValue: StorageLeafNode,
NodeType: sdtypes.Leaf,
Path: []byte{},
},
},
}
@ -289,6 +439,149 @@ var (
StorageNodes: MockStorageNodes,
StateNodes: MockStateNodes,
}
MockConvertedPayloadForChild = eth.ConvertedPayload{
TotalDifficulty: MockChild.Difficulty(),
Block: MockChild,
Receipts: MockReceipts,
TxMetaData: MockTrxMeta,
ReceiptMetaData: MockRctMeta,
StorageNodes: MockStorageNodes,
StateNodes: MockStateNodes,
}
Reward = shared.CalcEthBlockReward(MockBlock.Header(), MockBlock.Uncles(), MockBlock.Transactions(), MockReceipts)
MockCIDWrapper = &eth.CIDWrapper{
BlockNumber: new(big.Int).Set(BlockNumber),
Header: models.HeaderModel{
BlockNumber: "1",
BlockHash: MockBlock.Hash().String(),
ParentHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
CID: HeaderCID.String(),
MhKey: HeaderMhKey,
TotalDifficulty: MockBlock.Difficulty().String(),
Reward: Reward.String(),
StateRoot: MockBlock.Root().String(),
RctRoot: MockBlock.ReceiptHash().String(),
TxRoot: MockBlock.TxHash().String(),
UncleRoot: MockBlock.UncleHash().String(),
Bloom: MockBlock.Bloom().Bytes(),
Timestamp: MockBlock.Time(),
TimesValidated: 1,
Coinbase: "0x0000000000000000000000000000000000000000",
},
Transactions: MockTrxMetaPostPublsh,
Receipts: MockRctMetaPostPublish,
Uncles: []models.UncleModel{},
StateNodes: MockStateMetaPostPublish,
StorageNodes: []models.StorageNodeWithStateKeyModel{
{
BlockNumber: "1",
Path: []byte{},
CID: StorageCID.String(),
MhKey: StorageMhKey,
NodeType: 2,
StateKey: common.BytesToHash(ContractLeafKey).Hex(),
StorageKey: common.BytesToHash(StorageLeafKey).Hex(),
},
},
}
HeaderIPLD, _ = blocks.NewBlockWithCid(MockHeaderRlp, HeaderCID)
Trx1IPLD, _ = blocks.NewBlockWithCid(Tx1, Trx1CID)
Trx2IPLD, _ = blocks.NewBlockWithCid(Tx2, Trx2CID)
Trx3IPLD, _ = blocks.NewBlockWithCid(Tx3, Trx3CID)
Trx4IPLD, _ = blocks.NewBlockWithCid(Tx4, Trx4CID)
State1IPLD, _ = blocks.NewBlockWithCid(ContractLeafNode, State1CID)
State2IPLD, _ = blocks.NewBlockWithCid(AccountLeafNode, State2CID)
StorageIPLD, _ = blocks.NewBlockWithCid(StorageLeafNode, StorageCID)
MockIPLDs = eth.IPLDs{
BlockNumber: new(big.Int).Set(BlockNumber),
Header: models.IPLDModel{
BlockNumber: BlockNumber.String(),
Data: HeaderIPLD.RawData(),
Key: HeaderIPLD.Cid().String(),
},
Transactions: []models.IPLDModel{
{
BlockNumber: BlockNumber.String(),
Data: Trx1IPLD.RawData(),
Key: Trx1IPLD.Cid().String(),
},
{
BlockNumber: BlockNumber.String(),
Data: Trx2IPLD.RawData(),
Key: Trx2IPLD.Cid().String(),
},
{
BlockNumber: BlockNumber.String(),
Data: Trx3IPLD.RawData(),
Key: Trx3IPLD.Cid().String(),
},
{
BlockNumber: BlockNumber.String(),
Data: Trx4IPLD.RawData(),
Key: Trx4IPLD.Cid().String(),
},
},
Receipts: []models.IPLDModel{
{
BlockNumber: BlockNumber.String(),
Data: Rct1IPLD,
Key: Rct1CID.String(),
},
{
BlockNumber: BlockNumber.String(),
Data: Rct2IPLD,
Key: Rct2CID.String(),
},
{
BlockNumber: BlockNumber.String(),
Data: Rct3IPLD,
Key: Rct3CID.String(),
},
{
BlockNumber: BlockNumber.String(),
Data: Rct4IPLD,
Key: Rct4CID.String(),
},
},
StateNodes: []eth.StateNode{
{
StateLeafKey: common.BytesToHash(ContractLeafKey),
Type: sdtypes.Leaf,
IPLD: models.IPLDModel{
BlockNumber: BlockNumber.String(),
Data: State1IPLD.RawData(),
Key: State1IPLD.Cid().String(),
},
Path: []byte{'\x06'},
},
{
StateLeafKey: common.BytesToHash(AccountLeafKey),
Type: sdtypes.Leaf,
IPLD: models.IPLDModel{
BlockNumber: BlockNumber.String(),
Data: State2IPLD.RawData(),
Key: State2IPLD.Cid().String(),
},
Path: []byte{'\x0c'},
},
},
StorageNodes: []eth.StorageNode{
{
StateLeafKey: common.BytesToHash(ContractLeafKey),
StorageLeafKey: common.BytesToHash(StorageLeafKey),
Type: sdtypes.Leaf,
IPLD: models.IPLDModel{
BlockNumber: BlockNumber.String(),
Data: StorageIPLD.RawData(),
Key: StorageIPLD.Cid().String(),
},
Path: []byte{},
},
},
}
LondonBlockNum = new(big.Int).Add(BlockNumber, big.NewInt(2))
MockLondonHeader = types.Header{
@ -323,7 +616,7 @@ var (
Extra: []byte{},
},
}
MockLondonBlock = createNewBlock(&MockLondonHeader, MockLondonTransactions, MockLondonUncles, MockLondonReceipts, trie.NewEmpty(nil))
MockLondonBlock = createNewBlock(&MockLondonHeader, MockLondonTransactions, MockLondonUncles, MockLondonReceipts, new(trie.Trie))
)
func createNewBlock(header *types.Header, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt, hasher types.TrieHasher) *types.Block {
@ -340,7 +633,7 @@ func createNewBlock(header *types.Header, txs []*types.Transaction, uncles []*ty
// createDynamicTransactionsAndReceipts is a helper function to generate signed mock transactions and mock receipts with mock logs
func createDynamicTransactionsAndReceipts(blockNumber *big.Int) (types.Transactions, types.Receipts, common.Address) {
// make transactions
config := *params.TestChainConfig
config := params.TestChainConfig
config.LondonBlock = blockNumber
trx1 := types.NewTx(&types.DynamicFeeTx{
ChainID: config.ChainID,
@ -353,7 +646,7 @@ func createDynamicTransactionsAndReceipts(blockNumber *big.Int) (types.Transacti
Data: []byte{},
})
transactionSigner := types.MakeSigner(&config, blockNumber)
transactionSigner := types.MakeSigner(config, blockNumber)
mockCurve := elliptic.P256()
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
if err != nil {
@ -389,7 +682,7 @@ func createLegacyTransactionsAndReceipts() (types.Transactions, types.Receipts,
// make transactions
trx1 := types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), []byte{})
trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), []byte{})
trx3 := types.NewContractCreation(2, big.NewInt(1500), 75, big.NewInt(150), ContractCode)
trx3 := types.NewContractCreation(2, big.NewInt(1500), 75, big.NewInt(150), MockContractByteCode)
trx4 := types.NewTransaction(3, AnotherAddress1, big.NewInt(2000), 100, big.NewInt(200), []byte{})
transactionSigner := types.MakeSigner(params.MainnetChainConfig, new(big.Int).Set(BlockNumber))
mockCurve := elliptic.P256()
@ -449,14 +742,20 @@ func createLegacyTransactionsAndReceipts() (types.Transactions, types.Receipts,
return types.Transactions{signedTrx1, signedTrx2, signedTrx3, signedTrx4}, types.Receipts{mockReceipt1, mockReceipt2, mockReceipt3, mockReceipt4}, SenderAddr
}
func getReceiptCIDs(rcts []*types.Receipt) ([]cid.Cid, error) {
cids := make([]cid.Cid, len(rcts))
for i, rct := range rcts {
ethRct, err := ipld.NewReceipt(rct)
if err != nil {
return nil, err
}
cids[i] = ethRct.Cid()
}
return cids, nil
func GetTxnRlp(num int, txs types.Transactions) []byte {
buf := new(bytes.Buffer)
txs.EncodeIndex(num, buf)
tx := make([]byte, buf.Len())
copy(tx, buf.Bytes())
buf.Reset()
return tx
}
func GetRctRlp(num int, rcts types.Receipts) []byte {
buf := new(bytes.Buffer)
rcts.EncodeIndex(num, buf)
rct := make([]byte, buf.Len())
copy(rct, buf.Bytes())
buf.Reset()
return rct
}
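GetTxnRlp and GetRctRlp return the index-encoded RLP of a single transaction or receipt; the CID variables above are derived from exactly this output, e.g.:

// same derivation used for Trx1CID above (in-package fragment)
txRLP := GetTxnRlp(0, MockTransactions)
txCID, _ := ipld.RawdataToCid(ipld.MEthTx, txRLP, multihash.KECCAK_256)
_ = txCID // equals Trx1CID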

View File

@ -1,49 +0,0 @@
package eth
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)
// TransactionArgs represents the arguments to construct a new transaction
// or a message call.
type TransactionArgs struct {
From *common.Address `json:"from"`
To *common.Address `json:"to"`
Gas *hexutil.Uint64 `json:"gas"`
GasPrice *hexutil.Big `json:"gasPrice"`
MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"`
MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"`
Value *hexutil.Big `json:"value"`
Nonce *hexutil.Uint64 `json:"nonce"`
// We accept "data" and "input" for backwards-compatibility reasons.
// "input" is the newer name and should be preferred by clients.
// Issue detail: https://github.com/ethereum/go-ethereum/issues/15628
Data *hexutil.Bytes `json:"data"`
Input *hexutil.Bytes `json:"input"`
// Introduced by AccessListTxType transaction.
AccessList *types.AccessList `json:"accessList,omitempty"`
ChainID *hexutil.Big `json:"chainId,omitempty"`
}
// from retrieves the transaction sender address.
func (args *TransactionArgs) from() common.Address {
if args.From == nil {
return common.Address{}
}
return *args.From
}
// data retrieves the transaction calldata. Input field is preferred.
func (args *TransactionArgs) data() []byte {
if args.Input != nil {
return *args.Input
}
if args.Data != nil {
return *args.Data
}
return nil
}

View File

@ -18,18 +18,15 @@ package eth
import (
"errors"
"fmt"
"math/big"
"strconv"
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
"github.com/cerc-io/plugeth-statediff/indexer/models"
sdtypes "github.com/cerc-io/plugeth-statediff/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/sirupsen/logrus"
)
// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction
@ -126,10 +123,10 @@ func (arg *CallArgs) data() []byte {
// ToMessage converts the transaction arguments to the Message type used by the
// core evm. This method is used in calls and traces that do not require a real
// live transaction.
func (arg *CallArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (*core.Message, error) {
func (arg *CallArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (types.Message, error) {
// Reject invalid combinations of pre- and post-1559 fee styles
if arg.GasPrice != nil && (arg.MaxFeePerGas != nil || arg.MaxPriorityFeePerGas != nil) {
return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
return types.Message{}, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
}
// Set sender address or use zero address if none specified.
addr := arg.from()
@ -143,7 +140,7 @@ func (arg *CallArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (*core.Mes
gas = uint64(*arg.Gas)
}
if globalGasCap != 0 && globalGasCap < gas {
log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap)
logrus.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap)
gas = globalGasCap
}
var (
@ -190,22 +187,51 @@ func (arg *CallArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (*core.Mes
if arg.AccessList != nil {
accessList = *arg.AccessList
}
msg := &core.Message{
Nonce: 0,
GasLimit: gas,
GasPrice: gasPrice,
GasFeeCap: gasFeeCap,
GasTipCap: gasTipCap,
To: arg.To,
Value: value,
Data: data,
AccessList: accessList,
SkipAccountChecks: true,
From: addr,
}
msg := types.NewMessage(addr, arg.To, 0, value, gas, gasPrice, gasFeeCap, gasTipCap, data, accessList, true)
return msg, nil
}
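ToMessage is what DoCall and the tracing paths use to turn user-supplied call arguments into the EVM call message (note the return type differs between the two versions shown: *core.Message vs types.Message). A hedged in-package sketch of direct use; the field set follows the CallArgs definition and the gas cap and base fee values are assumptions:

// in-package fragment; To/Gas/Value fields assumed per the CallArgs definition
to := common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
gas := hexutil.Uint64(21000)
args := CallArgs{To: &to, Gas: &gas, Value: (*hexutil.Big)(big.NewInt(1))}
msg, err := args.ToMessage(10000000, big.NewInt(params.InitialBaseFee))
if err != nil {
	panic(err)
}
_ = msg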
// IPLDs is used to package raw IPLD block data fetched from IPFS and returned by the server
// Returned by IPLDFetcher and ResponseFilterer
type IPLDs struct {
BlockNumber *big.Int
TotalDifficulty *big.Int
Header models.IPLDModel
Uncles []models.IPLDModel
Transactions []models.IPLDModel
Receipts []models.IPLDModel
StateNodes []StateNode
StorageNodes []StorageNode
}
type StateNode struct {
Type sdtypes.NodeType
StateLeafKey common.Hash
Path []byte
IPLD models.IPLDModel
}
type StorageNode struct {
Type sdtypes.NodeType
StateLeafKey common.Hash
StorageLeafKey common.Hash
Path []byte
IPLD models.IPLDModel
}
// CIDWrapper is used to direct fetching of IPLDs from IPFS
// Returned by CIDRetriever
// Passed to IPLDFetcher
type CIDWrapper struct {
BlockNumber *big.Int
Header models.HeaderModel
Uncles []models.UncleModel
Transactions []models.TxModel
Receipts []models.ReceiptModel
StateNodes []models.StateNodeModel
StorageNodes []models.StorageNodeWithStateKeyModel
}
// ConvertedPayload is a custom type which packages raw ETH data for publishing to IPFS and filtering to subscribers
// Returned by PayloadConverter
// Passed to IPLDPublisher and ResponseFilterer
@ -215,13 +241,13 @@ type ConvertedPayload struct {
TxMetaData []models.TxModel
Receipts types.Receipts
ReceiptMetaData []models.ReceiptModel
StateNodes []sdtypes.StateLeafNode
StorageNodes map[string][]sdtypes.StorageLeafNode
StateNodes []sdtypes.StateNode
StorageNodes map[string][]sdtypes.StorageNode
}
// LogResult represent a log.
type LogResult struct {
LeafCID string `db:"cid"`
LeafCID string `db:"leaf_cid"`
ReceiptID string `db:"rct_id"`
Address string `db:"address"`
Index int64 `db:"index"`
@ -231,70 +257,10 @@ type LogResult struct {
Topic2 string `db:"topic2"`
Topic3 string `db:"topic3"`
LogLeafData []byte `db:"data"`
RctCID string `db:"rct_cid"`
RctCID string `db:"cid"`
RctStatus uint64 `db:"post_status"`
BlockNumber string `db:"block_number"`
BlockHash string `db:"block_hash"`
TxnIndex int64 `db:"txn_index"`
TxHash string `db:"tx_hash"`
}
// GetSliceResponse holds the response for the eth_getSlice method
type GetSliceResponse struct {
SliceID string `json:"sliceId"`
MetaData GetSliceResponseMetadata `json:"metadata"`
TrieNodes GetSliceResponseTrieNodes `json:"trieNodes"`
Leaves map[string]GetSliceResponseAccount `json:"leaves"` // key: Keccak256Hash(address) in hex (leafKey)
}
func (sr *GetSliceResponse) init(path string, depth int, root common.Hash) {
sr.SliceID = fmt.Sprintf("%s-%d-%s", path, depth, root.String())
sr.MetaData = GetSliceResponseMetadata{
NodeStats: make(map[string]string, 0),
TimeStats: make(map[string]string, 0),
}
sr.Leaves = make(map[string]GetSliceResponseAccount)
sr.TrieNodes = GetSliceResponseTrieNodes{
Stem: make(map[string]string),
Head: make(map[string]string),
Slice: make(map[string]string),
}
}
func (sr *GetSliceResponse) populateMetaData(metaData metaDataFields) {
sr.MetaData.NodeStats["00-stem-and-head-nodes"] = strconv.Itoa(len(sr.TrieNodes.Stem) + len(sr.TrieNodes.Head))
sr.MetaData.NodeStats["01-max-depth"] = strconv.Itoa(metaData.maxDepth)
sr.MetaData.NodeStats["02-total-trie-nodes"] = strconv.Itoa(len(sr.TrieNodes.Stem) + len(sr.TrieNodes.Head) + len(sr.TrieNodes.Slice))
sr.MetaData.NodeStats["03-leaves"] = strconv.Itoa(metaData.leafCount)
sr.MetaData.NodeStats["04-smart-contracts"] = strconv.Itoa(len(sr.Leaves))
sr.MetaData.TimeStats["00-trie-loading"] = strconv.FormatInt(metaData.trieLoadingTime, 10)
sr.MetaData.TimeStats["01-fetch-stem-keys"] = strconv.FormatInt(metaData.stemNodesFetchTime, 10)
sr.MetaData.TimeStats["02-fetch-slice-keys"] = strconv.FormatInt(metaData.sliceNodesFetchTime, 10)
sr.MetaData.TimeStats["03-fetch-leaves-info"] = strconv.FormatInt(metaData.leavesFetchTime, 10)
}
type GetSliceResponseMetadata struct {
TimeStats map[string]string `json:"timeStats"` // stem, state, storage (one by one)
NodeStats map[string]string `json:"nodeStats"` // total, leaves, smart contracts
}
type GetSliceResponseTrieNodes struct {
Stem map[string]string `json:"stem"` // key: Keccak256Hash(data) in hex, value: trie node data in hex
Head map[string]string `json:"head"`
Slice map[string]string `json:"sliceNodes"`
}
type GetSliceResponseAccount struct {
StorageRoot string `json:"storageRoot"`
EVMCode string `json:"evmCode"`
}
type metaDataFields struct {
maxDepth int
leafCount int
trieLoadingTime int64
stemNodesFetchTime int64
sliceNodesFetchTime int64
leavesFetchTime int64
}
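init and populateMetaData assemble the response the eth_getSlice specs above assert on: SliceID is path-depth-root, and the NodeStats keys ("00-stem-and-head-nodes" through "04-smart-contracts") are the ones checked there. A tiny in-package fragment showing the SliceID format (the root value is a placeholder):

// in-package fragment
var sr GetSliceResponse
sr.init("0x06", 2, common.HexToHash("0x01"))
// sr.SliceID == "0x06-2-0x0000000000000000000000000000000000000000000000000000000000000001"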

View File

@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
@ -43,12 +42,12 @@ type IPFSBlockResponse struct {
}
type EthTransactionCIDResponse struct {
CID string `json:"cid"`
TxHash string `json:"txHash"`
Index int32 `json:"index"`
Src string `json:"src"`
Dst string `json:"dst"`
BlockByCid IPFSBlockResponse `json:"blockByCid"`
CID string `json:"cid"`
TxHash string `json:"txHash"`
Index int32 `json:"index"`
Src string `json:"src"`
Dst string `json:"dst"`
BlockByMhKey IPFSBlockResponse `json:"blockByMhKey"`
}
type EthTransactionCIDByTxHash struct {
@ -72,7 +71,7 @@ type EthHeaderCIDResponse struct {
UncleRoot string `json:"uncleRoot"`
Bloom string `json:"bloom"`
EthTransactionCIDsByHeaderId EthTransactionCIDsByHeaderIdResponse `json:"ethTransactionCidsByHeaderId"`
BlockByCid IPFSBlockResponse `json:"blockByCid"`
BlockByMhKey IPFSBlockResponse `json:"blockByMhKey"`
}
type AllEthHeaderCIDsResponse struct {
@ -92,16 +91,10 @@ func NewClient(endpoint string) *Client {
return &Client{client: client}
}
func (c *Client) GetLogs(ctx context.Context, hash common.Hash, addresses []common.Address) ([]LogResponse, error) {
func (c *Client) GetLogs(ctx context.Context, hash common.Hash, address *common.Address) ([]LogResponse, error) {
params := fmt.Sprintf(`blockHash: "%s"`, hash.String())
if addresses != nil {
addressStrings := make([]string, len(addresses))
for i, address := range addresses {
addressStrings[i] = fmt.Sprintf(`"%s"`, address.String())
}
params += fmt.Sprintf(`, addresses: [%s]`, strings.Join(addressStrings, ","))
if address != nil {
params += fmt.Sprintf(`, contract: "%s"`, address.String())
}
getLogsQuery := fmt.Sprintf(`query{
@ -195,7 +188,7 @@ func (c *Client) AllEthHeaderCIDs(ctx context.Context, condition EthHeaderCIDCon
receiptRoot
uncleRoot
bloom
blockByCid {
blockByMhKey {
key
data
}
@ -244,7 +237,7 @@ func (c *Client) EthTransactionCIDByTxHash(ctx context.Context, txHash string) (
index
src
dst
blockByCid {
blockByMhKey {
data
}
}

View File

@ -29,13 +29,14 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
state "github.com/cerc-io/ipld-eth-statedb/direct_by_leaf"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
)
var (
@ -51,7 +52,7 @@ type Account struct {
// getState fetches the StateDB object for an account.
func (a *Account) getState(ctx context.Context) (*state.StateDB, error) {
state, _, err := a.backend.IPLDDirectStateDBAndHeaderByNumberOrHash(ctx, a.blockNrOrHash)
state, _, err := a.backend.StateAndHeaderByNumberOrHash(ctx, a.blockNrOrHash)
return state, err
}
@ -243,7 +244,10 @@ func (t *Transaction) From(ctx context.Context, args BlockNumberArgs) (*Account,
if err != nil || tx == nil {
return nil, err
}
signer := eth.SignerForTx(tx)
var signer types.Signer = types.HomesteadSigner{}
if tx.Protected() {
signer = types.NewEIP155Signer(tx.ChainId())
}
from, _ := types.Sender(signer, tx)
return &Account{
@ -834,7 +838,7 @@ func (b *Block) Call(ctx context.Context, args struct {
return nil, err
}
}
result, err := eth.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, 5*time.Second, b.backend.RPCGasCap())
result, err := eth.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, 5*time.Second, b.backend.RPCGasCap().Uint64())
if err != nil {
return nil, err
}
@ -1005,41 +1009,40 @@ func (r *Resolver) GetStorageAt(ctx context.Context, args struct {
Contract common.Address
Slot common.Hash
}) (*StorageResult, error) {
cid, nodeRLP, err := r.backend.Retriever.RetrieveStorageAndRLP(args.Contract, args.Slot, args.BlockHash)
cid, ipldBlock, rlpValue, err := r.backend.IPLDRetriever.RetrieveStorageAtByAddressAndStorageSlotAndBlockHash(args.Contract, args.Slot, args.BlockHash)
if err != nil {
if err == sql.ErrNoRows {
return &StorageResult{value: []byte{}, cid: "", ipldBlock: []byte{}}, nil
ret := StorageResult{value: []byte{}, cid: "", ipldBlock: []byte{}}
return &ret, nil
}
return nil, err
}
valueRLP, err := eth.DecodeLeafNode(nodeRLP)
if err != nil {
return nil, err
}
if bytes.Equal(valueRLP, eth.EmptyNodeValue) {
return &StorageResult{value: eth.EmptyNodeValue, cid: cid, ipldBlock: nodeRLP}, nil
if bytes.Equal(rlpValue, eth.EmptyNodeValue) {
return &StorageResult{value: eth.EmptyNodeValue, cid: cid, ipldBlock: ipldBlock}, nil
}
var value interface{}
err = rlp.DecodeBytes(valueRLP, &value)
err = rlp.DecodeBytes(rlpValue, &value)
if err != nil {
return nil, err
}
return &StorageResult{value: value.([]byte), cid: cid, ipldBlock: nodeRLP}, nil
ret := StorageResult{value: value.([]byte), cid: cid, ipldBlock: ipldBlock}
return &ret, nil
}
func (r *Resolver) GetLogs(ctx context.Context, args struct {
BlockHash common.Hash
BlockNumber *BigInt
Addresses *[]common.Address
BlockHash common.Hash
Contract *common.Address
}) (*[]*Log, error) {
var filter eth.ReceiptFilter
if args.Addresses != nil {
filter.LogAddresses = make([]string, len(*args.Addresses))
for i, address := range *args.Addresses {
filter.LogAddresses[i] = address.String()
}
var filter eth.ReceiptFilter
if args.Contract != nil {
filter.LogAddresses = []string{args.Contract.String()}
}
// Begin tx
@ -1048,7 +1051,7 @@ func (r *Resolver) GetLogs(ctx context.Context, args struct {
return nil, err
}
filteredLogs, err := r.backend.Retriever.RetrieveFilteredGQLLogs(tx, filter, &args.BlockHash, args.BlockNumber.ToInt())
filteredLogs, err := r.backend.Retriever.RetrieveFilteredGQLLogs(tx, filter, &args.BlockHash)
if err != nil {
return nil, err
}
@ -1153,7 +1156,7 @@ func (t EthTransactionCID) Dst(ctx context.Context) string {
return t.dst
}
func (t EthTransactionCID) BlockByCid(ctx context.Context) IPFSBlock {
func (t EthTransactionCID) BlockByMhKey(ctx context.Context) IPFSBlock {
return t.ipfsBlock
}
@ -1242,7 +1245,7 @@ func (h EthHeaderCID) EthTransactionCidsByHeaderId(ctx context.Context) EthTrans
return EthTransactionCIDsConnection{nodes: h.transactions}
}
func (h EthHeaderCID) BlockByCid(ctx context.Context) IPFSBlock {
func (h EthHeaderCID) BlockByMhKey(ctx context.Context) IPFSBlock {
return h.ipfsBlock
}
@ -1265,7 +1268,7 @@ func (r *Resolver) AllEthHeaderCids(ctx context.Context, args struct {
var headerCIDs []eth.HeaderCIDRecord
var err error
if args.Condition.BlockHash != nil {
headerCID, err := r.backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(common.HexToHash(*args.Condition.BlockHash), args.Condition.BlockNumber.ToInt())
headerCID, err := r.backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(common.HexToHash(*args.Condition.BlockHash))
if err != nil {
if !strings.Contains(err.Error(), "not found") {
return nil, err
@ -1282,6 +1285,22 @@ func (r *Resolver) AllEthHeaderCids(ctx context.Context, args struct {
return nil, fmt.Errorf("provide block number or block hash")
}
// Begin tx
tx, err := r.backend.DB.Beginx()
if err != nil {
return nil, err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
var resultNodes []*EthHeaderCID
for _, headerCID := range headerCIDs {
var blockNumber BigInt
@ -1303,7 +1322,7 @@ func (r *Resolver) AllEthHeaderCids(ctx context.Context, args struct {
td: td,
txRoot: headerCID.TxRoot,
receiptRoot: headerCID.RctRoot,
uncleRoot: headerCID.UnclesHash,
uncleRoot: headerCID.UncleRoot,
bloom: Bytes(headerCID.Bloom).String(),
ipfsBlock: IPFSBlock{
key: headerCID.IPLD.Key,
@ -1330,12 +1349,9 @@ func (r *Resolver) AllEthHeaderCids(ctx context.Context, args struct {
}
func (r *Resolver) EthTransactionCidByTxHash(ctx context.Context, args struct {
TxHash string
BlockNumber *BigInt
TxHash string
}) (*EthTransactionCID, error) {
// Need not check args.BlockNumber for nil as .ToInt() uses a pointer receiver and returns nil if BlockNumber is nil
// https://stackoverflow.com/questions/42238624/calling-a-method-on-a-nil-struct-pointer-doesnt-panic-why-not
txCID, err := r.backend.Retriever.RetrieveTxCIDByHash(args.TxHash, args.BlockNumber.ToInt())
txCID, err := r.backend.Retriever.RetrieveTxCIDByHash(args.TxHash)
if err != nil {
return nil, err

View File

@ -17,13 +17,19 @@
package graphql_test
import (
"io/ioutil"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
)
func TestGraphQL(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "graphql test suite")
}
var _ = BeforeSuite(func() {
logrus.SetOutput(ioutil.Discard)
})

View File

@ -22,10 +22,6 @@ import (
"math/big"
"strconv"
statediff "github.com/cerc-io/plugeth-statediff"
"github.com/cerc-io/plugeth-statediff/adapt"
"github.com/cerc-io/plugeth-statediff/indexer/ipld"
sdtypes "github.com/cerc-io/plugeth-statediff/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
@ -34,148 +30,151 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/jmoiron/sqlx"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth/test_helpers"
"github.com/cerc-io/ipld-eth-server/v5/pkg/graphql"
"github.com/cerc-io/ipld-eth-server/v5/pkg/shared"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth/test_helpers"
"github.com/cerc-io/ipld-eth-server/v4/pkg/graphql"
"github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
ethServerShared "github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
)
const (
gqlEndPoint = "127.0.0.1:8083"
)
var (
randomAddr = common.HexToAddress("0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f")
randomHash = crypto.Keccak256Hash(randomAddr.Bytes())
blocks []*types.Block
receipts []types.Receipts
chain *core.BlockChain
db *sqlx.DB
backend *eth.Backend
graphQLServer *graphql.Service
chainConfig = &*params.TestChainConfig
client = graphql.NewClient(fmt.Sprintf("http://%s/graphql", gqlEndPoint))
mockTD = big.NewInt(1337)
ctx = context.Background()
nonCanonBlockHash common.Hash
nonCanonContractAddress common.Address
)
var _ = BeforeSuite(func() {
var err error
db = shared.SetupDB()
backend, err = eth.NewEthBackend(db, &eth.Config{
ChainConfig: chainConfig,
VMConfig: vm.Config{},
RPCGasCap: big.NewInt(10000000000),
GroupCacheConfig: &shared.GroupCacheConfig{
StateDB: shared.GroupConfig{
Name: "graphql_test",
CacheSizeInMB: 8,
CacheExpiryInMins: 60,
LogStatsIntervalInSecs: 0,
},
},
})
Expect(err).ToNot(HaveOccurred())
// make the test blockchain (and state)
chainConfig.LondonBlock = big.NewInt(100)
blocks, receipts, chain = test_helpers.MakeChain(5, test_helpers.Genesis, test_helpers.TestChainGen, chainConfig)
indexer := shared.SetupTestStateDiffIndexer(context.Background(), chainConfig, test_helpers.Genesis.Hash())
builder := statediff.NewBuilder(adapt.GethStateView(chain.StateCache()))
// Insert some non-canonical data into the database so that we test our ability to discern canonicity
nonCanonBlockHash = test_helpers.MockBlock.Hash()
nonCanonContractAddress = test_helpers.ContractAddr
tx, err := indexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
// The non-canonical header has a child
tx, err = indexer.PushBlock(test_helpers.MockChild, test_helpers.MockReceipts, test_helpers.MockChild.Difficulty())
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
ipld := sdtypes.IPLD{
CID: ipld.Keccak256ToCid(ipld.RawBinary, test_helpers.CodeHash.Bytes()).String(),
Content: test_helpers.ContractCode,
}
err = indexer.PushIPLD(tx, ipld)
Expect(err).ToNot(HaveOccurred())
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
// iterate over the blocks, generating statediff payloads, and transforming the data into Postgres
for i, block := range blocks {
var args statediff.Args
var rcts types.Receipts
if i == 0 {
args = statediff.Args{
OldStateRoot: common.Hash{},
NewStateRoot: block.Root(),
BlockNumber: block.Number(),
BlockHash: block.Hash(),
}
} else {
args = statediff.Args{
OldStateRoot: blocks[i-1].Root(),
NewStateRoot: block.Root(),
BlockNumber: block.Number(),
BlockHash: block.Hash(),
}
rcts = receipts[i-1]
}
tx, err = indexer.PushBlock(block, rcts, mockTD)
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)
diff, err := builder.BuildStateDiffObject(args, statediff.Params{})
Expect(err).ToNot(HaveOccurred())
for _, node := range diff.Nodes {
err = indexer.PushStateNode(tx, node, block.Hash().String())
Expect(err).ToNot(HaveOccurred())
}
for _, ipld := range diff.IPLDs {
err = indexer.PushIPLD(tx, ipld)
Expect(err).ToNot(HaveOccurred())
}
err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
}
graphQLServer, err = graphql.New(backend, gqlEndPoint, nil, []string{"*"}, rpc.HTTPTimeouts{})
Expect(err).ToNot(HaveOccurred())
err = graphQLServer.Start(nil)
Expect(err).ToNot(HaveOccurred())
})
var _ = AfterSuite(func() {
err := graphQLServer.Stop()
Expect(err).ToNot(HaveOccurred())
shared.TearDownDB(db)
chain.Stop()
})
var _ = Describe("GraphQL", func() {
const (
gqlEndPoint = "127.0.0.1:8083"
)
var (
randomAddr = common.HexToAddress("0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f")
randomHash = crypto.Keccak256Hash(randomAddr.Bytes())
blocks []*types.Block
receipts []types.Receipts
chain *core.BlockChain
db *sqlx.DB
blockHashes []common.Hash
backend *eth.Backend
graphQLServer *graphql.Service
chainConfig = params.TestChainConfig
mockTD = big.NewInt(1337)
client = graphql.NewClient(fmt.Sprintf("http://%s/graphql", gqlEndPoint))
ctx = context.Background()
blockHash common.Hash
contractAddress common.Address
)
It("test init", func() {
var err error
db = shared.SetupDB()
transformer := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
backend, err = eth.NewEthBackend(db, &eth.Config{
ChainConfig: chainConfig,
VMConfig: vm.Config{},
RPCGasCap: big.NewInt(10000000000),
GroupCacheConfig: &ethServerShared.GroupCacheConfig{
StateDB: ethServerShared.GroupConfig{
Name: "graphql_test",
CacheSizeInMB: 8,
CacheExpiryInMins: 60,
LogStatsIntervalInSecs: 0,
},
},
})
Expect(err).ToNot(HaveOccurred())
// make the test blockchain (and state)
blocks, receipts, chain = test_helpers.MakeChain(5, test_helpers.Genesis, test_helpers.TestChainGen)
params := statediff.Params{
IntermediateStateNodes: true,
IntermediateStorageNodes: true,
}
// iterate over the blocks, generating statediff payloads, and transforming the data into Postgres
builder := statediff.NewBuilder(chain.StateCache())
for i, block := range blocks {
blockHashes = append(blockHashes, block.Hash())
var args statediff.Args
var rcts types.Receipts
if i == 0 {
args = statediff.Args{
OldStateRoot: common.Hash{},
NewStateRoot: block.Root(),
BlockNumber: block.Number(),
BlockHash: block.Hash(),
}
} else {
args = statediff.Args{
OldStateRoot: blocks[i-1].Root(),
NewStateRoot: block.Root(),
BlockNumber: block.Number(),
BlockHash: block.Hash(),
}
rcts = receipts[i-1]
}
var diff sdtypes.StateObject
diff, err = builder.BuildStateDiffObject(args, params)
Expect(err).ToNot(HaveOccurred())
tx, err := transformer.PushBlock(block, rcts, mockTD)
Expect(err).ToNot(HaveOccurred())
for _, node := range diff.Nodes {
err = transformer.PushStateNode(tx, node, block.Hash().String())
Expect(err).ToNot(HaveOccurred())
}
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
}
// Insert some non-canonical data into the database so that we test our ability to discern canonicity
indexAndPublisher := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
blockHash = test_helpers.MockBlock.Hash()
contractAddress = test_helpers.ContractAddr
tx, err := indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
// The non-canonical header has a child
tx, err = indexAndPublisher.PushBlock(test_helpers.MockChild, test_helpers.MockReceipts, test_helpers.MockChild.Difficulty())
Expect(err).ToNot(HaveOccurred())
ccHash := sdtypes.CodeAndCodeHash{
Hash: test_helpers.CodeHash,
Code: test_helpers.ContractCode,
}
err = indexAndPublisher.PushCodeAndCodeHash(tx, ccHash)
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
graphQLServer, err = graphql.New(backend, gqlEndPoint, nil, []string{"*"}, rpc.HTTPTimeouts{})
Expect(err).ToNot(HaveOccurred())
err = graphQLServer.Start(nil)
Expect(err).ToNot(HaveOccurred())
})
defer It("test teardown", func() {
err := graphQLServer.Stop()
Expect(err).ToNot(HaveOccurred())
shared.TearDownDB(db)
chain.Stop()
})
Describe("eth_getLogs", func() {
It("Retrieves logs that matches the provided blockHash and contract address", func() {
logs, err := client.GetLogs(ctx, nonCanonBlockHash, []common.Address{nonCanonContractAddress})
logs, err := client.GetLogs(ctx, blockHash, &contractAddress)
Expect(err).ToNot(HaveOccurred())
expectedLogs := []graphql.LogResponse{
@ -192,7 +191,7 @@ var _ = Describe("GraphQL", func() {
})
It("Retrieves logs for the failed receipt status that matches the provided blockHash and another contract address", func() {
logs, err := client.GetLogs(ctx, nonCanonBlockHash, []common.Address{test_helpers.AnotherAddress2})
logs, err := client.GetLogs(ctx, blockHash, &test_helpers.AnotherAddress2)
Expect(err).ToNot(HaveOccurred())
expectedLogs := []graphql.LogResponse{
@ -208,38 +207,14 @@ var _ = Describe("GraphQL", func() {
Expect(logs).To(Equal(expectedLogs))
})
It("Retrieves logs that matches the provided blockHash and multiple contract addresses", func() {
logs, err := client.GetLogs(ctx, nonCanonBlockHash, []common.Address{nonCanonContractAddress, test_helpers.AnotherAddress2})
Expect(err).ToNot(HaveOccurred())
expectedLogs := []graphql.LogResponse{
{
Topics: test_helpers.MockLog1.Topics,
Data: hexutil.Bytes(test_helpers.MockLog1.Data),
Transaction: graphql.TransactionResponse{Hash: test_helpers.MockTransactions[0].Hash()},
ReceiptCID: test_helpers.Rct1CID.String(),
Status: int32(test_helpers.MockReceipts[0].Status),
},
{
Topics: test_helpers.MockLog6.Topics,
Data: hexutil.Bytes(test_helpers.MockLog6.Data),
Transaction: graphql.TransactionResponse{Hash: test_helpers.MockTransactions[3].Hash()},
ReceiptCID: test_helpers.Rct4CID.String(),
Status: int32(test_helpers.MockReceipts[3].Status),
},
}
Expect(logs).To(Equal(expectedLogs))
})
It("Retrieves all the logs for the receipt that matches the provided blockHash and nil contract address", func() {
logs, err := client.GetLogs(ctx, nonCanonBlockHash, nil)
logs, err := client.GetLogs(ctx, blockHash, nil)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(6))
})
It("Retrieves logs with random hash", func() {
logs, err := client.GetLogs(ctx, randomHash, []common.Address{nonCanonContractAddress})
logs, err := client.GetLogs(ctx, randomHash, &contractAddress)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(0))
})
@ -247,31 +222,31 @@ var _ = Describe("GraphQL", func() {
Describe("eth_getStorageAt", func() {
It("Retrieves the storage value at the provided contract address and storage leaf key at the block with the provided hash", func() {
storageRes, err := client.GetStorageAt(ctx, blocks[2].Hash(), nonCanonContractAddress, test_helpers.IndexOne)
storageRes, err := client.GetStorageAt(ctx, blockHashes[2], contractAddress, test_helpers.IndexOne)
Expect(err).ToNot(HaveOccurred())
Expect(storageRes.Value).To(Equal(common.HexToHash("01")))
storageRes, err = client.GetStorageAt(ctx, blocks[3].Hash(), nonCanonContractAddress, test_helpers.IndexOne)
storageRes, err = client.GetStorageAt(ctx, blockHashes[3], contractAddress, test_helpers.IndexOne)
Expect(err).ToNot(HaveOccurred())
Expect(storageRes.Value).To(Equal(common.HexToHash("03")))
storageRes, err = client.GetStorageAt(ctx, blocks[4].Hash(), nonCanonContractAddress, test_helpers.IndexOne)
storageRes, err = client.GetStorageAt(ctx, blockHashes[4], contractAddress, test_helpers.IndexOne)
Expect(err).ToNot(HaveOccurred())
Expect(storageRes.Value).To(Equal(common.HexToHash("09")))
})
It("Retrieves empty data if it tries to access a contract at a blockHash which does not exist", func() {
storageRes, err := client.GetStorageAt(ctx, blocks[0].Hash(), nonCanonContractAddress, test_helpers.IndexOne)
storageRes, err := client.GetStorageAt(ctx, blockHashes[0], contractAddress, test_helpers.IndexOne)
Expect(err).ToNot(HaveOccurred())
Expect(storageRes.Value).To(Equal(common.Hash{}))
storageRes, err = client.GetStorageAt(ctx, blocks[1].Hash(), nonCanonContractAddress, test_helpers.IndexOne)
storageRes, err = client.GetStorageAt(ctx, blockHashes[1], contractAddress, test_helpers.IndexOne)
Expect(err).ToNot(HaveOccurred())
Expect(storageRes.Value).To(Equal(common.Hash{}))
})
It("Retrieves empty data if it tries to access a contract slot which does not exist", func() {
storageRes, err := client.GetStorageAt(ctx, blocks[3].Hash(), nonCanonContractAddress, randomHash.Hex())
storageRes, err := client.GetStorageAt(ctx, blockHashes[3], contractAddress, randomHash.Hex())
Expect(err).ToNot(HaveOccurred())
Expect(storageRes.Value).To(Equal(common.Hash{}))
})
@ -297,7 +272,7 @@ var _ = Describe("GraphQL", func() {
allEthHeaderCIDsResp, err := client.AllEthHeaderCIDs(ctx, graphql.EthHeaderCIDCondition{BlockHash: &blockHash})
Expect(err).ToNot(HaveOccurred())
headerCID, err := backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(blocks[1].Hash(), nil)
headerCID, err := backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(blocks[1].Hash())
Expect(err).ToNot(HaveOccurred())
Expect(len(allEthHeaderCIDsResp.Nodes)).To(Equal(1))
@ -312,12 +287,12 @@ var _ = Describe("GraphQL", func() {
ethTransactionCIDResp, err := client.EthTransactionCIDByTxHash(ctx, txHash)
Expect(err).ToNot(HaveOccurred())
txCID, err := backend.Retriever.RetrieveTxCIDByHash(txHash, nil)
txCID, err := backend.Retriever.RetrieveTxCIDByHash(txHash)
Expect(err).ToNot(HaveOccurred())
compareEthTxCID(*ethTransactionCIDResp, txCID)
Expect(ethTransactionCIDResp.BlockByCid.Data).To(Equal(graphql.Bytes(txCID.IPLD.Data).String()))
Expect(ethTransactionCIDResp.BlockByMhKey.Data).To(Equal(graphql.Bytes(txCID.IPLD.Data).String()))
})
})
})
@ -338,7 +313,7 @@ func compareEthHeaderCID(ethHeaderCID graphql.EthHeaderCIDResponse, headerCID et
Expect(ethHeaderCID.Td).To(Equal(*new(graphql.BigInt).SetUint64(uint64(td))))
Expect(ethHeaderCID.TxRoot).To(Equal(headerCID.TxRoot))
Expect(ethHeaderCID.ReceiptRoot).To(Equal(headerCID.RctRoot))
Expect(ethHeaderCID.UncleRoot).To(Equal(headerCID.UnclesHash))
Expect(ethHeaderCID.UncleRoot).To(Equal(headerCID.UncleRoot))
Expect(ethHeaderCID.Bloom).To(Equal(graphql.Bytes(headerCID.Bloom).String()))
for tIdx, txCID := range headerCID.TransactionCIDs {
@ -346,8 +321,8 @@ func compareEthHeaderCID(ethHeaderCID graphql.EthHeaderCIDResponse, headerCID et
compareEthTxCID(ethTxCID, txCID)
}
Expect(ethHeaderCID.BlockByCid.Data).To(Equal(graphql.Bytes(headerCID.IPLD.Data).String()))
Expect(ethHeaderCID.BlockByCid.Key).To(Equal(headerCID.IPLD.Key))
Expect(ethHeaderCID.BlockByMhKey.Data).To(Equal(graphql.Bytes(headerCID.IPLD.Data).String()))
Expect(ethHeaderCID.BlockByMhKey.Key).To(Equal(headerCID.IPLD.Key))
}
func compareEthTxCID(ethTxCID graphql.EthTransactionCIDResponse, txCID eth.TransactionCIDRecord) {

View File

@ -292,7 +292,7 @@ const schema string = `
index: Int!
src: String!
dst: String!
blockByCid: IPFSBlock!
blockByMhKey: IPFSBlock!
}
type EthTransactionCidsConnection {
@ -317,7 +317,7 @@ const schema string = `
uncleRoot: String!
bloom: String!
ethTransactionCidsByHeaderId: EthTransactionCidsConnection!
blockByCid: IPFSBlock!
blockByMhKey: IPFSBlock!
}
type EthHeaderCidsConnection {
@ -343,12 +343,12 @@ const schema string = `
getStorageAt(blockHash: Bytes32!, contract: Address!, slot: Bytes32!): StorageResult
# Get contract logs by block hash and contract address.
getLogs(blockHash: Bytes32!, blockNumber: BigInt, addresses: [Address!]): [Log!]
getLogs(blockHash: Bytes32!, contract: Address): [Log!]
# PostGraphile alternative to get headers with transactions using block number or block hash.
allEthHeaderCids(condition: EthHeaderCidCondition): EthHeaderCidsConnection
# PostGraphile alternative to get transactions using transaction hash.
ethTransactionCidByTxHash(txHash: String!, blockNumber: BigInt): EthTransactionCid
ethTransactionCidByTxHash(txHash: String!): EthTransactionCid
}
`
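As a usage note on the getLogs field above, the repo's own GraphQL client wraps it; a sketch of calling the multi-address variant follows. The endpoint and the v5 import path are assumptions, and the slice-of-addresses signature follows the form exercised in the tests earlier in this diff.

package sketch

import (
    "context"
    "fmt"

    "github.com/cerc-io/ipld-eth-server/v5/pkg/graphql"
    "github.com/ethereum/go-ethereum/common"
)

// fetchLogs filters logs by block hash and contract addresses through the
// GraphQL client, mirroring the test calls above.
func fetchLogs(ctx context.Context, endpoint string, blockHash common.Hash, contracts []common.Address) error {
    client := graphql.NewClient(fmt.Sprintf("http://%s/graphql", endpoint))
    logs, err := client.GetLogs(ctx, blockHash, contracts)
    if err != nil {
        return err
    }
    fmt.Printf("matched %d logs\n", len(logs))
    return nil
}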

View File

@ -27,9 +27,9 @@ import (
"github.com/ethereum/go-ethereum/rpc"
"github.com/graph-gophers/graphql-go"
"github.com/graph-gophers/graphql-go/relay"
"github.com/sirupsen/logrus"
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
)
// Service encapsulates a GraphQL service.
@ -76,12 +76,12 @@ func (s *Service) Start(server *p2p.Server) error {
if err != nil {
utils.Fatalf("Could not start RPC api: %v", err)
}
extapiURL := fmt.Sprintf("http://%v", addr)
log.Infof("GraphQL endpoint opened at %s", extapiURL)
extapiURL := fmt.Sprintf("http://%v/", addr)
logrus.Infof("graphQL endpoint opened for url %s", extapiURL)
return nil
}
// NewHandler returns a new `http.Handler` that will answer GraphQL queries.
// newHandler returns a new `http.Handler` that will answer GraphQL queries.
// It additionally exports an interactive query browser on the / endpoint.
func NewHandler(backend *eth.Backend) (http.Handler, error) {
q := Resolver{backend}
@ -105,7 +105,7 @@ func (s *Service) Stop() error {
if s.listener != nil {
s.listener.Close()
s.listener = nil
log.Debugf("graphQL endpoint closed for url %s", fmt.Sprintf("http://%s", s.endpoint))
logrus.Debugf("graphQL endpoint closed for url %s", fmt.Sprintf("http://%s", s.endpoint))
}
return nil
}

View File

@ -1,244 +0,0 @@
// Copyright © 2023 Cerc
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package log
import (
"context"
"fmt"
"io"
"os"
"runtime"
"strings"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
)
const (
CtxKeyApiMethod = "api_method"
CtxKeyApiParams = "api_params"
CtxKeyApiReqId = "api_reqid"
CtxKeyBlockHash = "block_hash"
CtxKeyBlockNumber = "block_num"
CtxKeyConn = "conn"
CtxKeyDuration = "duration"
CtxKeyError = "err"
CtxKeyUniqId = "uuid"
CtxKeyUserId = "user_id"
)
// TODO: Allow registering arbitrary keys.
var registeredKeys = []string{
CtxKeyApiMethod,
CtxKeyApiParams,
CtxKeyApiReqId,
CtxKeyBlockHash,
CtxKeyBlockNumber,
CtxKeyConn,
CtxKeyDuration,
CtxKeyError,
CtxKeyUniqId,
CtxKeyUserId,
}
const FatalLevel = logrus.FatalLevel
const ErrorLevel = logrus.ErrorLevel
const InfoLevel = logrus.InfoLevel
const DebugLevel = logrus.DebugLevel
const TraceLevel = logrus.TraceLevel
type Entry = logrus.Entry
type Level = logrus.Level
func WithFieldsFromContext(ctx context.Context) *Entry {
entry := logrus.WithContext(ctx)
for _, key := range registeredKeys {
if value := ctx.Value(key); value != nil {
entry = entry.WithField(key, value)
}
}
return entry
}
func Fatalx(ctx context.Context, args ...interface{}) {
WithFieldsFromContext(ctx).Fatal(args...)
}
func Errorx(ctx context.Context, args ...interface{}) {
WithFieldsFromContext(ctx).Error(args...)
}
func Warnx(ctx context.Context, args ...interface{}) {
WithFieldsFromContext(ctx).Warn(args...)
}
func Infox(ctx context.Context, args ...interface{}) {
WithFieldsFromContext(ctx).Info(args...)
}
func Debugx(ctx context.Context, args ...interface{}) {
WithFieldsFromContext(ctx).Debug(args...)
}
func Tracex(ctx context.Context, args ...interface{}) {
WithFieldsFromContext(ctx).Trace(args...)
}
func Errorxf(ctx context.Context, format string, args ...interface{}) {
WithFieldsFromContext(ctx).Errorf(format, args...)
}
func Warnxf(ctx context.Context, format string, args ...interface{}) {
WithFieldsFromContext(ctx).Warnf(format, args...)
}
func Infoxf(ctx context.Context, format string, args ...interface{}) {
WithFieldsFromContext(ctx).Infof(format, args...)
}
func Debugxf(ctx context.Context, format string, args ...interface{}) {
WithFieldsFromContext(ctx).Debugf(format, args...)
}
func Tracexf(ctx context.Context, format string, args ...interface{}) {
WithFieldsFromContext(ctx).Tracef(format, args...)
}
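The context-aware wrappers above only attach fields for the registered keys, so callers just stash request metadata on the context once; a minimal sketch (the v5 import path for this log package is an assumption):

package sketch

import (
    "context"

    "github.com/cerc-io/ipld-eth-server/v5/pkg/log"
)

// handleCall threads request metadata through the context so that every
// log.*x call below automatically carries it as structured fields.
func handleCall(reqID, method string) {
    ctx := context.Background()
    ctx = context.WithValue(ctx, log.CtxKeyApiReqId, reqID)
    ctx = context.WithValue(ctx, log.CtxKeyApiMethod, method)

    log.Debugx(ctx, "START")
    // ... serve the request ...
    log.Infoxf(ctx, "served %s", method)
}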
func Fatal(args ...interface{}) {
logrus.Fatal(args...)
}
func Error(args ...interface{}) {
logrus.Error(args...)
}
func Warn(args ...interface{}) {
logrus.Warn(args...)
}
func Info(args ...interface{}) {
logrus.Info(args...)
}
func Debug(args ...interface{}) {
logrus.Debug(args...)
}
func Trace(args ...interface{}) {
logrus.Trace(args...)
}
func Fatalf(format string, args ...interface{}) {
logrus.Fatalf(format, args...)
}
func Errorf(format string, args ...interface{}) {
logrus.Errorf(format, args...)
}
func Warnf(format string, args ...interface{}) {
logrus.Warnf(format, args...)
}
func Infof(format string, args ...interface{}) {
logrus.Infof(format, args...)
}
func Debugf(format string, args ...interface{}) {
logrus.Debugf(format, args...)
}
func Tracef(format string, args ...interface{}) {
logrus.Tracef(format, args...)
}
func SetOutput(out io.Writer) {
logrus.SetOutput(out)
}
func SetLevel(lvl Level) {
logrus.SetLevel(lvl)
}
func IsLevelEnabled(lvl Level) bool {
return logrus.IsLevelEnabled(lvl)
}
func WithError(err error) *Entry {
return logrus.WithError(err)
}
func WithField(field string, value interface{}) *Entry {
return logrus.WithField(field, value)
}
func Init() error {
// Set the output.
logFile := viper.GetString("log.file")
if logFile != "" {
file, err := os.OpenFile(logFile,
os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0640)
if err == nil {
Infof("Directing output to %s", logFile)
SetOutput(file)
} else {
SetOutput(os.Stdout)
Info("Failed to logrus.to file, using default stdout")
}
} else {
SetOutput(os.Stdout)
}
// Set the level.
lvl, err := logrus.ParseLevel(viper.GetString("log.level"))
if err != nil {
return err
}
SetLevel(lvl)
formatter := &logrus.TextFormatter{
FullTimestamp: true,
}
// Show file/line number only at Trace level.
if lvl >= TraceLevel {
logrus.SetReportCaller(true)
// We need to exclude this wrapper code, logrus itself, and the runtime from the stack to show anything useful.
// cf. https://github.com/sirupsen/logrus/pull/973
formatter.CallerPrettyfier = func(frame *runtime.Frame) (function string, file string) {
pcs := make([]uintptr, 50)
_ = runtime.Callers(0, pcs)
frames := runtime.CallersFrames(pcs)
// Filter log wrapper / logrus / runtime frames.
for next, again := frames.Next(); again; next, again = frames.Next() {
if !strings.Contains(next.File, "sirupsen/logrus") &&
!strings.HasPrefix(next.Function, "runtime.") &&
!strings.Contains(next.File, "ipld-eth-server/pkg/log") {
return next.Function, fmt.Sprintf("%s:%d", next.File, next.Line)
}
}
// Fallback to the raw info.
return frame.Function, fmt.Sprintf("%s:%d", frame.File, frame.Line)
}
}
logrus.SetFormatter(formatter)
Info("Log level set to ", lvl.String())
return nil
}
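Init is driven entirely by the viper keys log.file and log.level (bound to LOG_FILE and LOG_LEVEL elsewhere in this diff); a small sketch of configuring it programmatically, again assuming the v5 import path:

package sketch

import (
    "github.com/spf13/viper"

    "github.com/cerc-io/ipld-eth-server/v5/pkg/log"
)

// configureLogging picks a level and leaves log.file empty so output stays on
// stdout, then lets Init apply the formatter and caller reporting.
func configureLogging() error {
    viper.Set("log.file", "")       // empty => stdout
    viper.Set("log.level", "debug") // any level logrus.ParseLevel accepts
    return log.Init()
}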

View File

@ -17,10 +17,10 @@
package net_test
import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/cerc-io/ipld-eth-server/v5/pkg/net"
"github.com/cerc-io/ipld-eth-server/v4/pkg/net"
)
var _ = Describe("API", func() {

View File

@ -20,9 +20,9 @@ import (
"io/ioutil"
"testing"
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
)
func TestNetSuite(t *testing.T) {
@ -31,5 +31,5 @@ func TestNetSuite(t *testing.T) {
}
var _ = BeforeSuite(func() {
log.SetOutput(ioutil.Discard)
logrus.SetOutput(ioutil.Discard)
})

View File

@ -17,107 +17,25 @@
package prom
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
"github.com/google/uuid"
"github.com/ethereum/go-ethereum/rpc"
)
const (
jsonMethod = "method"
jsonParams = "params"
jsonReqId = "id"
headerUserId = "X-User-Id"
headerOriginalRemoteAddr = "X-Original-Remote-Addr"
)
// Peek at the request and update the Context accordingly (eg, API method, user ID, etc.)
func preprocessRequest(r *http.Request) (*http.Request, error) {
// Generate a unique ID for this request.
uniqId, err := uuid.NewUUID()
if nil != err {
return nil, err
}
// Read the body so that we can peek inside.
body, err := io.ReadAll(r.Body)
if nil != err {
return nil, err
}
// Replace it with a re-readable copy.
r.Body = io.NopCloser(bytes.NewBuffer(body))
// All API requests should be JSON.
var result map[string]interface{}
if len(body) > 0 {
err = json.Unmarshal(body, &result)
if nil != err {
return nil, err
}
}
// Pull out the method name, request ID, user ID, and address info.
reqId := fmt.Sprintf("%g", result[jsonReqId])
reqMethod := fmt.Sprintf("%v", result[jsonMethod])
reqParams := fmt.Sprintf("%v", result[jsonParams])
// Truncate parameters unless trace logging is enabled.
if !log.IsLevelEnabled(log.TraceLevel) {
if len(reqParams) > 250 {
reqParams = reqParams[:250] + "..."
}
}
userId := r.Header.Get(headerUserId)
conn := r.Header.Get(headerOriginalRemoteAddr)
if len(conn) == 0 {
conn = r.RemoteAddr
}
// Add it all to the request context.
ctx := r.Context()
ctx = context.WithValue(ctx, log.CtxKeyUniqId, uniqId.String())
ctx = context.WithValue(ctx, log.CtxKeyApiMethod, reqMethod)
ctx = context.WithValue(ctx, log.CtxKeyApiParams, string(reqParams))
ctx = context.WithValue(ctx, log.CtxKeyApiReqId, reqId)
ctx = context.WithValue(ctx, log.CtxKeyUserId, userId)
ctx = context.WithValue(ctx, log.CtxKeyConn, conn)
return r.WithContext(ctx), nil
}
// HTTPMiddleware http connection metric reader
func HTTPMiddleware(next http.Handler) http.Handler {
if !metrics {
return next
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
httpCount.Inc()
start := time.Now()
r, err := preprocessRequest(r)
if nil != err {
log.WithError(err).Error("Error preprocessing request")
w.WriteHeader(http.StatusBadRequest)
return
}
ctx := r.Context()
apiMethod := fmt.Sprintf("%s", ctx.Value(log.CtxKeyApiMethod))
if metrics {
httpCount.WithLabelValues(apiMethod).Inc()
}
log.Debugx(ctx, "START")
next.ServeHTTP(w, r)
duration := time.Now().Sub(start)
log.Debugxf(context.WithValue(ctx, log.CtxKeyDuration, duration.Milliseconds()), "END")
if metrics {
httpDuration.WithLabelValues(apiMethod).Observe(duration.Seconds())
}
httpDuration.Observe(float64(duration.Seconds()))
})
}
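In combination, preprocessRequest and HTTPMiddleware mean that wrapping a handler is all that is needed to get per-request context fields plus the method-labelled counters and histograms; a sketch of the wiring, with the listen address and handler as placeholders and the v5 import path assumed:

package sketch

import (
    "net/http"

    "github.com/cerc-io/ipld-eth-server/v5/pkg/prom"
)

// serveWithMetrics enables metric collection and wraps the RPC handler with the
// middleware above, so every request is counted, timed and logged by method.
func serveWithMetrics(addr string, rpcHandler http.Handler) error {
    prom.Init()
    return http.ListenAndServe(addr, prom.HTTPMiddleware(rpcHandler))
}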

View File

@ -33,8 +33,8 @@ const (
var (
metrics bool
httpCount *prometheus.CounterVec
httpDuration *prometheus.HistogramVec
httpCount prometheus.Counter
httpDuration prometheus.Histogram
wsCount prometheus.Gauge
ipcCount prometheus.Gauge
)
@ -43,19 +43,18 @@ var (
func Init() {
metrics = true
httpCount = promauto.NewCounterVec(prometheus.CounterOpts{
httpCount = promauto.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystemHTTP,
Name: "count",
Help: "http request count",
}, []string{"method"})
httpDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
})
httpDuration = promauto.NewHistogram(prometheus.HistogramOpts{
Namespace: namespace,
Subsystem: subsystemHTTP,
Name: "duration",
Help: "http request duration",
}, []string{"method"})
})
wsCount = promauto.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,

View File

@ -20,8 +20,8 @@ import (
"errors"
"net/http"
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/sirupsen/logrus"
)
var errPromHTTP = errors.New("can't start http server for prometheus")
@ -36,7 +36,7 @@ func Serve(addr string) *http.Server {
}
go func() {
if err := srv.ListenAndServe(); err != nil {
log.
logrus.
WithError(err).
WithField("module", "prom").
WithField("addr", addr).

View File

@ -19,12 +19,10 @@ package rpc
import (
"fmt"
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
"github.com/cerc-io/ipld-eth-server/v5/pkg/prom"
log "github.com/sirupsen/logrus"
)
// StartHTTPEndpoint starts the HTTP RPC endpoint, configured with cors/vhosts/modules.
@ -35,15 +33,15 @@ func StartHTTPEndpoint(endpoint string, apis []rpc.API, modules []string, cors [
if err != nil {
utils.Fatalf("Could not register HTTP API: %w", err)
}
handler := prom.HTTPMiddleware(node.NewHTTPHandlerStack(srv, cors, vhosts, nil))
handler := node.NewHTTPHandlerStack(srv, cors, vhosts, nil)
// start http server
_, addr, err := node.StartHTTPEndpoint(endpoint, rpc.DefaultHTTPTimeouts, handler)
if err != nil {
utils.Fatalf("Could not start RPC api: %v", err)
}
extapiURL := fmt.Sprintf("http://%s", addr)
log.Infof("HTTP endpoint opened at %s", extapiURL)
extapiURL := fmt.Sprintf("http://%v/", addr)
log.Infof("HTTP endpoint opened %s", extapiURL)
return srv, err
}

View File

@ -22,11 +22,10 @@ import (
"os"
"path/filepath"
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
"github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/rpc"
"github.com/cerc-io/ipld-eth-server/v5/pkg/prom"
log "github.com/sirupsen/logrus"
"github.com/cerc-io/ipld-eth-server/v4/pkg/prom"
)
var (

View File

@ -24,8 +24,7 @@ import (
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
"github.com/cerc-io/ipld-eth-server/v5/pkg/prom"
"github.com/cerc-io/ipld-eth-server/v4/pkg/prom"
)
// StartWSEndpoint starts a websocket endpoint.
@ -50,7 +49,6 @@ func StartWSEndpoint(endpoint string, apis []rpc.API, modules []string, wsOrigin
wsServer := NewWSServer(wsOrigins, handler)
wsServer.Handler = prom.WSMiddleware(wsServer.Handler)
go wsServer.Serve(listener)
log.Infof("WS endpoint opened at ws://%s", endpoint)
return listener, handler, err

View File

@ -17,8 +17,13 @@
package serve
import (
"github.com/cerc-io/plugeth-statediff/types"
"context"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff/types"
log "github.com/sirupsen/logrus"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
)
// APIName is the namespace used for the state diffing service API
@ -41,11 +46,49 @@ func NewPublicServerAPI(w Server, client *rpc.Client) *PublicServerAPI {
}
}
// Stream is the public method to setup a subscription that fires off IPLD payloads as they are processed
func (api *PublicServerAPI) Stream(ctx context.Context, params eth.SubscriptionSettings) (*rpc.Subscription, error) {
// ensure that the RPC connection supports subscriptions
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return nil, rpc.ErrNotificationsUnsupported
}
// create subscription and start waiting for stream events
rpcSub := notifier.CreateSubscription()
go func() {
// subscribe to events from the SyncPublishScreenAndServe service
payloadChannel := make(chan SubscriptionPayload, PayloadChanBufferSize)
quitChan := make(chan bool, 1)
go api.w.Subscribe(rpcSub.ID, payloadChannel, quitChan, params)
// loop and await payloads and relay them to the subscriber using notifier
for {
select {
case packet := <-payloadChannel:
if err := notifier.Notify(rpcSub.ID, packet); err != nil {
log.Error("Failed to send watcher data packet", "err", err)
api.w.Unsubscribe(rpcSub.ID)
return
}
case <-rpcSub.Err():
api.w.Unsubscribe(rpcSub.ID)
return
case <-quitChan:
// no need to unsubscribe from the watcher; the service unsubscribes before sending the quit signal
return
}
}
}()
return rpcSub, nil
}
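On the client side, the Stream endpoint above is reached through go-ethereum's rpc subscription client; a sketch follows, where the endpoint, the namespace (it should match APIName), the "stream" method name and the params value are all assumptions to verify against the server.

package sketch

import (
    "context"
    "fmt"

    "github.com/ethereum/go-ethereum/rpc"
)

// payload mirrors the JSON shape of SubscriptionPayload defined later in this
// diff; a real client could import the serve package instead.
type payload struct {
    Data   []byte `json:"data"`
    Height int64  `json:"height"`
    Err    string `json:"err"`
    Flag   int32  `json:"flag"`
}

// streamIPLDs subscribes to the Stream API and drains payloads until the
// context is cancelled or the subscription errors out.
func streamIPLDs(ctx context.Context, endpoint, namespace string, params interface{}) error {
    cli, err := rpc.Dial(endpoint)
    if err != nil {
        return err
    }
    payloads := make(chan payload)
    sub, err := cli.Subscribe(ctx, namespace, payloads, "stream", params)
    if err != nil {
        return err
    }
    defer sub.Unsubscribe()
    for {
        select {
        case p := <-payloads:
            fmt.Printf("payload at height %d (%d bytes)\n", p.Height, len(p.Data))
        case err := <-sub.Err():
            return err
        case <-ctx.Done():
            return ctx.Err()
        }
    }
}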
// WatchAddress makes a geth WatchAddress API call with the given operation and args
func (api *PublicServerAPI) WatchAddress(operation types.OperationType, args []types.WatchAddressArg) error {
err := api.rpc.Call(nil, "statediff_watchAddress", operation, args)
if err != nil {
// Return the error directly to match the native Geth API
return err
}

View File

@ -24,61 +24,37 @@ import (
"path/filepath"
"time"
"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
"github.com/cerc-io/plugeth-statediff/indexer/node"
"github.com/cerc-io/plugeth-statediff/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/jmoiron/sqlx"
"github.com/spf13/viper"
"github.com/cerc-io/ipld-eth-server/v5/pkg/prom"
ethServerShared "github.com/cerc-io/ipld-eth-server/v5/pkg/shared"
"github.com/cerc-io/ipld-eth-server/v4/pkg/prom"
ethServerShared "github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
)
// Env variables
const (
SERVER_WS_PATH = "SERVER_WS_PATH"
SERVER_IPC_PATH = "SERVER_IPC_PATH"
SERVER_HTTP_PATH = "SERVER_HTTP_PATH"
SERVER_GRAPHQL_PATH = "SERVER_GRAPHQL_PATH"
SERVER_WS_PATH = "SERVER_WS_PATH"
SERVER_IPC_PATH = "SERVER_IPC_PATH"
SERVER_HTTP_PATH = "SERVER_HTTP_PATH"
SERVER_MAX_IDLE_CONNECTIONS = "SERVER_MAX_IDLE_CONNECTIONS"
SERVER_MAX_OPEN_CONNECTIONS = "SERVER_MAX_OPEN_CONNECTIONS"
SERVER_MAX_CONN_LIFETIME = "SERVER_MAX_CONN_LIFETIME"
ETH_DEFAULT_SENDER_ADDR = "ETH_DEFAULT_SENDER_ADDR"
ETH_RPC_GAS_CAP = "ETH_RPC_GAS_CAP"
ETH_CHAIN_CONFIG = "ETH_CHAIN_CONFIG"
ETH_SUPPORTS_STATEDIFF = "ETH_SUPPORTS_STATEDIFF"
ETH_STATEDIFF_TIMEOUT = "ETH_STATEDIFF_TIMEOUT"
ETH_FORWARD_ETH_CALLS = "ETH_FORWARD_ETH_CALLS"
ETH_FORWARD_GET_STORAGE_AT = "ETH_FORWARD_GET_STORAGE_AT"
ETH_PROXY_ON_ERROR = "ETH_PROXY_ON_ERROR"
ETH_GETLOGS_BLOCK_LIMIT = "ETH_GETLOGS_BLOCK_LIMIT"
ETH_DEFAULT_SENDER_ADDR = "ETH_DEFAULT_SENDER_ADDR"
ETH_RPC_GAS_CAP = "ETH_RPC_GAS_CAP"
ETH_CHAIN_CONFIG = "ETH_CHAIN_CONFIG"
ETH_SUPPORTS_STATEDIFF = "ETH_SUPPORTS_STATEDIFF"
ETH_FORWARD_ETH_CALLS = "ETH_FORWARD_ETH_CALLS"
ETH_PROXY_ON_ERROR = "ETH_PROXY_ON_ERROR"
VALIDATOR_ENABLED = "VALIDATOR_ENABLED"
VALIDATOR_EVERY_NTH_BLOCK = "VALIDATOR_EVERY_NTH_BLOCK"
HTTP_TIMEOUT = "HTTP_TIMEOUT"
ETH_WS_PATH = "ETH_WS_PATH"
ETH_HTTP_PATH = "ETH_HTTP_PATH"
ETH_NODE_ID = "ETH_NODE_ID"
ETH_CLIENT_NAME = "ETH_CLIENT_NAME"
ETH_GENESIS_BLOCK = "ETH_GENESIS_BLOCK"
ETH_NETWORK_ID = "ETH_NETWORK_ID"
ETH_CHAIN_ID = "ETH_CHAIN_ID"
DATABASE_NAME = "DATABASE_NAME"
DATABASE_HOSTNAME = "DATABASE_HOSTNAME"
DATABASE_PORT = "DATABASE_PORT"
DATABASE_USER = "DATABASE_USER"
DATABASE_PASSWORD = "DATABASE_PASSWORD"
DATABASE_MAX_IDLE_CONNECTIONS = "DATABASE_MAX_IDLE_CONNECTIONS"
DATABASE_MAX_OPEN_CONNECTIONS = "DATABASE_MAX_OPEN_CONNECTIONS"
DATABASE_MAX_CONN_LIFETIME = "DATABASE_MAX_CONN_LIFETIME"
)
// Config struct
@ -98,18 +74,21 @@ type Config struct {
EthGraphqlEnabled bool
EthGraphqlEndpoint string
ChainConfig *params.ChainConfig
DefaultSender *common.Address
RPCGasCap *big.Int
EthHttpEndpoint string
Client *rpc.Client
SupportStateDiff bool
StateDiffTimeout time.Duration
ForwardEthCalls bool
ForwardGetStorageAt bool
ProxyOnError bool
GetLogsBlockLimit int64
NodeNetworkID string
IpldGraphqlEnabled bool
IpldGraphqlEndpoint string
IpldPostgraphileEndpoint string
TracingHttpEndpoint string
TracingPostgraphileEndpoint string
ChainConfig *params.ChainConfig
DefaultSender *common.Address
RPCGasCap *big.Int
EthHttpEndpoint string
Client *rpc.Client
SupportStateDiff bool
ForwardEthCalls bool
ProxyOnError bool
NodeNetworkID string
// Cache configuration.
GroupCache *ethServerShared.GroupCacheConfig
@ -123,22 +102,13 @@ type Config struct {
func NewConfig() (*Config, error) {
c := new(Config)
viper.BindEnv("server.httpPath", SERVER_HTTP_PATH)
viper.BindEnv("server.wsPath", SERVER_WS_PATH)
viper.BindEnv("server.ipcPath", SERVER_IPC_PATH)
viper.BindEnv("server.graphqlPath", SERVER_GRAPHQL_PATH)
viper.BindEnv("ethereum.httpPath", ETH_HTTP_PATH)
viper.BindEnv("ethereum.defaultSender", ETH_DEFAULT_SENDER_ADDR)
viper.BindEnv("ethereum.rpcGasCap", ETH_RPC_GAS_CAP)
viper.BindEnv("ethereum.chainConfig", ETH_CHAIN_CONFIG)
viper.BindEnv("ethereum.supportsStateDiff", ETH_SUPPORTS_STATEDIFF)
viper.BindEnv("ethereum.stateDiffTimeout", ETH_STATEDIFF_TIMEOUT)
viper.BindEnv("ethereum.forwardEthCalls", ETH_FORWARD_ETH_CALLS)
viper.BindEnv("ethereum.forwardGetStorageAt", ETH_FORWARD_GET_STORAGE_AT)
viper.BindEnv("ethereum.proxyOnError", ETH_PROXY_ON_ERROR)
viper.BindEnv("ethereum.getLogsBlockLimit", ETH_GETLOGS_BLOCK_LIMIT)
viper.BindEnv("log.file", "LOG_FILE")
viper.BindEnv("log.level", "LOG_LEVEL")
c.dbInit()
ethHTTP := viper.GetString("ethereum.httpPath")
@ -151,20 +121,13 @@ func NewConfig() (*Config, error) {
c.Client = cli
c.SupportStateDiff = viper.GetBool("ethereum.supportsStateDiff")
c.ForwardEthCalls = viper.GetBool("ethereum.forwardEthCalls")
c.ForwardGetStorageAt = viper.GetBool("ethereum.forwardGetStorageAt")
c.ProxyOnError = viper.GetBool("ethereum.proxyOnError")
c.EthHttpEndpoint = ethHTTPEndpoint
if viper.IsSet("ethereum.getLogsBlockLimit") {
c.GetLogsBlockLimit = viper.GetInt64("ethereum.getLogsBlockLimit")
} else {
c.GetLogsBlockLimit = 500
}
// websocket server
wsEnabled := viper.GetBool("server.ws")
wsEnabled := viper.GetBool("eth.server.ws")
if wsEnabled {
wsPath := viper.GetString("server.wsPath")
wsPath := viper.GetString("eth.server.wsPath")
if wsPath == "" {
wsPath = "127.0.0.1:8080"
}
@ -173,9 +136,9 @@ func NewConfig() (*Config, error) {
c.WSEnabled = wsEnabled
// ipc server
ipcEnabled := viper.GetBool("server.ipc")
ipcEnabled := viper.GetBool("eth.server.ipc")
if ipcEnabled {
ipcPath := viper.GetString("server.ipcPath")
ipcPath := viper.GetString("eth.server.ipcPath")
if ipcPath == "" {
home, err := os.UserHomeDir()
if err != nil {
@ -188,9 +151,9 @@ func NewConfig() (*Config, error) {
c.IPCEnabled = ipcEnabled
// http server
httpEnabled := viper.GetBool("server.http")
httpEnabled := viper.GetBool("eth.server.http")
if httpEnabled {
httpPath := viper.GetString("server.httpPath")
httpPath := viper.GetString("eth.server.httpPath")
if httpPath == "" {
httpPath = "127.0.0.1:8081"
}
@ -199,9 +162,9 @@ func NewConfig() (*Config, error) {
c.HTTPEnabled = httpEnabled
// eth graphql endpoint
ethGraphqlEnabled := viper.GetBool("server.graphql")
ethGraphqlEnabled := viper.GetBool("eth.server.graphql")
if ethGraphqlEnabled {
ethGraphqlPath := viper.GetString("server.graphqlPath")
ethGraphqlPath := viper.GetString("eth.server.graphqlPath")
if ethGraphqlPath == "" {
ethGraphqlPath = "127.0.0.1:8082"
}
@ -209,6 +172,34 @@ func NewConfig() (*Config, error) {
}
c.EthGraphqlEnabled = ethGraphqlEnabled
// ipld graphql endpoint
ipldGraphqlEnabled := viper.GetBool("ipld.server.graphql")
if ipldGraphqlEnabled {
ipldGraphqlPath := viper.GetString("ipld.server.graphqlPath")
if ipldGraphqlPath == "" {
ipldGraphqlPath = "127.0.0.1:8083"
}
c.IpldGraphqlEndpoint = ipldGraphqlPath
ipldPostgraphilePath := viper.GetString("ipld.postgraphilePath")
if ipldPostgraphilePath == "" {
return nil, errors.New("ipld-postgraphile-path parameter is empty")
}
c.IpldPostgraphileEndpoint = ipldPostgraphilePath
tracingHttpEndpoint := viper.GetString("tracing.httpPath")
tracingPostgraphilePath := viper.GetString("tracing.postgraphilePath")
// these two parameters must either both be empty or both be set
if (tracingHttpEndpoint == "" && tracingPostgraphilePath != "") || (tracingHttpEndpoint != "" && tracingPostgraphilePath == "") {
return nil, errors.New("tracing.httpPath and tracing.postgraphilePath parameters either can be both empty or both set")
}
c.TracingHttpEndpoint = tracingHttpEndpoint
c.TracingPostgraphileEndpoint = tracingPostgraphilePath
}
c.IpldGraphqlEnabled = ipldGraphqlEnabled
overrideDBConnConfig(&c.DBConfig)
serveDB, err := ethServerShared.NewDB(c.DBConfig.DbConnectionString(), c.DBConfig)
if err != nil {
@ -218,30 +209,22 @@ func NewConfig() (*Config, error) {
prom.RegisterDBCollector(c.DBConfig.DatabaseName, serveDB)
c.DB = serveDB
defaultSenderStr := viper.GetString("ethereum.defaultSender")
if defaultSenderStr != "" {
sender := common.HexToAddress(defaultSenderStr)
c.DefaultSender = &sender
}
rpcGasCapStr := viper.GetString("ethereum.rpcGasCap")
if rpcGasCapStr != "" {
if rpcGasCap, ok := new(big.Int).SetString(rpcGasCapStr, 10); ok {
c.RPCGasCap = rpcGasCap
}
} else {
c.RPCGasCap = big.NewInt(0)
}
if sdTimeout := viper.GetString("ethereum.stateDiffTimeout"); sdTimeout != "" {
var err error
if c.StateDiffTimeout, err = time.ParseDuration(sdTimeout); err != nil {
return nil, err
}
} else {
c.StateDiffTimeout = ethServerShared.DefaultStateDiffTimeout
}
if c.StateDiffTimeout < 0 {
return nil, errors.New("ethereum.stateDiffTimeout < 0")
}
chainConfigPath := viper.GetString("ethereum.chainConfig")
if chainConfigPath != "" {
c.ChainConfig, err = utils.LoadConfig(chainConfigPath)
c.ChainConfig, err = statediff.LoadConfig(chainConfigPath)
} else {
c.ChainConfig, err = utils.ChainConfig(nodeInfo.ChainID)
c.ChainConfig, err = statediff.ChainConfig(nodeInfo.ChainID)
}
c.loadGroupCacheConfig()
@ -310,24 +293,3 @@ func (c *Config) loadValidatorConfig() {
c.StateValidationEnabled = viper.GetBool("validator.enabled")
c.StateValidationEveryNthBlock = viper.GetUint64("validator.everyNthBlock")
}
// GetEthNodeAndClient returns eth node info and client from path url
func getEthNodeAndClient(path string) (node.Info, *rpc.Client, error) {
viper.BindEnv("ethereum.nodeID", ETH_NODE_ID)
viper.BindEnv("ethereum.clientName", ETH_CLIENT_NAME)
viper.BindEnv("ethereum.genesisBlock", ETH_GENESIS_BLOCK)
viper.BindEnv("ethereum.networkID", ETH_NETWORK_ID)
viper.BindEnv("ethereum.chainID", ETH_CHAIN_ID)
rpcClient, err := rpc.Dial(path)
if err != nil {
return node.Info{}, nil, err
}
return node.Info{
ID: viper.GetString("ethereum.nodeID"),
ClientName: viper.GetString("ethereum.clientName"),
GenesisBlock: viper.GetString("ethereum.genesisBlock"),
NetworkID: viper.GetString("ethereum.networkID"),
ChainID: viper.GetUint64("ethereum.chainID"),
}, rpcClient, nil
}

pkg/serve/env.go (new file, 50 lines)
View File

@ -0,0 +1,50 @@
package serve
import (
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff/indexer/node"
"github.com/spf13/viper"
)
// Env variables
const (
HTTP_TIMEOUT = "HTTP_TIMEOUT"
ETH_WS_PATH = "ETH_WS_PATH"
ETH_HTTP_PATH = "ETH_HTTP_PATH"
ETH_NODE_ID = "ETH_NODE_ID"
ETH_CLIENT_NAME = "ETH_CLIENT_NAME"
ETH_GENESIS_BLOCK = "ETH_GENESIS_BLOCK"
ETH_NETWORK_ID = "ETH_NETWORK_ID"
ETH_CHAIN_ID = "ETH_CHAIN_ID"
DATABASE_NAME = "DATABASE_NAME"
DATABASE_HOSTNAME = "DATABASE_HOSTNAME"
DATABASE_PORT = "DATABASE_PORT"
DATABASE_USER = "DATABASE_USER"
DATABASE_PASSWORD = "DATABASE_PASSWORD"
DATABASE_MAX_IDLE_CONNECTIONS = "DATABASE_MAX_IDLE_CONNECTIONS"
DATABASE_MAX_OPEN_CONNECTIONS = "DATABASE_MAX_OPEN_CONNECTIONS"
DATABASE_MAX_CONN_LIFETIME = "DATABASE_MAX_CONN_LIFETIME"
)
// GetEthNodeAndClient returns eth node info and client from path url
func getEthNodeAndClient(path string) (node.Info, *rpc.Client, error) {
viper.BindEnv("ethereum.nodeID", ETH_NODE_ID)
viper.BindEnv("ethereum.clientName", ETH_CLIENT_NAME)
viper.BindEnv("ethereum.genesisBlock", ETH_GENESIS_BLOCK)
viper.BindEnv("ethereum.networkID", ETH_NETWORK_ID)
viper.BindEnv("ethereum.chainID", ETH_CHAIN_ID)
rpcClient, err := rpc.Dial(path)
if err != nil {
return node.Info{}, nil, err
}
return node.Info{
ID: viper.GetString("ethereum.nodeID"),
ClientName: viper.GetString("ethereum.clientName"),
GenesisBlock: viper.GetString("ethereum.genesisBlock"),
NetworkID: viper.GetString("ethereum.networkID"),
ChainID: viper.GetUint64("ethereum.chainID"),
}, rpcClient, nil
}

View File

@ -14,16 +14,24 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eth_state_test
package serve
import (
"testing"
import log "github.com/sirupsen/logrus"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func TestETHSuite(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "ipld-eth-server/pkg/eth/state_test")
func sendNonBlockingErr(sub Subscription, err error) {
log.Error(err)
select {
case sub.PayloadChan <- SubscriptionPayload{Data: nil, Err: err.Error(), Flag: EmptyFlag}:
default:
log.Infof("unable to send error to subscription %s", sub.ID)
}
}
func sendNonBlockingQuit(sub Subscription) {
select {
case sub.QuitChan <- true:
log.Infof("closing subscription %s", sub.ID)
default:
log.Infof("unable to close subscription %s; channel has no receiver", sub.ID)
}
}

View File

@ -17,21 +17,22 @@
package serve
import (
"fmt"
"strconv"
"sync"
"time"
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/crypto"
ethnode "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/jmoiron/sqlx"
log "github.com/sirupsen/logrus"
"github.com/cerc-io/ipld-eth-server/v5/pkg/debug"
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v5/pkg/net"
"github.com/cerc-io/ipld-eth-server/v4/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v4/pkg/net"
)
const (
@ -47,7 +48,11 @@ type Server interface {
APIs() []rpc.API
Protocols() []p2p.Protocol
// Pub-Sub handling event loop
Serve(wg *sync.WaitGroup)
Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth.ConvertedPayload)
// Method to subscribe to the service
Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitChan chan<- bool, params eth.SubscriptionSettings)
// Method to unsubscribe from the service
Unsubscribe(id rpc.ID)
// Backend exposes the server's backend
Backend() *eth.Backend
}
@ -56,24 +61,30 @@ type Server interface {
type Service struct {
// Used to sync access to the Subscriptions
sync.Mutex
// Interface for filtering and serving data according to subscribed clients according to their specification
Filterer eth.Filterer
// Interface for fetching IPLD objects from IPFS
IPLDFetcher eth.Fetcher
// Interface for searching and retrieving CIDs from Postgres index
Retriever eth.Retriever
// Used to signal shutdown of the service
QuitChan chan bool
// Underlying db connection pool
// A mapping of rpc.IDs to their subscription channels, mapped to their subscription type (hash of the StreamFilters)
Subscriptions map[common.Hash]map[rpc.ID]Subscription
// A mapping of subscription params hash to the corresponding subscription params
SubscriptionTypes map[common.Hash]eth.SubscriptionSettings
// Underlying db
db *sqlx.DB
// wg for syncing serve processes
serveWg *sync.WaitGroup
// rpc client for forwarding cache misses
client *rpc.Client
// whether the proxied client supports state diffing
supportsStateDiffing bool
// timeout for statediff RPC calls
stateDiffTimeout time.Duration
// backend for the server
backend *eth.Backend
// whether to forward eth_calls directly to proxy node
forwardEthCalls bool
// whether to forward eth_getStorageAt directly to proxy node
forwardGetStorageAt bool
// the maximum size of the block range to use in GetLogs
getLogsBlockLimit int64
// whether to forward all calls to proxy node if they throw an error locally
proxyOnError bool
// eth node network id
@ -83,20 +94,23 @@ type Service struct {
// NewServer creates a new Server using an underlying Service struct
func NewServer(settings *Config) (Server, error) {
sap := new(Service)
sap.Retriever = eth.NewCIDRetriever(settings.DB)
sap.IPLDFetcher = eth.NewIPLDFetcher(settings.DB)
sap.Filterer = eth.NewResponseFilterer()
sap.db = settings.DB
sap.QuitChan = make(chan bool)
sap.Subscriptions = make(map[common.Hash]map[rpc.ID]Subscription)
sap.SubscriptionTypes = make(map[common.Hash]eth.SubscriptionSettings)
sap.client = settings.Client
sap.supportsStateDiffing = settings.SupportStateDiff
sap.stateDiffTimeout = settings.StateDiffTimeout
sap.forwardEthCalls = settings.ForwardEthCalls
sap.forwardGetStorageAt = settings.ForwardGetStorageAt
sap.getLogsBlockLimit = settings.GetLogsBlockLimit
sap.proxyOnError = settings.ProxyOnError
sap.nodeNetworkId = settings.NodeNetworkID
var err error
sap.backend, err = eth.NewEthBackend(sap.db, &eth.Config{
ChainConfig: settings.ChainConfig,
VMConfig: vm.Config{NoBaseFee: true},
DefaultSender: settings.DefaultSender,
RPCGasCap: settings.RPCGasCap,
GroupCacheConfig: settings.GroupCache,
})
@ -125,44 +139,207 @@ func (sap *Service) APIs() []rpc.API {
Public: true,
},
}
conf := eth.APIConfig{
SupportsStateDiff: sap.supportsStateDiffing,
ForwardEthCalls: sap.forwardEthCalls,
ForwardGetStorageAt: sap.forwardGetStorageAt,
ProxyOnError: sap.proxyOnError,
StateDiffTimeout: sap.stateDiffTimeout,
GetLogsBlockLimit: sap.getLogsBlockLimit,
}
ethAPI, err := eth.NewPublicEthAPI(sap.backend, sap.client, conf)
ethAPI, err := eth.NewPublicEthAPI(sap.backend, sap.client, sap.supportsStateDiffing, sap.forwardEthCalls, sap.proxyOnError)
if err != nil {
log.Fatalf("unable to create public eth api: %v", err)
}
debugTracerAPI := tracers.APIs(&debug.Backend{Backend: *sap.backend})[0]
return append(apis,
rpc.API{
Namespace: eth.APIName,
Version: eth.APIVersion,
Service: ethAPI,
Public: true,
},
debugTracerAPI,
)
return append(apis, rpc.API{
Namespace: eth.APIName,
Version: eth.APIVersion,
Service: ethAPI,
Public: true,
})
}
// Serve listens for incoming converter data off the screenAndServePayload from the Sync process
// It filters and sends this data to any subscribers to the service
// This process can also be stood up alone, without a screenAndServePayload attached to a Sync process
// and it will hang on the WaitGroup indefinitely, allowing the Service to serve historical data requests only
func (sap *Service) Serve(wg *sync.WaitGroup) {
func (sap *Service) Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth.ConvertedPayload) {
sap.serveWg = wg
go func() {
wg.Add(1)
defer wg.Done()
<-sap.QuitChan
log.Info("quiting eth ipld server process")
for {
select {
case payload := <-screenAndServePayload:
sap.filterAndServe(payload)
case <-sap.QuitChan:
log.Info("quiting eth ipld server process")
return
}
}
}()
log.Debug("eth ipld server process successfully spun up")
log.Info("eth ipld server process successfully spun up")
}
// filterAndServe filters the payload according to each subscription type and sends to the subscriptions
func (sap *Service) filterAndServe(payload eth.ConvertedPayload) {
log.Debug("sending eth ipld payload to subscriptions")
sap.Lock()
sap.serveWg.Add(1)
defer sap.Unlock()
defer sap.serveWg.Done()
for ty, subs := range sap.Subscriptions {
// Retrieve the subscription parameters for this subscription type
subConfig, ok := sap.SubscriptionTypes[ty]
if !ok {
log.Errorf("eth ipld server subscription configuration for subscription type %s not available", ty.Hex())
sap.closeType(ty)
continue
}
if subConfig.End.Int64() > 0 && subConfig.End.Int64() < payload.Block.Number().Int64() {
// We are out of range for this subscription type;
// close it, and continue to the next
sap.closeType(ty)
continue
}
response, err := sap.Filterer.Filter(subConfig, payload)
if err != nil {
log.Errorf("eth ipld server filtering error: %v", err)
sap.closeType(ty)
continue
}
responseRLP, err := rlp.EncodeToBytes(response)
if err != nil {
log.Errorf("eth ipld server rlp encoding error: %v", err)
continue
}
for id, sub := range subs {
select {
case sub.PayloadChan <- SubscriptionPayload{Data: responseRLP, Err: "", Flag: EmptyFlag, Height: response.BlockNumber.Int64()}:
log.Debugf("sending eth ipld server payload to subscription %s", id)
default:
log.Infof("unable to send eth ipld payload to subscription %s; channel has no receiver", id)
}
}
}
}
// Subscribe is used by the API to remotely subscribe to the service loop
// The params must be rlp serializable and satisfy the SubscriptionSettings() interface
func (sap *Service) Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitChan chan<- bool, params eth.SubscriptionSettings) {
sap.serveWg.Add(1)
defer sap.serveWg.Done()
log.Infof("new eth ipld subscription %s", id)
subscription := Subscription{
ID: id,
PayloadChan: sub,
QuitChan: quitChan,
}
// Subscription type is defined as the hash of the rlp-serialized subscription settings
by, err := rlp.EncodeToBytes(params)
if err != nil {
sendNonBlockingErr(subscription, err)
sendNonBlockingQuit(subscription)
return
}
subscriptionType := crypto.Keccak256Hash(by)
if !params.BackFillOnly {
// Add subscriber
sap.Lock()
if sap.Subscriptions[subscriptionType] == nil {
sap.Subscriptions[subscriptionType] = make(map[rpc.ID]Subscription)
}
sap.Subscriptions[subscriptionType][id] = subscription
sap.SubscriptionTypes[subscriptionType] = params
sap.Unlock()
}
// If the subscription requests a backfill, use the Postgres index to lookup and retrieve historical data
// Otherwise we only filter new data as it is streamed in from the state diffing geth node
if params.BackFill || params.BackFillOnly {
if err := sap.sendHistoricalData(subscription, id, params); err != nil {
sendNonBlockingErr(subscription, fmt.Errorf("eth ipld server subscription backfill error: %v", err))
sendNonBlockingQuit(subscription)
return
}
}
}
// sendHistoricalData sends historical data to the requesting subscription
func (sap *Service) sendHistoricalData(sub Subscription, id rpc.ID, params eth.SubscriptionSettings) error {
log.Infof("sending eth ipld historical data to subscription %s", id)
// Retrieve cached CIDs relevant to this subscriber
var endingBlock int64
var startingBlock int64
var err error
startingBlock, err = sap.Retriever.RetrieveFirstBlockNumber()
if err != nil {
return err
}
if startingBlock < params.Start.Int64() {
startingBlock = params.Start.Int64()
}
endingBlock, err = sap.Retriever.RetrieveLastBlockNumber()
if err != nil {
return err
}
if endingBlock > params.End.Int64() && params.End.Int64() > 0 && params.End.Int64() > startingBlock {
endingBlock = params.End.Int64()
}
log.Debugf("eth ipld historical data starting block: %d", params.Start.Int64())
log.Debugf("eth ipld historical data ending block: %d", endingBlock)
go func() {
sap.serveWg.Add(1)
defer sap.serveWg.Done()
for i := startingBlock; i <= endingBlock; i++ {
select {
case <-sap.QuitChan:
log.Infof("ethereum historical data feed to subscription %s closed", id)
return
default:
}
cidWrappers, empty, err := sap.Retriever.Retrieve(params, i)
if err != nil {
sendNonBlockingErr(sub, fmt.Errorf("eth ipld server cid retrieval error at block %d\r%s", i, err.Error()))
continue
}
if empty {
continue
}
for _, cids := range cidWrappers {
response, err := sap.IPLDFetcher.Fetch(cids)
if err != nil {
sendNonBlockingErr(sub, fmt.Errorf("eth ipld server ipld fetching error at block %d\r%s", i, err.Error()))
continue
}
responseRLP, err := rlp.EncodeToBytes(response)
if err != nil {
log.Error(err)
continue
}
select {
case sub.PayloadChan <- SubscriptionPayload{Data: responseRLP, Err: "", Flag: EmptyFlag, Height: response.BlockNumber.Int64()}:
log.Debugf("eth ipld server sending historical data payload to subscription %s", id)
default:
log.Infof("eth ipld server unable to send backFill payload to subscription %s; channel has no receiver", id)
}
}
}
// when we are done backfilling send an empty payload signifying so in the msg
select {
case sub.PayloadChan <- SubscriptionPayload{Data: nil, Err: "", Flag: BackFillCompleteFlag}:
log.Debugf("eth ipld server sending backFill completion notice to subscription %s", id)
default:
log.Infof("eth ipld server unable to send backFill completion notice to subscription %s", id)
}
}()
return nil
}
// Unsubscribe is used by the API to remotely unsubscribe from the StateDiffingService loop
func (sap *Service) Unsubscribe(id rpc.ID) {
log.Infof("unsubscribing %s from the eth ipld server", id)
sap.Lock()
for ty := range sap.Subscriptions {
delete(sap.Subscriptions[ty], id)
if len(sap.Subscriptions[ty]) == 0 {
// If we removed the last subscription of this type, remove the subscription type outright
delete(sap.Subscriptions, ty)
delete(sap.SubscriptionTypes, ty)
}
}
sap.Unlock()
}
// Start is used to begin the service
@ -170,16 +347,18 @@ func (sap *Service) Serve(wg *sync.WaitGroup) {
func (sap *Service) Start() error {
log.Info("starting eth ipld server")
wg := new(sync.WaitGroup)
sap.Serve(wg)
payloadChan := make(chan eth.ConvertedPayload, PayloadChanBufferSize)
sap.Serve(wg, payloadChan)
return nil
}
// Stop is used to close down the service
// This is mostly just to satisfy the node.Service interface
func (sap *Service) Stop() error {
log.Info("stopping eth ipld server")
log.Infof("stopping eth ipld server")
sap.Lock()
close(sap.QuitChan)
sap.close()
sap.Unlock()
return nil
}
@ -188,3 +367,28 @@ func (sap *Service) Stop() error {
func (sap *Service) Backend() *eth.Backend {
return sap.backend
}
// close is used to close all listening subscriptions
// close needs to be called with subscription access locked
func (sap *Service) close() {
log.Infof("closing all eth ipld server subscriptions")
for subType, subs := range sap.Subscriptions {
for _, sub := range subs {
sendNonBlockingQuit(sub)
}
delete(sap.Subscriptions, subType)
delete(sap.SubscriptionTypes, subType)
}
}
// closeType is used to close all subscriptions of given type
// closeType needs to be called with subscription access locked
func (sap *Service) closeType(subType common.Hash) {
log.Infof("closing all eth ipld server subscriptions of type %s", subType.String())
subs := sap.Subscriptions[subType]
for _, sub := range subs {
sendNonBlockingQuit(sub)
}
delete(sap.Subscriptions, subType)
delete(sap.SubscriptionTypes, subType)
}

pkg/serve/subscription.go (new file, 60 lines)
View File

@ -0,0 +1,60 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package serve
import (
"errors"
"github.com/ethereum/go-ethereum/rpc"
)
type Flag int32
const (
EmptyFlag Flag = iota
BackFillCompleteFlag
)
// Subscription holds the information for an individual client subscription to the watcher
type Subscription struct {
ID rpc.ID
PayloadChan chan<- SubscriptionPayload
QuitChan chan<- bool
}
// SubscriptionPayload is the struct for a watcher data subscription payload
// It carries data of a type specific to the chain being supported/queried and an error message
type SubscriptionPayload struct {
Data []byte `json:"data"` // e.g. for Ethereum rlp serialized eth.StreamPayload
Height int64 `json:"height"`
Err string `json:"err"` // field for error
Flag Flag `json:"flag"` // field for message
}
func (sp SubscriptionPayload) Error() error {
if sp.Err == "" {
return nil
}
return errors.New(sp.Err)
}
func (sp SubscriptionPayload) BackFillComplete() bool {
if sp.Flag == BackFillCompleteFlag {
return true
}
return false
}
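A consuming sketch for the payload type above: check Error first, treat BackFillCompleteFlag as the end-of-history marker, and only then RLP-decode Data into whatever response type the subscription expects. The v4 module path here is taken from other imports in this diff and may differ.

package sketch

import (
    "github.com/cerc-io/ipld-eth-server/v4/pkg/serve"
    "github.com/ethereum/go-ethereum/rlp"
)

// handlePayload returns done=true when the backfill-complete marker arrives;
// otherwise it decodes the RLP payload into the caller-supplied value.
func handlePayload(sp serve.SubscriptionPayload, out interface{}) (done bool, err error) {
    if err := sp.Error(); err != nil {
        return false, err
    }
    if sp.BackFillComplete() {
        return true, nil
    }
    return false, rlp.DecodeBytes(sp.Data, out)
}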

View File

@ -16,12 +16,9 @@
package shared
import "time"
const (
DefaultMaxBatchSize uint64 = 100
DefaultMaxBatchNumber int64 = 50
DefaultStateDiffTimeout time.Duration = 240 * time.Second
DefaultMaxBatchSize uint64 = 100
DefaultMaxBatchNumber int64 = 50
GcachePoolEnabled = "GCACHE_POOL_ENABLED"
GcachePoolHttpPath = "GCACHE_POOL_HTTP_PATH"

View File

@ -17,7 +17,7 @@
package shared
import (
"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/jmoiron/sqlx"
)

View File

@ -17,9 +17,12 @@
package shared
import (
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
"github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-cid"
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
"github.com/jmoiron/sqlx"
"github.com/sirupsen/logrus"
)
// HandleZeroAddrPointer will return an emtpy string for a nil address pointer
@ -41,6 +44,36 @@ func HandleZeroAddr(to common.Address) string {
// Rollback sql transaction and log any error
func Rollback(tx *sqlx.Tx) {
if err := tx.Rollback(); err != nil {
log.Error(err)
logrus.Error(err)
}
}
// FetchIPLDByMhKeyAndBlockNumber is used to retrieve an ipld from Postgres blockstore with the provided tx, mhkey string and blockNumber
func FetchIPLDByMhKeyAndBlockNumber(tx *sqlx.Tx, mhKey string, blockNumber uint64) ([]byte, error) {
pgStr := `SELECT data FROM public.blocks WHERE key = $1 AND block_number = $2`
var block []byte
return block, tx.Get(&block, pgStr, mhKey, blockNumber)
}
// FetchIPLD is used to retrieve an IPLD from Postgres by mhkey and blockNumber
func FetchIPLD(db *sqlx.DB, mhKey string, blockNumber uint64) ([]byte, error) {
pgStr := `SELECT data FROM public.blocks WHERE key = $1 AND block_number = $2`
var block []byte
return block, db.Get(&block, pgStr, mhKey, blockNumber)
}
// MultihashKeyFromCID converts a cid into a blockstore-prefixed multihash db key string
func MultihashKeyFromCID(c cid.Cid) string {
dbKey := dshelp.MultihashToDsKey(c.Hash())
return blockstore.BlockPrefix.String() + dbKey.String()
}
// MultihashKeyFromCIDString converts a cid string into a blockstore-prefixed multihash db key string
func MultihashKeyFromCIDString(c string) (string, error) {
dc, err := cid.Decode(c)
if err != nil {
return "", err
}
dbKey := dshelp.MultihashToDsKey(dc.Hash())
return blockstore.BlockPrefix.String() + dbKey.String(), nil
}
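The two helpers above are typically chained: a CID string is first turned into its blockstore-prefixed multihash key, which is then used to fetch the raw block bytes; a sketch assuming the v4 shared package path used elsewhere in this diff:

package sketch

import (
    "github.com/cerc-io/ipld-eth-server/v4/pkg/shared"
    "github.com/jmoiron/sqlx"
)

// fetchByCID resolves a CID string to its multihash key and pulls the raw IPLD
// bytes for that key at the given block height from public.blocks.
func fetchByCID(db *sqlx.DB, cidStr string, blockNumber uint64) ([]byte, error) {
    mhKey, err := shared.MultihashKeyFromCIDString(cidStr)
    if err != nil {
        return nil, err
    }
    return shared.FetchIPLD(db, mhKey, blockNumber)
}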

View File

@ -19,16 +19,18 @@ package shared
import (
"bytes"
"context"
"os"
"strconv"
. "github.com/onsi/gomega"
"github.com/cerc-io/plugeth-statediff/indexer"
"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
"github.com/cerc-io/plugeth-statediff/indexer/models"
"github.com/cerc-io/plugeth-statediff/indexer/node"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/statediff/indexer"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/statediff/indexer/node"
"github.com/jmoiron/sqlx"
)
@ -44,8 +46,7 @@ func IPLDsContainBytes(iplds []models.IPLDModel, b []byte) bool {
// SetupDB is used to set up a db for watcher tests
func SetupDB() *sqlx.DB {
config, err := postgres.TestConfig.WithEnv()
Expect(err).NotTo(HaveOccurred())
config := getTestDBConfig()
db, err := NewDB(config.DbConnectionString(), config)
Expect(err).NotTo(HaveOccurred())
@ -59,8 +60,6 @@ func TearDownDB(db *sqlx.DB) {
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM nodes`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM ipld.blocks`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.header_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.uncle_cids`)
@ -73,6 +72,12 @@ func TearDownDB(db *sqlx.DB) {
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.storage_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.state_accounts`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.access_list_elements`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM blocks`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.log_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth_meta.watched_addresses`)
@ -91,10 +96,20 @@ func SetupTestStateDiffIndexer(ctx context.Context, chainConfig *params.ChainCon
ChainID: params.TestChainConfig.ChainID.Uint64(),
}
dbconfig, err := postgres.TestConfig.WithEnv()
Expect(err).NotTo(HaveOccurred())
_, stateDiffIndexer, err := indexer.NewStateDiffIndexer(ctx, chainConfig, testInfo, dbconfig, true)
_, stateDiffIndexer, err := indexer.NewStateDiffIndexer(ctx, chainConfig, testInfo, getTestDBConfig())
Expect(err).NotTo(HaveOccurred())
return stateDiffIndexer
}
func getTestDBConfig() postgres.Config {
port, _ := strconv.Atoi(os.Getenv("DATABASE_PORT"))
return postgres.Config{
Hostname: os.Getenv("DATABASE_HOSTNAME"),
DatabaseName: os.Getenv("DATABASE_NAME"),
Username: os.Getenv("DATABASE_USER"),
Password: os.Getenv("DATABASE_PASSWORD"),
Port: port,
Driver: postgres.SQLX,
}
}
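A rough sketch of how these helpers are typically wired into a Ginkgo spec (assuming dot-imports of ginkgo and gomega, a sqlx import, and that `shared` is the package shown above; the spec body itself is illustrative only):

```go
var _ = Describe("watcher db example", func() {
	var db *sqlx.DB

	BeforeEach(func() {
		// SetupDB builds its config from DATABASE_HOSTNAME, DATABASE_NAME,
		// DATABASE_USER, DATABASE_PASSWORD and DATABASE_PORT via getTestDBConfig.
		db = shared.SetupDB()
	})

	AfterEach(func() {
		// TearDownDB clears the eth.* (and related) tables populated during the test.
		shared.TearDownDB(db)
		db.Close()
	})

	It("can reach the test database", func() {
		Expect(db.Ping()).To(Succeed())
	})
})
```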

137
scripts/gomoderator.py Normal file
View File

@ -0,0 +1,137 @@
import os
import sys
import subprocess
import errno
from typing import List, Dict
"""
Resolves dependency conflicts between a plugin repository's and the core repository's go.mods
Usage: python3 gomoderator.py {path_to_core_repository} {path_to_plugin_repository}
"""
ERROR_INVALID_NAME = 123
def is_pathname_valid(pathname: str) -> bool:
"""
`True` if the passed pathname is a valid pathname for the current OS;
`False` otherwise.
"""
try:
if not isinstance(pathname, str) or not pathname:
return False
_, pathname = os.path.splitdrive(pathname)
root_dirname = os.environ.get('HOMEDRIVE', 'C:') \
if sys.platform == 'win32' else os.path.sep
assert os.path.isdir(root_dirname) # ...Murphy and her ironclad Law
root_dirname = root_dirname.rstrip(os.path.sep) + os.path.sep
for pathname_part in pathname.split(os.path.sep):
try:
os.lstat(root_dirname + pathname_part)
except OSError as exc:
if hasattr(exc, 'winerror'):
if exc.winerror == ERROR_INVALID_NAME:
return False
elif exc.errno in {errno.ENAMETOOLONG, errno.ERANGE}:
return False
except TypeError as exc:
return False
else:
return True
def map_deps_to_version(deps_arr: List[str]) -> Dict[str, str]:
mapping = {}
for d in deps_arr:
if d.find(' => ') != -1:
ds = d.split(' => ')
d = ds[1]
d = d.replace(" v", "[>v") # could probably just split on whitespace and skip this marker insertion
d_and_v = d.split("[>")
mapping[d_and_v[0]] = d_and_v[1]
return mapping
# argument checks
assert len(sys.argv) == 3, "need core repository and plugin repository path arguments"
core_repository_path = sys.argv[1]
plugin_repository_path = sys.argv[2]
assert is_pathname_valid(core_repository_path), "core repository path argument is not valid"
assert is_pathname_valid(plugin_repository_path), "plugin repository path argument is not valid"
# collect `go list -m all` output from both repositories; remain in the plugin repository
os.chdir(core_repository_path)
core_deps_b = subprocess.check_output(["go", "list", "-m", "all"])
os.chdir(plugin_repository_path)
plugin_deps_b = subprocess.check_output(["go", "list", "-m", "all"])
core_deps = core_deps_b.decode("utf-8")
core_deps_arr = core_deps.splitlines()
del core_deps_arr[0] # first line is the project repo itself
plugin_deps = plugin_deps_b.decode("utf-8")
plugin_deps_arr = plugin_deps.splitlines()
del plugin_deps_arr[0]
core_deps_mapping = map_deps_to_version(core_deps_arr)
plugin_deps_mapping = map_deps_to_version(plugin_deps_arr)
# iterate over dependency maps for both repos and find version conflicts
# attempt to resolve conflicts by adding a `require` directive for the core version to the plugin's go.mod file
none = True
for dep, core_version in core_deps_mapping.items():
if dep in plugin_deps_mapping.keys():
plugin_version = plugin_deps_mapping[dep]
if core_version != plugin_version:
print(f'{dep} has a conflict: core is using version {core_version} '
f'but the plugin is using version {plugin_version}')
fixed_dep = f'{dep}@{core_version}'
print(f'attempting fix by `go mod edit -require={fixed_dep}`')
subprocess.check_call(["go", "mod", "edit", f'-require={fixed_dep}'])
none = False
if none:
print("no conflicts to resolve")
quit()
# the above process does not work for all dep conflicts, e.g. golang.org/x/text v0.3.0 will not stick this way
# so we will try the `go get {dep}` route for any remaining conflicts
updated_plugin_deps_b = subprocess.check_output(["go", "list", "-m", "all"])
updated_plugin_deps = updated_plugin_deps_b.decode("utf-8")
updated_plugin_deps_arr = updated_plugin_deps.splitlines()
del updated_plugin_deps_arr[0]
updated_plugin_deps_mapping = map_deps_to_version(updated_plugin_deps_arr)
none = True
for dep, core_version in core_deps_mapping.items():
if dep in updated_plugin_deps_mapping.keys():
updated_plugin_version = updated_plugin_deps_mapping[dep]
if core_version != updated_plugin_version:
print(f'{dep} still has a conflict: core is using version {core_version} '
f'but the plugin is using version {updated_plugin_version}')
fixed_dep = f'{dep}@{core_version}'
print(f'attempting fix by `go get {fixed_dep}`')
subprocess.check_call(["go", "get", fixed_dep])
none = False
if none:
print("all conflicts have been resolved")
quit()
# iterate over the plugin's `go list -m all` output one more time and report whether or not the above has worked
final_plugin_deps_b = subprocess.check_output(["go", "list", "-m", "all"])
final_plugin_deps = final_plugin_deps_b.decode("utf-8")
final_plugin_deps_arr = final_plugin_deps.splitlines()
del final_plugin_deps_arr[0]
final_plugin_deps_mapping = map_deps_to_version(final_plugin_deps_arr)
none = True
for dep, core_version in core_deps_mapping.items():
if dep in final_plugin_deps_mapping.keys():
final_plugin_version = final_plugin_deps_mapping[dep]
if core_version != final_plugin_version:
print(f'{dep} STILL has a conflict: core is using version {core_version} '
f'but the plugin is using version {final_plugin_version}')
none = False
if none:
print("all conflicts have been resolved")
quit()
print("failed to resolve all conflicts")

15
scripts/install-postgres-11.sh Executable file
View File

@ -0,0 +1,15 @@
#!/usr/bin/env bash
set -ex
echo "Installing Postgres 11"
sudo service postgresql stop
sudo apt-get remove -q 'postgresql-*'
sudo apt-get update -q
sudo apt-get install -q postgresql-11 postgresql-client-11
sudo cp /etc/postgresql/{9.6,11}/main/pg_hba.conf
echo "Restarting Postgres 11"
sudo service postgresql restart
sudo psql -c 'CREATE ROLE travis SUPERUSER LOGIN CREATEDB;' -U postgres

View File

@ -1,53 +0,0 @@
#!/bin/bash
# Builds and deploys a stack with only what we need.
# This script assumes we are running in the project root.
set -e
export DOCKER_BUILDKIT=1
# Prevent conflicting tty output
export BUILDKIT_PROGRESS=plain
CONFIG_DIR=$(readlink -f "${CONFIG_DIR:-$(mktemp -d)}")
# By default assume we are running in the project root
export CERC_REPO_BASE_DIR="${CERC_REPO_BASE_DIR:-..}"
# v5 migrations only go up to version 18
echo CERC_STATEDIFF_DB_GOOSE_MIN_VER=18 >> $CONFIG_DIR/stack.env
# Pass this in so we can run eth_call forwarding tests, which expect no IPLD DB
echo CERC_RUN_STATEDIFF=${CERC_RUN_STATEDIFF:-true} >> $CONFIG_DIR/stack.env
laconic_so="${LACONIC_SO:-laconic-so} --stack fixturenet-plugeth-tx --verbose"
set -x
if [[ -z $SKIP_BUILD ]]; then
$laconic_so setup-repositories \
--exclude github.com/cerc-io/ipld-eth-server,github.com/cerc-io/tx-spammer,github.com/dboreham/foundry \
--branches-file ./test/stack-refs.txt
$laconic_so build-containers \
--exclude cerc/ipld-eth-server,cerc/keycloak,cerc/tx-spammer,cerc/foundry
fi
$laconic_so deploy \
--include fixturenet-plugeth,ipld-eth-db \
--env-file $CONFIG_DIR/stack.env \
--cluster test up
set +x
# Get IPv4 endpoint of geth file server
bootnode_endpoint=$(docker port test-fixturenet-eth-bootnode-geth-1 9898 | head -1)
# Extract the chain config and ID from genesis file
curl -s $bootnode_endpoint/geth.json | jq '.config' > $CONFIG_DIR/chain.json
# Output vars if we are running on Github
if [[ -n "$GITHUB_ENV" ]]; then
echo ETH_CHAIN_ID="$(jq '.chainId' $CONFIG_DIR/chain.json)" >> "$GITHUB_ENV"
echo ETH_CHAIN_CONFIG="$CONFIG_DIR/chain.json" >> "$GITHUB_ENV"
echo ETH_HTTP_PATH=$(docker port test-fixturenet-eth-geth-1-1 8545 | head -1) >> "$GITHUB_ENV"
# Read a private key so we can send from a funded account
echo DEPLOYER_PRIVATE_KEY=$(curl -s $bootnode_endpoint/accounts.csv | head -1 | cut -d',' -f3) >> "$GITHUB_ENV"
fi

27
scripts/reset_db Executable file
View File

@ -0,0 +1,27 @@
#!/usr/bin/env bash
# Provide me with a postgres database name, and I will:
# - Drop the database
# - Recreate the database
# - Run the vulcanizedb migration
if [ "$1" = "" ]; then
echo "Provide a database name to reset"
exit 1
fi
db=$1
dir=$(basename "$(pwd)")
if [ "$dir" != "ipld-eth-server" ]
then
echo "Run me from the ipld-eth-server root dir"
exit 1
fi
user=$(whoami)
psql -c "DROP DATABASE $db" postgres
if [ $? -eq 0 ]; then
psql -c "CREATE DATABASE $db WITH OWNER $user" postgres
make migrate HOST_NAME=localhost NAME=$db PORT=5432
else
echo "Couldnt drop the database. Are you connected? Does it exist?"
fi

17
scripts/run_integration_test.sh Executable file
View File

@ -0,0 +1,17 @@
set -e
set -o xtrace
export ETH_FORWARD_ETH_CALLS=false
export DB_WRITE=true
export ETH_PROXY_ON_ERROR=false
export PGPASSWORD=password
export DATABASE_USER=vdbm
export DATABASE_PORT=8077
export DATABASE_PASSWORD=password
export DATABASE_HOSTNAME=127.0.0.1
# Wait for containers to be up and execute the integration test.
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8081)" != "200" ]; do echo "waiting for ipld-eth-server..." && sleep 5; done && \
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8545)" != "200" ]; do echo "waiting for geth-statediff..." && sleep 5; done && \
make integrationtest

View File

@ -0,0 +1,17 @@
set -e
set -o xtrace
export ETH_FORWARD_ETH_CALLS=true
export DB_WRITE=false
export ETH_PROXY_ON_ERROR=false
export PGPASSWORD=password
export DATABASE_USER=vdbm
export DATABASE_PORT=8077
export DATABASE_PASSWORD=password
export DATABASE_HOSTNAME=127.0.0.1
# Wait for containers to be up and execute the integration test.
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8081)" != "200" ]; do echo "waiting for ipld-eth-server..." && sleep 5; done && \
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8545)" != "200" ]; do echo "waiting for geth-statediff..." && sleep 5; done && \
make integrationtest

16
scripts/run_unit_test.sh Executable file
View File

@ -0,0 +1,16 @@
#!/bin/bash
# Remove any existing containers / volumes
docker-compose down --remove-orphans --volumes
# Spin up DB and run migrations
docker-compose up -d migrations ipld-eth-db
sleep 30
# Run unit tests
go clean -testcache
PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing make test
# Clean up
docker-compose down --remove-orphans --volumes
rm -rf out/

119
test/README.md Normal file
View File

@ -0,0 +1,119 @@
# Test Instructions
## Setup
- Clone the [stack-orchestrator](https://github.com/vulcanize/stack-orchestrator), [ipld-eth-db](https://github.com/vulcanize/ipld-eth-db), and [go-ethereum](https://github.com/vulcanize/go-ethereum) repositories.
- Check out the [v4 release](https://github.com/vulcanize/ipld-eth-db/releases/tag/v4.2.1-alpha) in the ipld-eth-db repo.
```bash
# In ipld-eth-db repo.
git checkout v4.2.1-alpha
```
- Check out the [v4 release](https://github.com/vulcanize/go-ethereum/releases/tag/v1.10.21-statediff-4.1.2-alpha) in the go-ethereum repo.
```bash
# In go-ethereum repo.
git checkout v1.10.21-statediff-4.1.2-alpha
```
- Check out the working commit in the stack-orchestrator repo.
```bash
# In stack-orchestrator repo.
git checkout f2fd766f5400fcb9eb47b50675d2e3b1f2753702
```
## Run
- Run unit tests:
```bash
# In ipld-eth-server root directory.
./scripts/run_unit_test.sh
```
- Run integration tests:
- In the stack-orchestrator repo, create the config file:
```bash
cd helper-scripts
./create-config.sh
```
A `config.sh` will be created in the root directory.
- Update the generated config file with:
```bash
#!/bin/bash
# Path to ipld-eth-db repo.
vulcanize_ipld_eth_db=~/ipld-eth-db/
# Path to go-ethereum repo.
vulcanize_go_ethereum=~/go-ethereum/
# Path to ipld-eth-server repo.
vulcanize_ipld_eth_server=~/ipld-eth-server/
# Path to test contract.
vulcanize_test_contract=~/ipld-eth-server/test/contract
genesis_file_path='start-up-files/go-ethereum/genesis.json'
db_write=true
eth_forward_eth_calls=false
eth_proxy_on_error=false
eth_http_path="go-ethereum:8545"
```
- Run stack-orchestrator:
```bash
# In stack-orchestrator root directory.
cd helper-scripts
./wrapper.sh \
-e docker \
-d ../docker/local/docker-compose-db-sharding.yml \
-d ../docker/local/docker-compose-go-ethereum.yml \
-d ../docker/local/docker-compose-ipld-eth-server.yml \
-d ../docker/local/docker-compose-contract.yml \
-v remove \
-p ../config.sh
```
- Run the integration tests:
```bash
# In ipld-eth-server root directory.
./scripts/run_integration_test.sh
```
- Update stack-orchestrator `config.sh` file:
```bash
#!/bin/bash
# Path to go-ethereum repo.
vulcanize_go_ethereum=~/go-ethereum/
# Path to ipld-eth-server repo.
vulcanize_ipld_eth_server=~/ipld-eth-server/
# Path to test contract.
vulcanize_test_contract=~/ipld-eth-server/test/contract
genesis_file_path='start-up-files/go-ethereum/genesis.json'
db_write=false
eth_forward_eth_calls=true
eth_proxy_on_error=false
eth_http_path="go-ethereum:8545"
```
- Stop stack-orchestrator and start it again using the same command.
- Run integration tests for direct proxy fall-through of eth_calls:
```bash
./scripts/run_integration_test_forward_eth_calls.sh
```

View File

@ -1,26 +0,0 @@
# Containers to run a DB instance for tests
services:
migrations:
restart: on-failure
depends_on:
- ipld-eth-db
image: git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:v5.0.5-alpha
environment:
DATABASE_USER: "vdbm"
DATABASE_NAME: "cerc_testing"
DATABASE_PASSWORD: "password"
DATABASE_HOSTNAME: "ipld-eth-db"
DATABASE_PORT: 5432
ipld-eth-db:
container_name: test-ipld-eth-db
image: timescale/timescaledb:latest-pg14
restart: always
command: ["postgres", "-c", "log_statement=all"]
environment:
POSTGRES_USER: "vdbm"
POSTGRES_DB: "cerc_testing"
POSTGRES_PASSWORD: "password"
ports:
- 127.0.0.1:8077:5432

View File

@ -1,44 +0,0 @@
# Runs the IPLD server and contract deployment server
services:
ipld-eth-server:
restart: unless-stopped
image: cerc/ipld-eth-server:local
networks:
- test_default
environment:
DATABASE_NAME: "cerc_testing"
DATABASE_HOSTNAME: "ipld-eth-db"
DATABASE_PORT: 5432
DATABASE_USER: "vdbm"
DATABASE_PASSWORD: "password"
ETH_HTTP_PATH: fixturenet-eth-geth-1:8545
ETH_CHAIN_CONFIG: /tmp/chain.json
ETH_PROXY_ON_ERROR: false
ETH_FORWARD_ETH_CALLS: $ETH_FORWARD_ETH_CALLS
SERVER_HTTP_PATH: 0.0.0.0:8081
VDB_COMMAND: serve
LOG_LEVEL: debug
volumes:
- type: bind
source: $ETH_CHAIN_CONFIG
target: /tmp/chain.json
ports:
- 127.0.0.1:8081:8081
contract-deployer:
restart: on-failure
image: cerc/ipld-eth-server/contract-deployer:local
build: ./contract
networks:
- test_default
environment:
ETH_ADDR: "http://fixturenet-eth-geth-1:8545"
ETH_CHAIN_ID: $ETH_CHAIN_ID
DEPLOYER_PRIVATE_KEY: $DEPLOYER_PRIVATE_KEY
ports:
- 127.0.0.1:3000:3000
networks:
test_default:
external: true

View File

@ -1,5 +1,7 @@
# Downgrade from 18.16, see https://github.com/NomicFoundation/hardhat/issues/3877
FROM node:18.15-slim
FROM node:14
ARG ETH_ADDR
ENV ETH_ADDR $ETH_ADDR
WORKDIR /usr/src/app
COPY package*.json ./
@ -9,4 +11,4 @@ RUN npm run compile && ls -lah
EXPOSE 3000
ENTRYPOINT ["node", "src/index.js"]
ENTRYPOINT ["npm", "start"]

View File

@ -1,6 +1,5 @@
pragma solidity ^0.8.0;
import "@openzeppelin/contracts/token/ERC20/ERC20.sol";
contract GLDToken is ERC20 {
constructor() ERC20("Gold", "GLD") {
_mint(msg.sender, 1000000000000000000000);

View File

@ -16,16 +16,6 @@ task("accounts", "Prints the list of accounts", async () => {
/**
* @type import('hardhat/config').HardhatUserConfig
*/
const localNetwork = {
url: process.env.ETH_ADDR || "http://127.0.0.1:8545",
chainId: Number(process.env.ETH_CHAIN_ID) || 99,
};
if (process.env.DEPLOYER_PRIVATE_KEY) {
localNetwork["accounts"] = [process.env.DEPLOYER_PRIVATE_KEY];
}
module.exports = {
solidity: {
version: "0.8.0",
@ -40,7 +30,14 @@ module.exports = {
}
},
networks: {
local: localNetwork
},
defaultNetwork: "local"
local: {
url: 'http://127.0.0.1:8545',
chainId: 99
},
docker: {
url: process.env.ETH_ADDR,
chainId: 99
}
}
};

File diff suppressed because it is too large

View File

@ -4,21 +4,23 @@
"main": "index.js",
"scripts": {
"compile": "npx hardhat compile",
"start": "node src/index.js"
"start": "HARDHAT_NETWORK=docker node src/index.js",
"start:local": "ETH_ADDR=http://127.0.0.1:8545 npm run start",
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [],
"author": "",
"license": "ISC",
"description": "Solidity contract deployment server for integration testing",
"description": "",
"dependencies": {
"@openzeppelin/contracts": "^4.0.0",
"fastify": "^4.0.0",
"hardhat": "^2.14.0",
"fastify": "^3.14.2",
"hardhat": "^2.2.0",
"solidity-create2-deployer": "^0.4.0"
},
"devDependencies": {
"@nomiclabs/hardhat-ethers": "^2.2.3",
"@nomiclabs/hardhat-waffle": "^2.0.4",
"@nomiclabs/hardhat-ethers": "^2.0.2",
"@nomiclabs/hardhat-waffle": "^2.0.1",
"chai": "^4.3.4",
"ethereum-waffle": "^3.3.0",
"ethers": "^5.1.0"

Some files were not shown because too many files have changed in this diff