Compare commits
No commits in common. "v5" and "release-v4.1.0-alpha" have entirely different histories.
v5
...
release-v4
@ -1,18 +1,16 @@
|
||||
.git
|
||||
.travis.yml
|
||||
.idea
|
||||
bin
|
||||
.gitignore
|
||||
.private_blockchain_password
|
||||
.travis.yml
|
||||
|
||||
integration_test
|
||||
LICENSE
|
||||
postgraphile
|
||||
.private_blockchain_password
|
||||
README.md
|
||||
|
||||
integration
|
||||
test
|
||||
scripts
|
||||
Supfile
|
||||
test_config
|
||||
.travis.yml
|
||||
vulcanizedb.log
|
||||
Dockerfile
|
||||
environments
|
||||
|
||||
**/node_modules
|
||||
build
|
||||
79
.github/workflows/on-pr-publish.yaml
vendored
Normal file
79
.github/workflows/on-pr-publish.yaml
vendored
Normal file
@ -0,0 +1,79 @@
|
||||
name: Test, Build, and/or Publish
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
pre_job:
|
||||
# continue-on-error: true # Uncomment once integration is finished
|
||||
runs-on: ubuntu-latest
|
||||
# Map a step output to a job output
|
||||
outputs:
|
||||
should_skip: ${{ steps.skip_check.outputs.should_skip }}
|
||||
steps:
|
||||
- id: skip_check
|
||||
uses: fkirc/skip-duplicate-actions@v4
|
||||
with:
|
||||
# All of these options are optional, so you can remove them if you are happy with the defaults
|
||||
concurrent_skipping: "never"
|
||||
skip_after_successful_duplicate: "true"
|
||||
do_not_skip: '["workflow_dispatch", "schedule"]'
|
||||
run-tests:
|
||||
uses: ./.github/workflows/tests.yaml
|
||||
if: ${{ needs.pre_job.outputs.should_skip != 'true' }}
|
||||
needs: pre_job
|
||||
secrets:
|
||||
BUILD_HOSTNAME: ${{ secrets.BUILD_HOSTNAME }}
|
||||
BUILD_USERNAME: ${{ secrets.BUILD_USERNAME }}
|
||||
BUILD_KEY: ${{ secrets.BUILD_KEY }}
|
||||
with:
|
||||
STACK_ORCHESTRATOR_REF: "f2fd766f5400fcb9eb47b50675d2e3b1f2753702"
|
||||
GO_ETHEREUM_REF: "7b4ef34de2b9469c3f82972b60e38b34c99c5382"
|
||||
IPLD_ETH_DB_REF: "b59505eab252670c622b42ce60621e9747fb64f9"
|
||||
build:
|
||||
name: Run docker build
|
||||
runs-on: ubuntu-latest
|
||||
needs: run-tests
|
||||
if: |
|
||||
always() &&
|
||||
(needs.run-tests.result == 'success' || needs.run-tests.result == 'skipped') &&
|
||||
github.event_name == 'release'
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Get the version
|
||||
id: vars
|
||||
run: echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
|
||||
- name: Run docker build
|
||||
run: make docker-build
|
||||
- name: Tag docker image
|
||||
run: docker tag vulcanize/ipld-eth-server docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
|
||||
- name: Docker Login
|
||||
run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
|
||||
- name: Docker Push
|
||||
run: docker push docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
|
||||
|
||||
push_to_registries:
|
||||
name: Push Docker image to Docker Hub
|
||||
runs-on: ubuntu-latest
|
||||
if: |
|
||||
always() &&
|
||||
(needs.build.result == 'success') &&
|
||||
github.event_name == 'release'
|
||||
needs: build
|
||||
steps:
|
||||
- name: Get the version
|
||||
id: vars
|
||||
run: |
|
||||
echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
|
||||
echo ::set-output name=tag::$(echo ${GITHUB_REF#refs/tags/})
|
||||
- name: Docker Login to Github Registry
|
||||
run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
|
||||
- name: Docker Pull
|
||||
run: docker pull docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
|
||||
- name: Docker Login to Docker Registry
|
||||
run: echo ${{ secrets.VULCANIZEJENKINS_PAT }} | docker login -u vulcanizejenkins --password-stdin
|
||||
- name: Tag docker image
|
||||
run: docker tag docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}} vulcanize/ipld-eth-server:${{steps.vars.outputs.tag}}
|
||||
- name: Docker Push to Docker Hub
|
||||
run: docker push vulcanize/ipld-eth-server:${{steps.vars.outputs.tag}}
|
||||
28
.github/workflows/publish.yaml
vendored
28
.github/workflows/publish.yaml
vendored
@ -1,28 +0,0 @@
|
||||
name: Publish Docker image
|
||||
on:
|
||||
release:
|
||||
types: [published, edited]
|
||||
jobs:
|
||||
build:
|
||||
name: Build and publish image
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- id: vars
|
||||
name: Output SHA and version tag
|
||||
run: |
|
||||
echo "sha=${GITHUB_SHA:0:7}" >> $GITHUB_OUTPUT
|
||||
echo "tag=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
|
||||
- name: Build and tag Docker image
|
||||
env:
|
||||
GIT_VDBTO_TOKEN: ${{ secrets.CICD_REPO_TOKEN }}
|
||||
run: |
|
||||
docker build . \
|
||||
--build-arg GIT_VDBTO_TOKEN \
|
||||
-t git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}} \
|
||||
-t git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.tag}}
|
||||
- name: Push Docker tags
|
||||
run: |
|
||||
echo ${{ secrets.GITEA_PUBLISH_TOKEN }} | docker login https://git.vdb.to -u cerccicd --password-stdin
|
||||
docker push git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
|
||||
docker push git.vdb.to/cerc-io/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.tag}}
|
||||
28
.github/workflows/run_unit_test.sh
vendored
Executable file
28
.github/workflows/run_unit_test.sh
vendored
Executable file
@ -0,0 +1,28 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
# Set up repo
|
||||
start_dir=$(pwd)
|
||||
temp_dir=$(mktemp -d)
|
||||
cd $temp_dir
|
||||
git clone -b $(cat /tmp/git_head_ref) "https://github.com/$(cat /tmp/git_repository).git"
|
||||
cd ipld-eth-server
|
||||
|
||||
## Remove the branch and github related info. This way future runs wont be confused.
|
||||
rm -f /tmp/git_head_ref /tmp/git_repository
|
||||
|
||||
# Spin up DB and run migrations
|
||||
docker-compose up -d migrations ipld-eth-db
|
||||
trap "docker-compose down -v --remove-orphans; cd $start_dir ; rm -r $temp_dir" SIGINT SIGTERM ERR
|
||||
sleep 30
|
||||
|
||||
# Remove old logs so there's no confusion, then run test
|
||||
rm -f /tmp/test.log /tmp/return_test.txt
|
||||
PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=localhost DATABASE_NAME=vulcanize_testing make test > /tmp/test.log
|
||||
echo $? > /tmp/return_test.txt
|
||||
|
||||
# Clean up
|
||||
docker-compose down -v --remove-orphans
|
||||
cd $start_dir
|
||||
rm -fr $temp_dir
|
||||
292
.github/workflows/tests.yaml
vendored
292
.github/workflows/tests.yaml
vendored
@ -1,128 +1,212 @@
|
||||
name: Test the stack.
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
# Job headers are hidden when not top-level - run them directly for readability until fixed:
|
||||
# https://github.com/go-gitea/gitea/issues/26736
|
||||
pull_request:
|
||||
branches: '*'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- ci-test
|
||||
|
||||
env:
|
||||
SO_VERSION: v1.1.0-36d4969-202407091537
|
||||
FIXTURENET_ETH_STACKS_REF: main
|
||||
SYSTEM_TESTS_REF: main
|
||||
secrets:
|
||||
BUILD_HOSTNAME:
|
||||
required: true
|
||||
BUILD_USERNAME:
|
||||
required: true
|
||||
BUILD_KEY:
|
||||
required: true
|
||||
inputs:
|
||||
STACK_ORCHESTRATOR_REF:
|
||||
required: true
|
||||
type: string
|
||||
GO_ETHEREUM_REF:
|
||||
required: true
|
||||
type: string
|
||||
IPLD_ETH_DB_REF:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Run docker build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Run docker build
|
||||
run: make docker-build
|
||||
test:
|
||||
name: Run unit tests
|
||||
env:
|
||||
GOPATH: /tmp/go
|
||||
# To run the unit tests you need to add secrets to your repository.
|
||||
BUILD_HOSTNAME: ${{ secrets.BUILD_HOSTNAME }}
|
||||
BUILD_USERNAME: ${{ secrets.BUILD_USERNAME }}
|
||||
BUILD_KEY: ${{ secrets.BUILD_KEY }}
|
||||
#strategy:
|
||||
# matrix:
|
||||
# go-version: [1.16.x, 1.17.x]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
- name: Run DB container
|
||||
run: docker compose -f test/compose-db.yml up --wait --quiet-pull
|
||||
- name: Build and run tests
|
||||
run: |
|
||||
go install github.com/onsi/ginkgo/v2/ginkgo
|
||||
ginkgo -v -r --skip-package=./integration
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
integration-test:
|
||||
# Passed experience with GHA has taught me to store variables in files instead of passing them as variables.
|
||||
- name: Output variables to files
|
||||
run: |
|
||||
echo $GITHUB_REPOSITORY > /tmp/git_repository
|
||||
[ -z "$GITHUB_HEAD_REF" ] && echo $GITHUB_REF_NAME > /tmp/git_head_ref || echo $GITHUB_HEAD_REF > /tmp/git_head_ref
|
||||
echo "-----BEGIN OPENSSH PRIVATE KEY-----" >> /tmp/key
|
||||
echo ${{ env.BUILD_KEY }} >> /tmp/key
|
||||
echo "-----END OPENSSH PRIVATE KEY-----" >> /tmp/key
|
||||
chmod 400 /tmp/key
|
||||
cat /tmp/git_repository
|
||||
cat /tmp/git_head_ref
|
||||
|
||||
- name: Raw SCP
|
||||
run: |
|
||||
scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key /tmp/git_repository ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/git_repository
|
||||
scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key /tmp/git_head_ref ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/git_head_ref
|
||||
scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key .github/workflows/run_unit_test.sh ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/run_unit_test.sh
|
||||
|
||||
- name: Trigger Unit Test
|
||||
run: |
|
||||
ssh -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }} go install github.com/onsi/ginkgo/ginkgo@latest
|
||||
ssh -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }} /tmp/run_unit_test.sh
|
||||
|
||||
- name: Get the logs and cat them
|
||||
run: |
|
||||
scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/test.log .
|
||||
cat ./test.log
|
||||
|
||||
- name: Check Error Code
|
||||
run: |
|
||||
scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/return_test.txt .
|
||||
[ $(cat ./return_test.txt) -eq 0 ]
|
||||
|
||||
integrationtest:
|
||||
name: Run integration tests
|
||||
env:
|
||||
GOPATH: /tmp/go
|
||||
DB_WRITE: true
|
||||
ETH_FORWARD_ETH_CALLS: false
|
||||
ETH_PROXY_ON_ERROR: false
|
||||
ETH_HTTP_PATH: "go-ethereum:8545"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v4
|
||||
- name: Create GOPATH
|
||||
run: mkdir -p /tmp/go
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
go-version: ">=1.18.0"
|
||||
check-latest: true
|
||||
|
||||
- name: Build server image
|
||||
env:
|
||||
GIT_VDBTO_TOKEN: ${{ secrets.CICD_REPO_TOKEN }}
|
||||
run: docker build . -t cerc/ipld-eth-server:local --build-arg GIT_VDBTO_TOKEN
|
||||
- name: Install jq
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive
|
||||
run: apt-get update && apt-get install -y jq
|
||||
# At present the stock setup-python action fails on Linux/aarch64
|
||||
# Conditional steps below workaroud this by using deadsnakes for that case only
|
||||
- name: "Install Python for ARM on Linux"
|
||||
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
|
||||
uses: deadsnakes/action@v3.0.1
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
python-version: 3.11
|
||||
- name: "Install Python cases other than ARM on Linux"
|
||||
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
|
||||
uses: actions/setup-python@v4
|
||||
path: "./ipld-eth-server"
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
python-version: 3.11
|
||||
- name: Install stack-orchestrator
|
||||
uses: actions/checkout@v3
|
||||
ref: ${{ inputs.STACK_ORCHESTRATOR_REF }}
|
||||
path: "./stack-orchestrator/"
|
||||
repository: vulcanize/stack-orchestrator
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: cerc-io/stack-orchestrator
|
||||
ref: ${{ env.SO_VERSION }}
|
||||
path: ./stack-orchestrator
|
||||
- run: pip install ./stack-orchestrator
|
||||
- name: Clone fixturenet stack repo
|
||||
uses: actions/checkout@v4
|
||||
ref: ${{ inputs.GO_ETHEREUM_REF }}
|
||||
repository: vulcanize/go-ethereum
|
||||
path: "./go-ethereum/"
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: cerc-io/fixturenet-eth-stacks
|
||||
ref: ${{ env.FIXTURENET_ETH_STACKS_REF }}
|
||||
path: ./fixturenet-eth-stacks
|
||||
progress: false
|
||||
|
||||
- name: Run testnet stack
|
||||
env:
|
||||
CERC_GO_AUTH_TOKEN: ${{ secrets.CICD_REPO_TOKEN }}
|
||||
run: ./scripts/run-test-stack.sh ./fixturenet-eth-stacks/stack-orchestrator/stacks/fixturenet-plugeth
|
||||
- name: Run server
|
||||
env:
|
||||
ETH_FORWARD_ETH_CALLS: false
|
||||
run: docker compose -f test/compose-server.yml up --wait --quiet-pull
|
||||
- name: Run tests
|
||||
ref: ${{ inputs.IPLD_ETH_DB_REF }}
|
||||
repository: vulcanize/ipld-eth-db
|
||||
path: "./ipld-eth-db/"
|
||||
- name: Create config file
|
||||
run: |
|
||||
sleep 30
|
||||
go install github.com/onsi/ginkgo/v2/ginkgo
|
||||
ginkgo -v --label-filter '!proxy' -r ./integration
|
||||
echo vulcanize_go_ethereum=$GITHUB_WORKSPACE/go-ethereum/ > ./config.sh
|
||||
echo vulcanize_ipld_eth_db=$GITHUB_WORKSPACE/ipld-eth-db/ >> ./config.sh
|
||||
echo vulcanize_ipld_eth_server=$GITHUB_WORKSPACE/ipld-eth-server/ >> ./config.sh
|
||||
echo vulcanize_test_contract=$GITHUB_WORKSPACE/ipld-eth-server/test/contract >> ./config.sh
|
||||
echo genesis_file_path=start-up-files/go-ethereum/genesis.json >> ./config.sh
|
||||
echo db_write=$DB_WRITE >> ./config.sh
|
||||
echo eth_forward_eth_calls=$ETH_FORWARD_ETH_CALLS >> ./config.sh
|
||||
echo eth_proxy_on_error=$ETH_PROXY_ON_ERROR >> ./config.sh
|
||||
echo eth_http_path=$ETH_HTTP_PATH >> ./config.sh
|
||||
cat ./config.sh
|
||||
- name: Build geth
|
||||
run: |
|
||||
cd $GITHUB_WORKSPACE/stack-orchestrator/helper-scripts
|
||||
./compile-geth.sh \
|
||||
-p "$GITHUB_WORKSPACE/config.sh" \
|
||||
-e docker
|
||||
- name: Run docker compose
|
||||
run: |
|
||||
docker-compose \
|
||||
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" \
|
||||
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" \
|
||||
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-server.yml" \
|
||||
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-contract.yml" \
|
||||
--env-file "$GITHUB_WORKSPACE/config.sh" \
|
||||
up -d --build
|
||||
- name: Test
|
||||
run: |
|
||||
cd $GITHUB_WORKSPACE/ipld-eth-server
|
||||
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8081)" != "200" ]; do echo "waiting for ipld-eth-server..." && sleep 5; done && \
|
||||
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8545)" != "200" ]; do echo "waiting for geth-statediff..." && sleep 5; done && \
|
||||
make integrationtest
|
||||
|
||||
- name: Clone system-tests
|
||||
uses: actions/checkout@v4
|
||||
integrationtest_forwardethcalls:
|
||||
name: Run integration tests for direct proxy fall-through of eth_calls
|
||||
env:
|
||||
GOPATH: /tmp/go
|
||||
DB_WRITE: false
|
||||
ETH_FORWARD_ETH_CALLS: true
|
||||
ETH_PROXY_ON_ERROR: false
|
||||
ETH_HTTP_PATH: "go-ethereum:8545"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Create GOPATH
|
||||
run: mkdir -p /tmp/go
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
repository: cerc-io/system-tests
|
||||
ref: ${{ env.SYSTEM_TESTS_REF }}
|
||||
path: ./system-tests
|
||||
token: ${{ secrets.CICD_REPO_TOKEN }}
|
||||
progress: false
|
||||
- name: Run system tests
|
||||
working-directory: ./system-tests
|
||||
# Work around dependency conflict in system-tests:
|
||||
# web3 uses an older eth-account until (unreleased) v7
|
||||
go-version: ">=1.18.0"
|
||||
check-latest: true
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
path: "./ipld-eth-server"
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ref: ${{ inputs.STACK_ORCHESTRATOR_REF }}
|
||||
path: "./stack-orchestrator/"
|
||||
repository: vulcanize/stack-orchestrator
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ref: ${{ inputs.GO_ETHEREUM_REF }}
|
||||
repository: vulcanize/go-ethereum
|
||||
path: "./go-ethereum/"
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ref: ${{ inputs.IPLD_ETH_DB_REF }}
|
||||
repository: vulcanize/ipld-eth-db
|
||||
path: "./ipld-eth-db/"
|
||||
- name: Create config file
|
||||
run: |
|
||||
pip3 install pytest
|
||||
pip3 install -r requirements.txt
|
||||
pip3 install --no-deps 'eth-account>=0.12.3,<0.13'
|
||||
pip3 install 'pydantic>=2.0.0'
|
||||
# Skips tests that require Blob indexing
|
||||
python3 -m pytest -vv -m "not blob_db"
|
||||
|
||||
- name: Run testnet stack without statediff
|
||||
env:
|
||||
CERC_RUN_STATEDIFF: false
|
||||
SKIP_BUILD: 1
|
||||
run: ./scripts/run-test-stack.sh ./fixturenet-eth-stacks/stack-orchestrator/stacks/fixturenet-plugeth
|
||||
- name: Run server with call forwarding
|
||||
env:
|
||||
ETH_FORWARD_ETH_CALLS: true
|
||||
run: docker compose -f test/compose-server.yml up --wait --quiet-pull
|
||||
- name: Run eth_call proxy tests
|
||||
echo vulcanize_go_ethereum=$GITHUB_WORKSPACE/go-ethereum/ > ./config.sh
|
||||
echo vulcanize_ipld_eth_db=$GITHUB_WORKSPACE/ipld-eth-db/ >> ./config.sh
|
||||
echo vulcanize_ipld_eth_server=$GITHUB_WORKSPACE/ipld-eth-server/ >> ./config.sh
|
||||
echo vulcanize_test_contract=$GITHUB_WORKSPACE/ipld-eth-server/test/contract >>./config.sh
|
||||
echo genesis_file_path=start-up-files/go-ethereum/genesis.json >> ./config.sh
|
||||
echo db_write=$DB_WRITE >> ./config.sh
|
||||
echo eth_forward_eth_calls=$ETH_FORWARD_ETH_CALLS >> ./config.sh
|
||||
echo eth_proxy_on_error=$ETH_PROXY_ON_ERROR >> ./config.sh
|
||||
echo eth_http_path=$ETH_HTTP_PATH >> ./config.sh
|
||||
cat ./config.sh
|
||||
- name: Build geth
|
||||
run: |
|
||||
sleep 30
|
||||
go install github.com/onsi/ginkgo/v2/ginkgo
|
||||
ginkgo -v --label-filter 'proxy' -r ./integration
|
||||
cd $GITHUB_WORKSPACE/stack-orchestrator/helper-scripts
|
||||
./compile-geth.sh \
|
||||
-p "$GITHUB_WORKSPACE/config.sh" \
|
||||
-e docker
|
||||
- name: Run docker compose
|
||||
run: |
|
||||
docker-compose \
|
||||
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-db-sharding.yml" \
|
||||
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" \
|
||||
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-server.yml" \
|
||||
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-contract.yml" \
|
||||
--env-file "$GITHUB_WORKSPACE/config.sh" \
|
||||
up -d --build
|
||||
- name: Test
|
||||
run: |
|
||||
cd $GITHUB_WORKSPACE/ipld-eth-server
|
||||
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8081)" != "200" ]; do echo "waiting for ipld-eth-server..." && sleep 5; done && \
|
||||
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8545)" != "200" ]; do echo "waiting for geth-statediff..." && sleep 5; done && \
|
||||
make integrationtest
|
||||
|
||||
17
.gitignore
vendored
17
.gitignore
vendored
@ -1,2 +1,19 @@
|
||||
.idea
|
||||
.vscode
|
||||
test_data_dir/
|
||||
contracts/*
|
||||
environments/*.toml
|
||||
Vagrantfile
|
||||
vagrant*.sh
|
||||
.vagrant
|
||||
test_scripts/
|
||||
ipld-eth-server
|
||||
postgraphile/build/
|
||||
postgraphile/node_modules/
|
||||
postgraphile/package-lock.json
|
||||
vulcanizedb.log
|
||||
db/migrations/20*.sql
|
||||
plugins/*.so
|
||||
postgraphile/*.toml
|
||||
postgraphile/schema.graphql
|
||||
vulcanizedb.pem
|
||||
|
||||
39
Dockerfile
39
Dockerfile
@ -1,34 +1,34 @@
|
||||
FROM golang:1.21-alpine AS debugger
|
||||
FROM golang:1.18-alpine as builder
|
||||
|
||||
# Include dlv
|
||||
RUN go install github.com/go-delve/delve/cmd/dlv@v1.22.1
|
||||
|
||||
FROM golang:1.21-alpine AS builder
|
||||
|
||||
RUN apk --update --no-cache add gcc musl-dev binutils-gold git
|
||||
RUN apk --update --no-cache add make git g++ linux-headers
|
||||
# DEBUG
|
||||
RUN apk add busybox-extras
|
||||
|
||||
# Build ipld-eth-server
|
||||
WORKDIR /go/src/github.com/cerc-io/ipld-eth-server
|
||||
|
||||
ARG GIT_VDBTO_TOKEN
|
||||
WORKDIR /go/src/github.com/vulcanize/ipld-eth-server
|
||||
|
||||
# Cache the modules
|
||||
ENV GO111MODULE=on
|
||||
COPY go.mod .
|
||||
COPY go.sum .
|
||||
RUN if [ -n "$GIT_VDBTO_TOKEN" ]; then git config --global url."https://$GIT_VDBTO_TOKEN:@git.vdb.to/".insteadOf "https://git.vdb.to/"; fi && \
|
||||
go mod download && \
|
||||
rm -f ~/.gitconfig
|
||||
RUN go mod download
|
||||
|
||||
COPY . .
|
||||
|
||||
# Build the binary
|
||||
RUN GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipld-eth-server .
|
||||
RUN GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipld-eth-server .
|
||||
|
||||
# Copy migration tool
|
||||
WORKDIR /
|
||||
ARG GOOSE_VER="v2.6.0"
|
||||
ADD https://github.com/pressly/goose/releases/download/${GOOSE_VER}/goose-linux64 ./goose
|
||||
RUN chmod +x ./goose
|
||||
|
||||
# app container
|
||||
FROM alpine
|
||||
|
||||
ARG USER="vdm"
|
||||
ARG CONFIG_FILE="./environments/example.toml"
|
||||
|
||||
RUN adduser -Du 5000 $USER
|
||||
WORKDIR /app
|
||||
@ -37,12 +37,13 @@ USER $USER
|
||||
|
||||
# chown first so dir is writable
|
||||
# note: using $USER is merged, but not in the stable release yet
|
||||
COPY --chown=5000:5000 --from=builder /go/src/github.com/cerc-io/ipld-eth-server/entrypoint.sh .
|
||||
COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/ipld-eth-server/$CONFIG_FILE config.toml
|
||||
COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/ipld-eth-server/entrypoint.sh .
|
||||
|
||||
|
||||
# keep binaries immutable
|
||||
COPY --from=builder /go/src/github.com/cerc-io/ipld-eth-server/ipld-eth-server ipld-eth-server
|
||||
|
||||
# Allow for debugging
|
||||
COPY --from=debugger /go/bin/dlv /usr/local/bin/
|
||||
COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-server/ipld-eth-server ipld-eth-server
|
||||
COPY --from=builder /goose goose
|
||||
COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-server/environments environments
|
||||
|
||||
ENTRYPOINT ["/app/entrypoint.sh"]
|
||||
|
||||
138
Makefile
Normal file
138
Makefile
Normal file
@ -0,0 +1,138 @@
|
||||
BIN = $(GOPATH)/bin
|
||||
BASE = $(GOPATH)/src/$(PACKAGE)
|
||||
PKGS = go list ./... | grep -v "^vendor/"
|
||||
|
||||
# Tools
|
||||
|
||||
## Migration tool
|
||||
GOOSE = $(BIN)/goose
|
||||
$(BIN)/goose:
|
||||
go get -u github.com/pressly/goose/cmd/goose
|
||||
|
||||
## Source linter
|
||||
LINT = $(BIN)/golint
|
||||
$(BIN)/golint:
|
||||
go get -u golang.org/x/lint/golint
|
||||
|
||||
## Combination linter
|
||||
METALINT = $(BIN)/gometalinter.v2
|
||||
$(BIN)/gometalinter.v2:
|
||||
go get -u gopkg.in/alecthomas/gometalinter.v2
|
||||
$(METALINT) --install
|
||||
|
||||
|
||||
.PHONY: installtools
|
||||
installtools: | $(LINT) $(GOOSE)
|
||||
echo "Installing tools"
|
||||
go mod download
|
||||
|
||||
.PHONY: metalint
|
||||
metalint: | $(METALINT)
|
||||
$(METALINT) ./... --vendor \
|
||||
--fast \
|
||||
--exclude="exported (function)|(var)|(method)|(type).*should have comment or be unexported" \
|
||||
--format="{{.Path.Abs}}:{{.Line}}:{{if .Col}}{{.Col}}{{end}}:{{.Severity}}: {{.Message}} ({{.Linter}})"
|
||||
|
||||
.PHONY: lint
|
||||
lint:
|
||||
$(LINT) $$($(PKGS)) | grep -v -E "exported (function)|(var)|(method)|(type).*should have comment or be unexported"
|
||||
|
||||
#Database
|
||||
HOST_NAME = localhost
|
||||
PORT = 5432
|
||||
NAME =
|
||||
USER = postgres
|
||||
PASSWORD = password
|
||||
CONNECT_STRING=postgresql://$(USER):$(PASSWORD)@$(HOST_NAME):$(PORT)/$(NAME)?sslmode=disable
|
||||
|
||||
#Test
|
||||
TEST_DB = vulcanize_testing
|
||||
TEST_CONNECT_STRING = postgresql://$(DATABASE_USER):$(DATABASE_PASSWORD)@$(DATABASE_HOSTNAME):$(DATABASE_PORT)/$(TEST_DB)?sslmode=disable
|
||||
TEST_CONNECT_STRING_LOCAL = postgresql://$(USER)@$(HOST_NAME):$(PORT)/$(TEST_DB)?sslmode=disable
|
||||
|
||||
.PHONY: test
|
||||
test: | $(GOOSE)
|
||||
go vet ./...
|
||||
go fmt ./...
|
||||
go run github.com/onsi/ginkgo/ginkgo -r --skipPackage=test
|
||||
|
||||
.PHONY: integrationtest
|
||||
integrationtest: | $(GOOSE)
|
||||
go vet ./...
|
||||
go fmt ./...
|
||||
go run github.com/onsi/ginkgo/ginkgo -r test/ -v
|
||||
|
||||
.PHONY: test_local
|
||||
test_local: | $(GOOSE)
|
||||
go vet ./...
|
||||
go fmt ./...
|
||||
./scripts/run_unit_test.sh
|
||||
|
||||
build:
|
||||
go fmt ./...
|
||||
GO111MODULE=on go build
|
||||
|
||||
# Parameter checks
|
||||
## Check that DB variables are provided
|
||||
.PHONY: checkdbvars
|
||||
checkdbvars:
|
||||
test -n "$(HOST_NAME)" # $$HOST_NAME
|
||||
test -n "$(PORT)" # $$PORT
|
||||
test -n "$(NAME)" # $$NAME
|
||||
@echo $(CONNECT_STRING)
|
||||
|
||||
## Check that the migration variable (id/timestamp) is provided
|
||||
.PHONY: checkmigration
|
||||
checkmigration:
|
||||
test -n "$(MIGRATION)" # $$MIGRATION
|
||||
|
||||
# Check that the migration name is provided
|
||||
.PHONY: checkmigname
|
||||
checkmigname:
|
||||
test -n "$(NAME)" # $$NAME
|
||||
|
||||
# Migration operations
|
||||
## Rollback the last migration
|
||||
.PHONY: rollback
|
||||
rollback: $(GOOSE) checkdbvars
|
||||
$(GOOSE) -dir db/migrations postgres "$(CONNECT_STRING)" down
|
||||
pg_dump -O -s $(CONNECT_STRING) > db/schema.sql
|
||||
|
||||
|
||||
## Rollback to a select migration (id/timestamp)
|
||||
.PHONY: rollback_to
|
||||
rollback_to: $(GOOSE) checkmigration checkdbvars
|
||||
$(GOOSE) -dir db/migrations postgres "$(CONNECT_STRING)" down-to "$(MIGRATION)"
|
||||
|
||||
## Apply all migrations not already run
|
||||
.PHONY: migrate
|
||||
migrate: $(GOOSE) checkdbvars
|
||||
$(GOOSE) -dir db/migrations postgres "$(CONNECT_STRING)" up
|
||||
pg_dump -O -s $(CONNECT_STRING) > db/schema.sql
|
||||
|
||||
## Create a new migration file
|
||||
.PHONY: new_migration
|
||||
new_migration: $(GOOSE) checkmigname
|
||||
$(GOOSE) -dir db/migrations create $(NAME) sql
|
||||
|
||||
## Check which migrations are applied at the moment
|
||||
.PHONY: migration_status
|
||||
migration_status: $(GOOSE) checkdbvars
|
||||
$(GOOSE) -dir db/migrations postgres "$(CONNECT_STRING)" status
|
||||
|
||||
# Convert timestamped migrations to versioned (to be run in CI);
|
||||
# merge timestamped files to prevent conflict
|
||||
.PHONY: version_migrations
|
||||
version_migrations:
|
||||
$(GOOSE) -dir db/migrations fix
|
||||
|
||||
# Import a psql schema to the database
|
||||
.PHONY: import
|
||||
import:
|
||||
test -n "$(NAME)" # $$NAME
|
||||
psql $(NAME) < db/schema.sql
|
||||
|
||||
## Build docker image
|
||||
.PHONY: docker-build
|
||||
docker-build:
|
||||
docker build -t vulcanize/ipld-eth-server .
|
||||
97
README.md
97
README.md
@ -22,20 +22,20 @@ Additional, unique endpoints are exposed which utilize the new indexes and state
|
||||
|
||||
## Dependencies
|
||||
Minimal build dependencies
|
||||
* Go (1.19)
|
||||
* Go (1.18)
|
||||
* Git
|
||||
* GCC compiler
|
||||
* This repository
|
||||
|
||||
External dependency
|
||||
* Postgres database populated by [ipld-eth-db](https://github.com/cerc-io/ipld-eth-db)
|
||||
* Postgres database populated by [ipld-eth-db](https://github.com/vulcanize/ipld-eth-db)
|
||||
|
||||
## Install
|
||||
Start by downloading ipld-eth-server and moving into the repo:
|
||||
|
||||
`GO111MODULE=off go get -d github.com/cerc-io/ipld-eth-server/v5`
|
||||
`GO111MODULE=off go get -d github.com/vulcanize/ipld-eth-server/v4`
|
||||
|
||||
`cd $GOPATH/src/github.com/cerc-io/ipld-eth-server/v5@v5.x.x`
|
||||
`cd $GOPATH/src/github.com/vulcanize/ipld-eth-server/v4@v4.x.x`
|
||||
|
||||
Then, build the binary:
|
||||
|
||||
@ -60,17 +60,18 @@ The corresponding CLI flags can be found with the `./ipld-eth-server serve --hel
|
||||
password = "" # $DATABASE_PASSWORD
|
||||
|
||||
[log]
|
||||
level = "info" # $LOG_LEVEL
|
||||
level = "info" # $LOGRUS_LEVEL
|
||||
|
||||
[server]
|
||||
ipcPath = "~/.vulcanize/vulcanize.ipc" # $SERVER_IPC_PATH
|
||||
wsPath = "127.0.0.1:8081" # $SERVER_WS_PATH
|
||||
httpPath = "127.0.0.1:8082" # $SERVER_HTTP_PATH
|
||||
graphql = true # $SERVER_GRAPHQL
|
||||
graphqlPath = "" # $SERVER_GRAPHQL_PATH
|
||||
graphqlEndpoint = "" # $SERVER_GRAPHQL_ENDPOINT
|
||||
|
||||
[ethereum]
|
||||
chainID = "1" # $ETH_CHAIN_ID
|
||||
defaultSender = "" # $ETH_DEFAULT_SENDER_ADDR
|
||||
rpcGasCap = "1000000000000" # $ETH_RPC_GAS_CAP
|
||||
httpPath = "127.0.0.1:8545" # $ETH_HTTP_PATH
|
||||
nodeID = "arch1" # $ETH_NODE_ID
|
||||
@ -79,9 +80,9 @@ The corresponding CLI flags can be found with the `./ipld-eth-server serve --hel
|
||||
networkID = "1" # $ETH_NETWORK_ID
|
||||
```
|
||||
|
||||
The `database` fields are for connecting to a Postgres database that has been/is being populated by [ipld-eth-indexer](https://github.com/vulcanize/ipld-eth-indexer)
|
||||
The `server` fields set the paths for exposing the ipld-eth-server endpoints
|
||||
The `ethereum` fields set the chainID and default sender address to use for EVM simulation, and can optionally be used to configure a remote eth node to forward cache misses to
|
||||
The `database` fields are for connecting to a Postgres database that has been/is being populated by [ipld-eth-indexer](https://github.com/vulcanize/ipld-eth-indexer)
|
||||
The `server` fields set the paths for exposing the ipld-eth-server endpoints
|
||||
The `ethereum` fields set the chainID and default sender address to use for EVM simulation, and can optionally be used to configure a remote eth node to forward cache misses to
|
||||
|
||||
|
||||
### Endpoints
|
||||
@ -91,35 +92,61 @@ TODO: Port the IPLD RPC subscription endpoints after the decoupling
|
||||
#### Ethereum JSON-RPC
|
||||
ipld-eth-server currently recapitulates portions of the Ethereum JSON-RPC api standard.
|
||||
|
||||
The currently supported standard endpoints are:
|
||||
- `eth_call`
|
||||
- `eth_getBalance`
|
||||
- `eth_getStorageAt`
|
||||
- `eth_getCode`
|
||||
- `eth_getProof`
|
||||
- `eth_blockNumber`
|
||||
- `eth_getHeaderByNumber`
|
||||
- `eth_getHeaderByHash`
|
||||
- `eth_getBlockByNumber`
|
||||
- `eth_getBlockByHash`
|
||||
- `eth_getTransactionCount`
|
||||
- `eth_getBlockTransactionCountByHash`
|
||||
- `eth_getBlockTransactionCountByNumber`
|
||||
- `eth_getTransactionByHash`
|
||||
- `eth_getRawTransactionByHash`
|
||||
- `eth_getTransactionByBlockHashAndIndex`
|
||||
- `eth_getTransactionByBlockNumberAndIndex`
|
||||
- `eth_getRawTransactionByBlockHashAndIndex`
|
||||
- `eth_getRawTransactionByBlockNumberAndIndex`
|
||||
- `eth_getTransactionReceipt`
|
||||
- `eth_getLogs`
|
||||
- `eth_getUncleCountByBlockHash`
|
||||
- `eth_getUncleCountByBlockNumber`
|
||||
- `eth_getUncleByBlockHashAndIndex`
|
||||
- `eth_getUncleByBlockNumberAndIndex`
|
||||
The currently supported standard endpoints are:
|
||||
`eth_call`
|
||||
`eth_getBalance`
|
||||
`eth_getStorageAt`
|
||||
`eth_getCode`
|
||||
`eth_getProof`
|
||||
`eth_blockNumber`
|
||||
`eth_getHeaderByNumber`
|
||||
`eth_getHeaderByHash`
|
||||
`eth_getBlockByNumber`
|
||||
`eth_getBlockByHash`
|
||||
`eth_getTransactionCount`
|
||||
`eth_getBlockTransactionCountByHash`
|
||||
`eth_getBlockTransactionCountByNumber`
|
||||
`eth_getTransactionByHash`
|
||||
`eth_getRawTransactionByHash`
|
||||
`eth_getTransactionByBlockHashAndIndex`
|
||||
`eth_getTransactionByBlockNumberAndIndex`
|
||||
`eth_getRawTransactionByBlockHashAndIndex`
|
||||
`eth_getRawTransactionByBlockNumberAndIndex`
|
||||
`eth_getTransactionReceipt`
|
||||
`eth_getLogs`
|
||||
`eth_getUncleCountByBlockHash`
|
||||
`eth_getUncleCountByBlockNumber`
|
||||
`eth_getUncleByBlockHashAndIndex`
|
||||
`eth_getUncleByBlockNumberAndIndex`
|
||||
|
||||
TODO: Add the rest of the standard endpoints and unique endpoints (e.g. getSlice)
|
||||
|
||||
|
||||
### CLI Options and Environment variables
|
||||
|
||||
|
||||
| CLI Option | Environment Variable | Default Value | Comment |
|
||||
| ----------------------------- | ----------------------------- | ---------------- | ----------------------------------- |
|
||||
| `database-hostname` | `DATABASE_HOSTNAME` | localhost | IPLD database host |
|
||||
| `database-port` | `DATABASE_PORT` | 5432 | IPLD database port |
|
||||
| `database-name` | `DATABASE_NAME` | vulcanize_public | IPLD database name |
|
||||
| `database-user` | `DATABASE_USER` | | IPLD database user |
|
||||
| `database-password` | `DATABASE_PASSWORD` | | IPLD database password |
|
||||
| `eth-server-graphql` | `ETH_SERVER_GRAPHQL` | false | If `true` enable Eth GraphQL Server |
|
||||
| `eth-server-graphql-path` | `ETH_SERVER_GRAPHQLPATH` | | If `eth-server-graphql` set to true, endpoint url for graphql server (host:port) |
|
||||
| `eth-server-http` | `ETH_SERVER_HTTP` | true | If `true` enable Eth HTTP JSON-RPC Server |
|
||||
| `eth-server-http-path` | `ETH_SERVER_HTTPPATH` | | If `eth-server-http` set to `true`, endpoint url for Eth HTTP JSON-RPC server (host:port) |
|
||||
| `eth-server-ws` | `ETH_SERVER_WS` | false | If `true` enable Eth WS JSON-RPC Server |
|
||||
| `eth-server-ws-path` | `ETH_SERVER_WSPATH` | | If `eth-server-ws` set to `true`, endpoint url for Eth WS JSON-RPC server (host:port) |
|
||||
| `eth-server-ipc` | `ETH_SERVER_IPC` | false | If `true` enable Eth IPC JSON-RPC Server |
|
||||
| `eth-server-ipc-path` | `ETH_SERVER_IPC_PATH` | | If `eth-server-ws` set to `true`, path for Eth IPC JSON-RPC server |
|
||||
| `ipld-server-graphql` | `IPLD_SERVER_GRAPHQL` | false | If `true` enable IPLD GraphQL Server |
|
||||
| `ipld-server-graphql-path` | `IPLD_SERVER_GRAPHQLPATH` | | If `ipld-server-graphql` set to true, endpoint url for graphql server (host:port) |
|
||||
| `ipld-postgraphile-path` | `IPLD_POSTGRAPHILEPATH` | | If `ipld-server-graphql` set to true, http url for postgraphile server on top of IPLD db |
|
||||
| `tracing-http-path` | `TRACING_HTTPPATH` | | If `ipld-server-graphql` set to true, http url for tracing server |
|
||||
| `tracing-postgraphile-path` | `TRACING.POSTGRAPHILEPATH` | | If `ipld-server-graphql` set to true, http url for postgraphile server on top of tracing db |
|
||||
|
||||
|
||||
### Testing
|
||||
|
||||
Follow steps in [test/README.md](./test/README.md)
|
||||
|
||||
16
chain.json
Normal file
16
chain.json
Normal file
@ -0,0 +1,16 @@
|
||||
{
|
||||
"chainId": 4,
|
||||
"homesteadBlock": 1,
|
||||
"eip150Block": 2,
|
||||
"eip150Hash": "0x9b095b36c15eaf13044373aef8ee0bd3a382a5abb92e402afa44b8249c3a90e9",
|
||||
"eip155Block": 3,
|
||||
"eip158Block": 3,
|
||||
"byzantiumBlock": 3,
|
||||
"constantinopleBlock": 3,
|
||||
"petersburgBlock": 3,
|
||||
"istanbulBlock": 3,
|
||||
"clique": {
|
||||
"period": 15,
|
||||
"epoch": 30000
|
||||
}
|
||||
}
|
||||
40
cmd/root.go
40
cmd/root.go
@ -18,15 +18,16 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/joho/godotenv"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/prom"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/prom"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -49,7 +50,24 @@ func Execute() {
|
||||
}
|
||||
|
||||
func initFuncs(cmd *cobra.Command, args []string) {
|
||||
log.Init()
|
||||
viper.BindEnv("log.file", "LOGRUS_FILE")
|
||||
logfile := viper.GetString("log.file")
|
||||
if logfile != "" {
|
||||
file, err := os.OpenFile(logfile,
|
||||
os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
|
||||
if err == nil {
|
||||
log.Infof("Directing output to %s", logfile)
|
||||
log.SetOutput(file)
|
||||
} else {
|
||||
log.SetOutput(os.Stdout)
|
||||
log.Info("Failed to log to file, using default stdout")
|
||||
}
|
||||
} else {
|
||||
log.SetOutput(os.Stdout)
|
||||
}
|
||||
if err := logLevel(); err != nil {
|
||||
log.Fatal("Could not set log level: ", err)
|
||||
}
|
||||
|
||||
if viper.GetBool("metrics") {
|
||||
prom.Init()
|
||||
@ -65,6 +83,20 @@ func initFuncs(cmd *cobra.Command, args []string) {
|
||||
}
|
||||
}
|
||||
|
||||
func logLevel() error {
|
||||
viper.BindEnv("log.level", "LOGRUS_LEVEL")
|
||||
lvl, err := log.ParseLevel(viper.GetString("log.level"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.SetLevel(lvl)
|
||||
if lvl > log.InfoLevel {
|
||||
log.SetReportCaller(true)
|
||||
}
|
||||
log.Info("Log level set to ", lvl.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
cobra.OnInitialize(initConfig)
|
||||
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
|
||||
@ -95,7 +127,7 @@ func init() {
|
||||
|
||||
func initConfig() {
|
||||
if cfgFile == "" && envFile == "" {
|
||||
log.Warn("No configuration file specified, use --config , --env flag to provide configuration")
|
||||
log.Fatal("No configuration file specified, use --config , --env flag to provide configuration")
|
||||
}
|
||||
|
||||
if cfgFile != "" {
|
||||
|
||||
177
cmd/serve.go
177
cmd/serve.go
@ -25,16 +25,18 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/mailgun/groupcache/v2"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/vulcanize/gap-filler/pkg/mux"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/graphql"
|
||||
srpc "github.com/cerc-io/ipld-eth-server/v5/pkg/rpc"
|
||||
s "github.com/cerc-io/ipld-eth-server/v5/pkg/serve"
|
||||
v "github.com/cerc-io/ipld-eth-server/v5/version"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/graphql"
|
||||
srpc "github.com/vulcanize/ipld-eth-server/v4/pkg/rpc"
|
||||
s "github.com/vulcanize/ipld-eth-server/v4/pkg/serve"
|
||||
v "github.com/vulcanize/ipld-eth-server/v4/version"
|
||||
)
|
||||
|
||||
var ErrNoRpcEndpoints = errors.New("no rpc endpoints is available")
|
||||
@ -43,7 +45,9 @@ var ErrNoRpcEndpoints = errors.New("no rpc endpoints is available")
|
||||
var serveCmd = &cobra.Command{
|
||||
Use: "serve",
|
||||
Short: "serve chain data from PG-IPFS",
|
||||
Long: `This command configures a VulcanizeDB ipld-eth-server.`,
|
||||
Long: `This command configures a VulcanizeDB ipld-eth-server.
|
||||
|
||||
`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
subCommand = cmd.CalledAs()
|
||||
logWithCommand = *log.WithField("SubCommand", subCommand)
|
||||
@ -52,30 +56,25 @@ var serveCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
func serve() {
|
||||
logWithCommand.Infof("ipld-eth-server version: %s", v.VersionWithMeta)
|
||||
logWithCommand.Infof("running ipld-eth-server version: %s", v.VersionWithMeta)
|
||||
|
||||
var forwardPayloadChan chan eth.ConvertedPayload
|
||||
wg := new(sync.WaitGroup)
|
||||
logWithCommand.Debug("loading server configuration variables")
|
||||
serverConfig, err := s.NewConfig()
|
||||
if err != nil {
|
||||
logWithCommand.Fatal(err)
|
||||
}
|
||||
logWithCommand.Debugf("server config: %+v", serverConfig)
|
||||
logWithCommand.Debugf("chain config: %+v", serverConfig.ChainConfig)
|
||||
logWithCommand.Infof("server config: %+v", serverConfig)
|
||||
logWithCommand.Debug("initializing new server service")
|
||||
server, err := s.NewServer(serverConfig)
|
||||
if err != nil {
|
||||
logWithCommand.Fatal(err)
|
||||
}
|
||||
if serverConfig.ForwardEthCalls {
|
||||
logWithCommand.Info("Fowarding eth_call")
|
||||
}
|
||||
if serverConfig.ForwardGetStorageAt {
|
||||
logWithCommand.Info("Fowarding eth_getStorageAt")
|
||||
}
|
||||
if serverConfig.ProxyOnError {
|
||||
logWithCommand.Info("Proxy on error is enabled")
|
||||
}
|
||||
|
||||
server.Serve(wg)
|
||||
logWithCommand.Info("starting up server servers")
|
||||
forwardPayloadChan = make(chan eth.ConvertedPayload, s.PayloadChanBufferSize)
|
||||
server.Serve(wg, forwardPayloadChan)
|
||||
if err := startServers(server, serverConfig); err != nil {
|
||||
logWithCommand.Fatal(err)
|
||||
}
|
||||
@ -84,6 +83,11 @@ func serve() {
|
||||
logWithCommand.Fatal(err)
|
||||
}
|
||||
|
||||
err = startIpldGraphQL(serverConfig)
|
||||
if err != nil {
|
||||
logWithCommand.Fatal(err)
|
||||
}
|
||||
|
||||
err = startGroupCacheService(serverConfig)
|
||||
if err != nil {
|
||||
logWithCommand.Fatal(err)
|
||||
@ -93,7 +97,7 @@ func serve() {
|
||||
go startStateTrieValidator(serverConfig, server)
|
||||
logWithCommand.Info("state validator enabled")
|
||||
} else {
|
||||
logWithCommand.Debug("state validator disabled")
|
||||
logWithCommand.Info("state validator disabled")
|
||||
}
|
||||
|
||||
shutdown := make(chan os.Signal, 1)
|
||||
@ -108,33 +112,33 @@ func serve() {
|
||||
|
||||
func startServers(server s.Server, settings *s.Config) error {
|
||||
if settings.IPCEnabled {
|
||||
logWithCommand.Debug("starting up IPC server")
|
||||
logWithCommand.Info("starting up IPC server")
|
||||
_, _, err := srpc.StartIPCEndpoint(settings.IPCEndpoint, server.APIs())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
logWithCommand.Debug("IPC server is disabled")
|
||||
logWithCommand.Info("IPC server is disabled")
|
||||
}
|
||||
|
||||
if settings.WSEnabled {
|
||||
logWithCommand.Debug("starting up WS server")
|
||||
_, _, err := srpc.StartWSEndpoint(settings.WSEndpoint, server.APIs(), []string{"vdb", "net"}, nil)
|
||||
logWithCommand.Info("starting up WS server")
|
||||
_, _, err := srpc.StartWSEndpoint(settings.WSEndpoint, server.APIs(), []string{"vdb", "net"}, nil, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
logWithCommand.Debug("WS server is disabled")
|
||||
logWithCommand.Info("WS server is disabled")
|
||||
}
|
||||
|
||||
if settings.HTTPEnabled {
|
||||
logWithCommand.Debug("starting up HTTP server")
|
||||
_, err := srpc.StartHTTPEndpoint(settings.HTTPEndpoint, server.APIs(), []string{"vdb", "eth", "debug", "net"}, nil, []string{"*"}, rpc.HTTPTimeouts{})
|
||||
logWithCommand.Info("starting up HTTP server")
|
||||
_, err := srpc.StartHTTPEndpoint(settings.HTTPEndpoint, server.APIs(), []string{"vdb", "eth", "net"}, nil, []string{"*"}, rpc.HTTPTimeouts{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
logWithCommand.Debug("HTTP server is disabled")
|
||||
logWithCommand.Info("HTTP server is disabled")
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -142,7 +146,7 @@ func startServers(server s.Server, settings *s.Config) error {
|
||||
|
||||
func startEthGraphQL(server s.Server, settings *s.Config) (graphQLServer *graphql.Service, err error) {
|
||||
if settings.EthGraphqlEnabled {
|
||||
logWithCommand.Debug("starting up ETH GraphQL server")
|
||||
logWithCommand.Info("starting up ETH GraphQL server")
|
||||
endPoint := settings.EthGraphqlEndpoint
|
||||
if endPoint != "" {
|
||||
graphQLServer, err = graphql.New(server.Backend(), endPoint, nil, []string{"*"}, rpc.HTTPTimeouts{})
|
||||
@ -152,17 +156,69 @@ func startEthGraphQL(server s.Server, settings *s.Config) (graphQLServer *graphq
|
||||
err = graphQLServer.Start(nil)
|
||||
}
|
||||
} else {
|
||||
logWithCommand.Debug("ETH GraphQL server is disabled")
|
||||
logWithCommand.Info("ETH GraphQL server is disabled")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func startIpldGraphQL(settings *s.Config) error {
|
||||
if settings.IpldGraphqlEnabled {
|
||||
logWithCommand.Info("starting up IPLD GraphQL server")
|
||||
|
||||
gqlIpldAddr, err := url.Parse(settings.IpldPostgraphileEndpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gqlTracingAPIAddr, err := url.Parse(settings.TracingPostgraphileEndpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ethClients, err := parseRpcAddresses(settings.EthHttpEndpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var tracingClients []*rpc.Client
|
||||
tracingEndpoint := viper.GetString("tracing.httpPath")
|
||||
if tracingEndpoint != "" {
|
||||
tracingClients, err = parseRpcAddresses(tracingEndpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
router, err := mux.NewServeMux(&mux.Options{
|
||||
BasePath: "/",
|
||||
EnableGraphiQL: true,
|
||||
Postgraphile: mux.PostgraphileOptions{
|
||||
Default: gqlIpldAddr,
|
||||
TracingAPI: gqlTracingAPIAddr,
|
||||
},
|
||||
RPC: mux.RPCOptions{
|
||||
DefaultClients: ethClients,
|
||||
TracingClients: tracingClients,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go http.ListenAndServe(settings.IpldGraphqlEndpoint, router)
|
||||
} else {
|
||||
logWithCommand.Info("IPLD GraphQL server is disabled")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func startGroupCacheService(settings *s.Config) error {
|
||||
gcc := settings.GroupCache
|
||||
|
||||
if gcc.Pool.Enabled {
|
||||
logWithCommand.Debug("starting up groupcache pool HTTTP server")
|
||||
logWithCommand.Info("starting up groupcache pool HTTTP server")
|
||||
|
||||
pool := groupcache.NewHTTPPoolOpts(gcc.Pool.HttpEndpoint, &groupcache.HTTPPoolOptions{})
|
||||
pool.Set(gcc.Pool.PeerHttpEndpoints...)
|
||||
@ -180,9 +236,9 @@ func startGroupCacheService(settings *s.Config) error {
|
||||
// Start a HTTP server to listen for peer requests from the groupcache
|
||||
go server.ListenAndServe()
|
||||
|
||||
logWithCommand.Infof("groupcache pool endpoint opened at %s", httpURL)
|
||||
logWithCommand.Infof("groupcache pool endpoint opened for url %s", httpURL)
|
||||
} else {
|
||||
logWithCommand.Debug("Groupcache pool is disabled")
|
||||
logWithCommand.Info("Groupcache pool is disabled")
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -199,7 +255,7 @@ func startStateTrieValidator(config *s.Config, server s.Server) {
|
||||
|
||||
block, err := backend.CurrentBlock()
|
||||
if err != nil {
|
||||
log.Error("Error fetching current block for state trie validator")
|
||||
log.Errorln("Error fetching current block for state trie validator")
|
||||
continue
|
||||
}
|
||||
|
||||
@ -263,24 +319,31 @@ func init() {
|
||||
|
||||
// flags for all config variables
|
||||
// eth graphql and json-rpc parameters
|
||||
serveCmd.PersistentFlags().Bool("server-graphql", false, "turn on the eth graphql server")
|
||||
serveCmd.PersistentFlags().String("server-graphql-path", "", "endpoint url for eth graphql server (host:port)")
|
||||
serveCmd.PersistentFlags().Bool("server-http", true, "turn on the eth http json-rpc server")
|
||||
serveCmd.PersistentFlags().String("server-http-path", "", "endpoint url for eth http json-rpc server (host:port)")
|
||||
serveCmd.PersistentFlags().Bool("server-ws", false, "turn on the eth websocket json-rpc server")
|
||||
serveCmd.PersistentFlags().String("server-ws-path", "", "endpoint url for eth websocket json-rpc server (host:port)")
|
||||
serveCmd.PersistentFlags().Bool("server-ipc", false, "turn on the eth ipc json-rpc server")
|
||||
serveCmd.PersistentFlags().String("server-ipc-path", "", "path for eth ipc json-rpc server")
|
||||
serveCmd.PersistentFlags().Bool("eth-server-graphql", false, "turn on the eth graphql server")
|
||||
serveCmd.PersistentFlags().String("eth-server-graphql-path", "", "endpoint url for eth graphql server (host:port)")
|
||||
serveCmd.PersistentFlags().Bool("eth-server-http", true, "turn on the eth http json-rpc server")
|
||||
serveCmd.PersistentFlags().String("eth-server-http-path", "", "endpoint url for eth http json-rpc server (host:port)")
|
||||
serveCmd.PersistentFlags().Bool("eth-server-ws", false, "turn on the eth websocket json-rpc server")
|
||||
serveCmd.PersistentFlags().String("eth-server-ws-path", "", "endpoint url for eth websocket json-rpc server (host:port)")
|
||||
serveCmd.PersistentFlags().Bool("eth-server-ipc", false, "turn on the eth ipc json-rpc server")
|
||||
serveCmd.PersistentFlags().String("eth-server-ipc-path", "", "path for eth ipc json-rpc server")
|
||||
|
||||
// ipld and tracing graphql parameters
|
||||
serveCmd.PersistentFlags().Bool("ipld-server-graphql", false, "turn on the ipld graphql server")
|
||||
serveCmd.PersistentFlags().String("ipld-server-graphql-path", "", "endpoint url for ipld graphql server (host:port)")
|
||||
serveCmd.PersistentFlags().String("ipld-postgraphile-path", "", "http url to postgraphile on top of ipld database")
|
||||
serveCmd.PersistentFlags().String("tracing-http-path", "", "http url to tracing service")
|
||||
serveCmd.PersistentFlags().String("tracing-postgraphile-path", "", "http url to postgraphile on top of tracing db")
|
||||
|
||||
serveCmd.PersistentFlags().String("eth-http-path", "", "http url for ethereum node")
|
||||
serveCmd.PersistentFlags().String("eth-node-id", "", "eth node id")
|
||||
serveCmd.PersistentFlags().String("eth-client-name", "Geth", "eth client name")
|
||||
serveCmd.PersistentFlags().String("eth-genesis-block", "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3", "eth genesis block hash")
|
||||
serveCmd.PersistentFlags().String("eth-network-id", "1", "eth network id")
|
||||
serveCmd.PersistentFlags().String("eth-chain-id", "", "eth chain id")
|
||||
serveCmd.PersistentFlags().String("eth-chain-config", "", "json chain config file location")
|
||||
serveCmd.PersistentFlags().String("eth-chain-id", "1", "eth chain id")
|
||||
serveCmd.PersistentFlags().String("eth-default-sender", "", "default sender address")
|
||||
serveCmd.PersistentFlags().String("eth-rpc-gas-cap", "", "rpc gas cap (for eth_Call execution)")
|
||||
serveCmd.PersistentFlags().String("eth-chain-config", "", "json chain config file location")
|
||||
serveCmd.PersistentFlags().Bool("eth-supports-state-diff", false, "whether the proxy ethereum client supports statediffing endpoints")
|
||||
serveCmd.PersistentFlags().Bool("eth-forward-eth-calls", false, "whether to immediately forward eth_calls to proxy client")
|
||||
serveCmd.PersistentFlags().Bool("eth-proxy-on-error", true, "whether to forward all failed calls to proxy client")
|
||||
@ -299,20 +362,27 @@ func init() {
|
||||
|
||||
// and their bindings
|
||||
// eth graphql server
|
||||
viper.BindPFlag("server.graphql", serveCmd.PersistentFlags().Lookup("server-graphql"))
|
||||
viper.BindPFlag("server.graphqlPath", serveCmd.PersistentFlags().Lookup("server-graphql-path"))
|
||||
viper.BindPFlag("eth.server.graphql", serveCmd.PersistentFlags().Lookup("eth-server-graphql"))
|
||||
viper.BindPFlag("eth.server.graphqlPath", serveCmd.PersistentFlags().Lookup("eth-server-graphql-path"))
|
||||
|
||||
// eth http json-rpc server
|
||||
viper.BindPFlag("server.http", serveCmd.PersistentFlags().Lookup("server-http"))
|
||||
viper.BindPFlag("server.httpPath", serveCmd.PersistentFlags().Lookup("server-http-path"))
|
||||
viper.BindPFlag("eth.server.http", serveCmd.PersistentFlags().Lookup("eth-server-http"))
|
||||
viper.BindPFlag("eth.server.httpPath", serveCmd.PersistentFlags().Lookup("eth-server-http-path"))
|
||||
|
||||
// eth websocket json-rpc server
|
||||
viper.BindPFlag("server.ws", serveCmd.PersistentFlags().Lookup("server-ws"))
|
||||
viper.BindPFlag("server.wsPath", serveCmd.PersistentFlags().Lookup("server-ws-path"))
|
||||
viper.BindPFlag("eth.server.ws", serveCmd.PersistentFlags().Lookup("eth-server-ws"))
|
||||
viper.BindPFlag("eth.server.wsPath", serveCmd.PersistentFlags().Lookup("eth-server-ws-path"))
|
||||
|
||||
// eth ipc json-rpc server
|
||||
viper.BindPFlag("server.ipc", serveCmd.PersistentFlags().Lookup("server-ipc"))
|
||||
viper.BindPFlag("server.ipcPath", serveCmd.PersistentFlags().Lookup("server-ipc-path"))
|
||||
viper.BindPFlag("eth.server.ipc", serveCmd.PersistentFlags().Lookup("eth-server-ipc"))
|
||||
viper.BindPFlag("eth.server.ipcPath", serveCmd.PersistentFlags().Lookup("eth-server-ipc-path"))
|
||||
|
||||
// ipld and tracing graphql parameters
|
||||
viper.BindPFlag("ipld.server.graphql", serveCmd.PersistentFlags().Lookup("ipld-server-graphql"))
|
||||
viper.BindPFlag("ipld.server.graphqlPath", serveCmd.PersistentFlags().Lookup("ipld-server-graphql-path"))
|
||||
viper.BindPFlag("ipld.postgraphilePath", serveCmd.PersistentFlags().Lookup("ipld-postgraphile-path"))
|
||||
viper.BindPFlag("tracing.httpPath", serveCmd.PersistentFlags().Lookup("tracing-http-path"))
|
||||
viper.BindPFlag("tracing.postgraphilePath", serveCmd.PersistentFlags().Lookup("tracing-postgraphile-path"))
|
||||
|
||||
viper.BindPFlag("ethereum.httpPath", serveCmd.PersistentFlags().Lookup("eth-http-path"))
|
||||
viper.BindPFlag("ethereum.nodeID", serveCmd.PersistentFlags().Lookup("eth-node-id"))
|
||||
@ -320,13 +390,12 @@ func init() {
|
||||
viper.BindPFlag("ethereum.genesisBlock", serveCmd.PersistentFlags().Lookup("eth-genesis-block"))
|
||||
viper.BindPFlag("ethereum.networkID", serveCmd.PersistentFlags().Lookup("eth-network-id"))
|
||||
viper.BindPFlag("ethereum.chainID", serveCmd.PersistentFlags().Lookup("eth-chain-id"))
|
||||
viper.BindPFlag("ethereum.defaultSender", serveCmd.PersistentFlags().Lookup("eth-default-sender"))
|
||||
viper.BindPFlag("ethereum.rpcGasCap", serveCmd.PersistentFlags().Lookup("eth-rpc-gas-cap"))
|
||||
viper.BindPFlag("ethereum.chainConfig", serveCmd.PersistentFlags().Lookup("eth-chain-config"))
|
||||
viper.BindPFlag("ethereum.supportsStateDiff", serveCmd.PersistentFlags().Lookup("eth-supports-state-diff"))
|
||||
viper.BindPFlag("ethereum.forwardEthCalls", serveCmd.PersistentFlags().Lookup("eth-forward-eth-calls"))
|
||||
viper.BindPFlag("ethereum.forwardGetStorageAt", serveCmd.PersistentFlags().Lookup("eth-forward-get-storage-at"))
|
||||
viper.BindPFlag("ethereum.proxyOnError", serveCmd.PersistentFlags().Lookup("eth-proxy-on-error"))
|
||||
viper.BindPFlag("ethereum.getLogsBlockLimit", serveCmd.PersistentFlags().Lookup("eth-getlogs-block-limit"))
|
||||
|
||||
// groupcache flags
|
||||
viper.BindPFlag("groupcache.pool.enabled", serveCmd.PersistentFlags().Lookup("gcache-pool-enabled"))
|
||||
|
||||
172
cmd/subscribe.go
Normal file
172
cmd/subscribe.go
Normal file
@ -0,0 +1,172 @@
|
||||
// Copyright © 2019 Vulcanize, Inc
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/client"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth"
|
||||
w "github.com/vulcanize/ipld-eth-server/v4/pkg/serve"
|
||||
)
|
||||
|
||||
// subscribeCmd represents the subscribe command
|
||||
var subscribeCmd = &cobra.Command{
|
||||
Use: "subscribe",
|
||||
Short: "This command is used to subscribe to the eth ipfs watcher data stream with the provided filters",
|
||||
Long: `This command is for demo and testing purposes and is used to subscribe to the watcher with the provided subscription configuration parameters.
|
||||
It does not do anything with the data streamed from the watcher other than unpack it and print it out for demonstration purposes.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
subCommand = cmd.CalledAs()
|
||||
logWithCommand = *log.WithField("SubCommand", subCommand)
|
||||
subscribe()
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(subscribeCmd)
|
||||
}
|
||||
|
||||
func subscribe() {
|
||||
// Prep the subscription config/filters to be sent to the server
|
||||
ethSubConfig, err := eth.NewEthSubscriptionConfig()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Create a new rpc client and a subscription streamer with that client
|
||||
rpcClient, err := getRPCClient()
|
||||
if err != nil {
|
||||
logWithCommand.Fatal(err)
|
||||
}
|
||||
subClient := client.NewClient(rpcClient)
|
||||
|
||||
// Buffered channel for reading subscription payloads
|
||||
payloadChan := make(chan w.SubscriptionPayload, 20000)
|
||||
|
||||
// Subscribe to the watcher service with the given config/filter parameters
|
||||
sub, err := subClient.Stream(payloadChan, *ethSubConfig)
|
||||
if err != nil {
|
||||
logWithCommand.Fatal(err)
|
||||
}
|
||||
logWithCommand.Info("awaiting payloads")
|
||||
// Receive response payloads and print out the results
|
||||
for {
|
||||
select {
|
||||
case payload := <-payloadChan:
|
||||
if payload.Err != "" {
|
||||
logWithCommand.Error(payload.Err)
|
||||
continue
|
||||
}
|
||||
var ethData eth.IPLDs
|
||||
if err := rlp.DecodeBytes(payload.Data, ðData); err != nil {
|
||||
logWithCommand.Error(err)
|
||||
continue
|
||||
}
|
||||
var header types.Header
|
||||
err = rlp.Decode(bytes.NewBuffer(ethData.Header.Data), &header)
|
||||
if err != nil {
|
||||
logWithCommand.Error(err)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("Header number %d, hash %s\n", header.Number.Int64(), header.Hash().Hex())
|
||||
fmt.Printf("header: %v\n", header)
|
||||
for _, trxRlp := range ethData.Transactions {
|
||||
var trx types.Transaction
|
||||
buff := bytes.NewBuffer(trxRlp.Data)
|
||||
stream := rlp.NewStream(buff, 0)
|
||||
err := trx.DecodeRLP(stream)
|
||||
if err != nil {
|
||||
logWithCommand.Error(err)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("Transaction with hash %s\n", trx.Hash().Hex())
|
||||
fmt.Printf("trx: %v\n", trx)
|
||||
}
|
||||
for _, rctRlp := range ethData.Receipts {
|
||||
var rct types.Receipt
|
||||
buff := bytes.NewBuffer(rctRlp.Data)
|
||||
stream := rlp.NewStream(buff, 0)
|
||||
err = rct.DecodeRLP(stream)
|
||||
if err != nil {
|
||||
logWithCommand.Error(err)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("Receipt with block hash %s, trx hash %s\n", rct.BlockHash.Hex(), rct.TxHash.Hex())
|
||||
fmt.Printf("rct: %v\n", rct)
|
||||
for _, l := range rct.Logs {
|
||||
if len(l.Topics) < 1 {
|
||||
logWithCommand.Error(fmt.Sprintf("log only has %d topics", len(l.Topics)))
|
||||
continue
|
||||
}
|
||||
fmt.Printf("Log for block hash %s, trx hash %s, address %s, and with topic0 %s\n",
|
||||
l.BlockHash.Hex(), l.TxHash.Hex(), l.Address.Hex(), l.Topics[0].Hex())
|
||||
fmt.Printf("log: %v\n", l)
|
||||
}
|
||||
}
|
||||
// This assumes leaf nodes only
|
||||
for _, stateNode := range ethData.StateNodes {
|
||||
var acct types.StateAccount
|
||||
err = rlp.DecodeBytes(stateNode.IPLD.Data, &acct)
|
||||
if err != nil {
|
||||
logWithCommand.Error(err)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("Account for key %s, and root %s, with balance %s\n",
|
||||
stateNode.StateLeafKey.Hex(), acct.Root.Hex(), acct.Balance.String())
|
||||
fmt.Printf("state account: %+v\n", acct)
|
||||
}
|
||||
for _, storageNode := range ethData.StorageNodes {
|
||||
fmt.Printf("Storage for state key %s ", storageNode.StateLeafKey.Hex())
|
||||
fmt.Printf("with storage key %s\n", storageNode.StorageLeafKey.Hex())
|
||||
var i []interface{}
|
||||
err := rlp.DecodeBytes(storageNode.IPLD.Data, &i)
|
||||
if err != nil {
|
||||
logWithCommand.Error(err)
|
||||
continue
|
||||
}
|
||||
// if a value node
|
||||
if len(i) == 1 {
|
||||
valueBytes, ok := i[0].([]byte)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
fmt.Printf("Storage leaf key: %s, and value hash: %s\n",
|
||||
storageNode.StorageLeafKey.Hex(), common.BytesToHash(valueBytes).Hex())
|
||||
}
|
||||
}
|
||||
case err = <-sub.Err():
|
||||
logWithCommand.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getRPCClient() (*rpc.Client, error) {
|
||||
vulcPath := viper.GetString("watcher.ethSubscription.wsPath")
|
||||
if vulcPath == "" {
|
||||
vulcPath = "ws://127.0.0.1:8080" // fall back to the default ws url if no path is provided
|
||||
}
|
||||
return rpc.Dial(vulcPath)
|
||||
}
|
||||
87
cmd/validate.go
Normal file
@ -0,0 +1,87 @@
|
||||
// Copyright © 2021 Vulcanize, Inc
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
validator "github.com/vulcanize/eth-ipfs-state-validator/v4/pkg"
|
||||
ipfsethdb "github.com/vulcanize/ipfs-ethdb/v4/postgres"
|
||||
|
||||
s "github.com/vulcanize/ipld-eth-server/v4/pkg/serve"
|
||||
)
|
||||
|
||||
const GroupName = "statedb-validate"
|
||||
const CacheExpiryInMins = 8 * 60 // 8 hours
|
||||
const CacheSizeInMB = 16 // 16 MB
|
||||
|
||||
var validateCmd = &cobra.Command{
|
||||
Use: "validate",
|
||||
Short: "valdiate state",
|
||||
Long: `This command validates the trie for the given state root`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
subCommand = cmd.CalledAs()
|
||||
logWithCommand = *log.WithField("SubCommand", subCommand)
|
||||
validate()
|
||||
},
|
||||
}
|
||||
|
||||
func validate() {
|
||||
config, err := s.NewConfig()
|
||||
if err != nil {
|
||||
logWithCommand.Fatal(err)
|
||||
}
|
||||
|
||||
stateRootStr := viper.GetString("stateRoot")
|
||||
if stateRootStr == "" {
|
||||
logWithCommand.Fatal("must provide a state root for state validation")
|
||||
}
|
||||
|
||||
stateRoot := common.HexToHash(stateRootStr)
|
||||
cacheSize := viper.GetInt("cacheSize")
|
||||
|
||||
ethDB := ipfsethdb.NewDatabase(config.DB, ipfsethdb.CacheConfig{
|
||||
Name: GroupName,
|
||||
Size: cacheSize * 1024 * 1024,
|
||||
ExpiryDuration: time.Minute * time.Duration(CacheExpiryInMins),
|
||||
})
|
||||
|
||||
val := validator.NewValidator(nil, ethDB)
|
||||
if err = val.ValidateTrie(stateRoot); err != nil {
|
||||
log.Fatalln("Error validating state root")
|
||||
}
|
||||
|
||||
stats := ethDB.(*ipfsethdb.Database).GetCacheStats()
|
||||
log.Debugf("groupcache stats %+v", stats)
|
||||
|
||||
log.Infoln("Successfully validated state root")
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(validateCmd)
|
||||
|
||||
addDatabaseFlags(validateCmd)
|
||||
|
||||
validateCmd.PersistentFlags().String("state-root", "", "root of the state trie we wish to validate")
|
||||
viper.BindPFlag("stateRoot", validateCmd.PersistentFlags().Lookup("state-root"))
|
||||
|
||||
validateCmd.PersistentFlags().Int("cache-size", CacheSizeInMB, "cache size in MB")
|
||||
viper.BindPFlag("cacheSize", validateCmd.PersistentFlags().Lookup("cache-size"))
|
||||
}
|
||||
@ -16,17 +16,19 @@
package cmd

import (
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"

v "github.com/cerc-io/ipld-eth-server/v5/version"
v "github.com/vulcanize/ipld-eth-server/v4/version"
)

// versionCmd represents the version command
var versionCmd = &cobra.Command{
Use: "version",
Short: "Prints the version of ipld-eth-server",
Long: `Use this command to fetch the version of ipld-eth-server`,
Long: `Use this command to fetch the version of ipld-eth-server

Usage: ./ipld-eth-server version`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *log.WithField("SubCommand", subCommand)
12
docker-compose.test.yml
Normal file
@ -0,0 +1,12 @@
version: '3.2'

services:
  contract:
    build:
      context: ./test/contract
      args:
        ETH_ADDR: "http://go-ethereum:8545"
    environment:
      ETH_ADDR: "http://go-ethereum:8545"
    ports:
      - "127.0.0.1:3000:3000"
59
docker-compose.yml
Normal file
@ -0,0 +1,59 @@
version: '3.2'

services:
  migrations:
    restart: on-failure
    depends_on:
      - ipld-eth-db
    image: vulcanize/ipld-eth-db:v4.2.0-alpha
    environment:
      DATABASE_USER: "vdbm"
      DATABASE_NAME: "vulcanize_testing"
      DATABASE_PASSWORD: "password"
      DATABASE_HOSTNAME: "ipld-eth-db"
      DATABASE_PORT: 5432

  ipld-eth-db:
    image: timescale/timescaledb:latest-pg14
    restart: always
    command: ["postgres", "-c", "log_statement=all"]
    environment:
      POSTGRES_USER: "vdbm"
      POSTGRES_DB: "vulcanize_testing"
      POSTGRES_PASSWORD: "password"
    ports:
      - "127.0.0.1:8077:5432"

  eth-server:
    restart: unless-stopped
    depends_on:
      - ipld-eth-db
    build:
      context: ./
      cache_from:
        - alpine:latest
        - golang:1.13-alpine
    environment:
      IPLD_SERVER_GRAPHQL: "true"
      IPLD_POSTGRAPHILEPATH: http://graphql:5000
      ETH_SERVER_HTTPPATH: 0.0.0.0:8081
      VDB_COMMAND: "serve"
      ETH_CHAIN_CONFIG: "/tmp/chain.json"
      DATABASE_NAME: "vulcanize_testing"
      DATABASE_HOSTNAME: "ipld-eth-db"
      DATABASE_PORT: 5432
      DATABASE_USER: "vdbm"
      DATABASE_PASSWORD: "password"
      ETH_CHAIN_ID: 4
      ETH_FORWARD_ETH_CALLS: $ETH_FORWARD_ETH_CALLS
      ETH_PROXY_ON_ERROR: $ETH_PROXY_ON_ERROR
      ETH_HTTP_PATH: $ETH_HTTP_PATH
    volumes:
      - type: bind
        source: ./chain.json
        target: /tmp/chain.json
    ports:
      - "127.0.0.1:8081:8081"

volumes:
  vdb_db_eth_server:
@ -40,9 +40,9 @@ An example of how to subscribe to a real-time Ethereum data feed from ipld-eth-s
"github.com/ethereum/go-ethereum/rpc"
"github.com/spf13/viper"

"github.com/cerc-io/ipld-eth-server/v5/pkg/client"
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v5/pkg/watch"
"github.com/vulcanize/ipld-eth-server/v4/pkg/client"
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth"
"github.com/vulcanize/ipld-eth-server/v4/pkg/watch"
)

config, _ := eth.NewEthSubscriptionConfig()
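A minimal sketch of how such an example typically continues, mirroring cmd/subscribe.go above; the ws endpoint, the channel buffer size, the payload type (pkg/serve's SubscriptionPayload, imported as w), and the extra logrus and rlp imports are illustrative assumptions rather than lines from the diff:

// Dial the watcher's websocket endpoint and wrap it in the streaming client.
rpcClient, err := rpc.Dial("ws://127.0.0.1:8080") // assumed default endpoint
if err != nil {
	log.Fatal(err)
}
subClient := client.NewClient(rpcClient)

// Buffered channel that receives subscription payloads.
payloadChan := make(chan w.SubscriptionPayload, 800)
sub, err := subClient.Stream(payloadChan, *config)
if err != nil {
	log.Fatal(err)
}

for {
	select {
	case payload := <-payloadChan:
		var ethData eth.IPLDs
		if err := rlp.DecodeBytes(payload.Data, &ethData); err != nil {
			log.Error(err)
			continue
		}
		// unpack ethData.Header, ethData.Transactions, ethData.Receipts, ... as needed
	case err := <-sub.Err():
		log.Fatal(err)
	}
}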
@ -160,9 +160,9 @@ An example of how to subscribe to a real-time Bitcoin data feed from ipld-eth-se
"github.com/ethereum/go-ethereum/rpc"
"github.com/spf13/viper"

"github.com/vulcanize/ipld-eth-server/v5/pkg/btc"
"github.com/vulcanize/ipld-eth-server/v5/pkg/client"
"github.com/vulcanize/ipld-eth-server/v5/pkg/watch"
"github.com/vulcanize/ipld-eth-server/v4/pkg/btc"
"github.com/vulcanize/ipld-eth-server/v4/pkg/client"
"github.com/vulcanize/ipld-eth-server/v4/pkg/watch"
)

config, _ := btc.NewBtcSubscriptionConfig()
@ -1,8 +1,12 @@
#!/bin/sh

START_CMD="./ipld-eth-server"
if [ "true" == "$CERC_REMOTE_DEBUG" ] && [ -x "/usr/local/bin/dlv" ]; then
START_CMD="/usr/local/bin/dlv --listen=:40000 --headless=true --api-version=2 --accept-multiclient exec `pwd`/ipld-eth-server --continue --"
fi
echo "Beginning the ipld-eth-server process"

exec $START_CMD ${VDB_COMMAND:-serve}
echo running: ./ipld-eth-server ${VDB_COMMAND} --config=config.toml
./ipld-eth-server ${VDB_COMMAND} --config=config.toml
rv=$?

if [ $rv != 0 ]; then
echo "ipld-eth-server startup failed"
exit 1
fi
@ -1,30 +1,27 @@
[database]
name = "cerc_testing" # $DATABASE_NAME
name = "vulcanize_public" # $DATABASE_NAME
hostname = "localhost" # $DATABASE_HOSTNAME
port = 5432 # $DATABASE_PORT
user = "postgres" # $DATABASE_USER
password = "" # $DATABASE_PASSWORD

[log]
level = "info" # $LOG_LEVEL
level = "info" # $LOGRUS_LEVEL

[server]
ipc = false
ipcPath = "~/.vulcanize/vulcanize.ipc" # $SERVER_IPC_PATH
ws = true
wsPath = "127.0.0.1:8080" # $SERVER_WS_PATH
http = true
httpPath = "127.0.0.1:8081" # $SERVER_HTTP_PATH
wsPath = "127.0.0.1:8081" # $SERVER_WS_PATH
httpPath = "127.0.0.1:8082" # $SERVER_HTTP_PATH
graphql = true # $SERVER_GRAPHQL
graphqlPath = "127.0.0.1:8082" # $SERVER_GRAPHQL_PATH
graphqlEndpoint = "127.0.0.1:8083" # $SERVER_GRAPHQL_ENDPOINT

[ethereum]
chainConfig = "./chain.json" # ETH_CHAIN_CONFIG
chainID = "1" # $ETH_CHAIN_ID
defaultSender = "" # $ETH_DEFAULT_SENDER_ADDR
rpcGasCap = "1000000000000" # $ETH_RPC_GAS_CAP
httpPath = "127.0.0.1:8545" # $ETH_HTTP_PATH
supportsStateDiff = true # $ETH_SUPPORTS_STATEDIFF
stateDiffTimeout = "4m" # $ETH_STATEDIFF_TIMEOUT
forwardEthCalls = false # $ETH_FORWARD_ETH_CALLS
proxyOnError = true # $ETH_PROXY_ON_ERROR
nodeID = "arch1" # $ETH_NODE_ID
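The trailing comments name the environment variable that corresponds to each key. A minimal sketch of reading such a file with viper follows; the file path is a hypothetical example, not taken from the diff:

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	viper.SetConfigFile("environments/example.toml") // hypothetical path
	if err := viper.ReadInConfig(); err != nil {
		panic(err)
	}

	// Keys follow the TOML sections above: [database] name -> "database.name", and so on.
	fmt.Println(viper.GetString("database.name"))
	fmt.Println(viper.GetString("server.httpPath"))
	fmt.Println(viper.GetBool("ethereum.supportsStateDiff"))
}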
30
environments/subscribeExample.toml
Normal file
@ -0,0 +1,30 @@
[watcher]
[watcher.ethSubscription]
historicalData = false
historicalDataOnly = false
startingBlock = 0
endingBlock = 0
wsPath = "ws://127.0.0.1:8080"
[watcher.ethSubscription.headerFilter]
off = false
uncles = false
[watcher.ethSubscription.txFilter]
off = false
src = []
dst = []
[watcher.ethSubscription.receiptFilter]
off = false
contracts = []
topic0s = []
topic1s = []
topic2s = []
topic3s = []
[watcher.ethSubscription.stateFilter]
off = false
addresses = []
intermediateNodes = false
[watcher.ethSubscription.storageFilter]
off = true
addresses = []
storageKeys = []
intermediateNodes = false
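A rough sketch of how the subscribe command consumes a file like this, based on cmd/subscribe.go above; that eth.NewEthSubscriptionConfig reads the watcher.ethSubscription.* keys is an inference from that code, not something stated in the diff:

// Point viper at the example config before building the subscription filters.
viper.SetConfigFile("environments/subscribeExample.toml")
if err := viper.ReadInConfig(); err != nil {
	log.Fatal(err)
}

// Assumed to read the watcher.ethSubscription.* keys shown above.
ethSubConfig, err := eth.NewEthSubscriptionConfig()
if err != nil {
	log.Fatal(err)
}

// cmd/subscribe.go falls back to ws://127.0.0.1:8080 when this key is empty.
wsPath := viper.GetString("watcher.ethSubscription.wsPath")
// wsPath feeds rpc.Dial and ethSubConfig feeds subClient.Stream, as in the command above.
_, _ = ethSubConfig, wsPath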
@ -1,5 +1,5 @@
[database]
name = "cerc_testing"
name = "vulcanize_testing"
hostname = "localhost"
port = 5432

460
go.mod
@ -1,296 +1,292 @@
|
||||
module github.com/cerc-io/ipld-eth-server/v5
|
||||
module github.com/vulcanize/ipld-eth-server/v4
|
||||
|
||||
go 1.21
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/cerc-io/eth-ipfs-state-validator/v5 v5.2.0-alpha
|
||||
github.com/cerc-io/eth-iterator-utils v0.3.1
|
||||
github.com/cerc-io/ipfs-ethdb/v5 v5.1.0-alpha
|
||||
github.com/cerc-io/ipld-eth-statedb v0.1.1
|
||||
github.com/cerc-io/plugeth-statediff v0.3.2
|
||||
github.com/ethereum/go-ethereum v1.13.14
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/ethereum/go-ethereum v1.10.19
|
||||
github.com/graph-gophers/graphql-go v1.3.0
|
||||
github.com/holiman/uint256 v1.2.4
|
||||
github.com/ipfs/go-cid v0.4.1
|
||||
github.com/ipfs/go-block-format v0.0.3
|
||||
github.com/ipfs/go-cid v0.0.7
|
||||
github.com/ipfs/go-ipfs-blockstore v1.0.1
|
||||
github.com/ipfs/go-ipfs-ds-help v1.0.0
|
||||
github.com/jmoiron/sqlx v1.3.5
|
||||
github.com/joho/godotenv v1.4.0
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/lib/pq v1.10.6
|
||||
github.com/machinebox/graphql v0.2.2
|
||||
github.com/mailgun/groupcache/v2 v2.3.0
|
||||
github.com/onsi/ginkgo/v2 v2.15.0
|
||||
github.com/onsi/gomega v1.30.0
|
||||
github.com/prometheus/client_golang v1.18.0
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/cobra v1.8.0
|
||||
github.com/spf13/viper v1.18.2
|
||||
github.com/multiformats/go-multihash v0.1.0
|
||||
github.com/onsi/ginkgo v1.16.5
|
||||
github.com/onsi/gomega v1.19.0
|
||||
github.com/prometheus/client_golang v1.11.0
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
github.com/spf13/cobra v1.4.0
|
||||
github.com/spf13/viper v1.11.0
|
||||
github.com/vulcanize/eth-ipfs-state-validator/v4 v4.0.3-alpha
|
||||
github.com/vulcanize/gap-filler v0.4.0
|
||||
github.com/vulcanize/ipfs-ethdb/v4 v4.0.2-alpha
|
||||
gorm.io/driver/postgres v1.3.7
|
||||
gorm.io/gorm v1.23.5
|
||||
)
|
||||
|
||||
require (
|
||||
bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc // indirect
|
||||
github.com/DataDog/zstd v1.5.5 // indirect
|
||||
github.com/Jorropo/jsync v1.0.1 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
|
||||
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
|
||||
github.com/benbjohnson/clock v1.3.5 // indirect
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
|
||||
github.com/Stebalien/go-bitfield v0.0.1 // indirect
|
||||
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
|
||||
github.com/benbjohnson/clock v1.1.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.10.0 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/cockroachdb/errors v1.10.0 // indirect
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
|
||||
github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect
|
||||
github.com/cockroachdb/redact v1.1.5 // indirect
|
||||
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
|
||||
github.com/consensys/bavard v0.1.13 // indirect
|
||||
github.com/consensys/gnark-crypto v0.12.1 // indirect
|
||||
github.com/containerd/cgroups v1.1.0 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
|
||||
github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 // indirect
|
||||
github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect
|
||||
github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect
|
||||
github.com/btcsuite/btcd v0.22.1 // indirect
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.1.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
||||
github.com/cheekybits/genny v1.0.0 // indirect
|
||||
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect
|
||||
github.com/cskr/pubsub v1.0.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
|
||||
github.com/deckarep/golang-set/v2 v2.3.0 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
|
||||
github.com/deckarep/golang-set v1.8.0 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
|
||||
github.com/deepmap/oapi-codegen v1.8.2 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/elastic/gosigar v0.14.2 // indirect
|
||||
github.com/ethereum/c-kzg-4844 v0.4.0 // indirect
|
||||
github.com/edsrzf/mmap-go v1.0.0 // indirect
|
||||
github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 // indirect
|
||||
github.com/ferranbt/fastssz v0.1.2 // indirect
|
||||
github.com/fjl/memsize v0.0.2 // indirect
|
||||
github.com/flynn/noise v1.1.0 // indirect
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
|
||||
github.com/flynn/noise v1.0.0 // indirect
|
||||
github.com/francoispqt/gojay v1.2.13 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
|
||||
github.com/friendsofgo/graphiql v0.2.2 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
|
||||
github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect
|
||||
github.com/georgysavva/scany v0.2.9 // indirect
|
||||
github.com/getsentry/sentry-go v0.22.0 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
|
||||
github.com/godbus/dbus/v5 v5.1.0 // indirect
|
||||
github.com/gofrs/flock v0.8.1 // indirect
|
||||
github.com/go-ole/go-ole v1.2.1 // indirect
|
||||
github.com/go-stack/stack v1.8.0 // indirect
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.3.0 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/gopacket v1.1.19 // indirect
|
||||
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
|
||||
github.com/gorilla/websocket v1.5.1 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-bexpr v0.1.12 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/gorilla/websocket v1.4.2 // indirect
|
||||
github.com/graphql-go/graphql v0.7.9 // indirect
|
||||
github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e // indirect
|
||||
github.com/hashicorp/errwrap v1.0.0 // indirect
|
||||
github.com/hashicorp/go-bexpr v0.1.10 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/golang-lru v1.0.2 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 // indirect
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
|
||||
github.com/huin/goupnp v1.3.0 // indirect
|
||||
github.com/inconshreveable/log15 v2.16.0+incompatible // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/holiman/uint256 v1.2.0 // indirect
|
||||
github.com/huin/goupnp v1.0.3 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/influxdata/influxdb v1.8.3 // indirect
|
||||
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
|
||||
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
|
||||
github.com/ipfs/bbloom v0.0.4 // indirect
|
||||
github.com/ipfs/boxo v0.19.0 // indirect
|
||||
github.com/ipfs/go-bitfield v1.1.0 // indirect
|
||||
github.com/ipfs/go-block-format v0.2.0 // indirect
|
||||
github.com/ipfs/go-cidutil v0.1.0 // indirect
|
||||
github.com/ipfs/go-datastore v0.6.0 // indirect
|
||||
github.com/ipfs/go-ds-measure v0.2.0 // indirect
|
||||
github.com/ipfs/go-bitswap v0.4.0 // indirect
|
||||
github.com/ipfs/go-blockservice v0.1.7 // indirect
|
||||
github.com/ipfs/go-cidutil v0.0.2 // indirect
|
||||
github.com/ipfs/go-datastore v0.4.6 // indirect
|
||||
github.com/ipfs/go-ds-measure v0.1.0 // indirect
|
||||
github.com/ipfs/go-fetcher v1.5.0 // indirect
|
||||
github.com/ipfs/go-filestore v1.0.0 // indirect
|
||||
github.com/ipfs/go-fs-lock v0.0.7 // indirect
|
||||
github.com/ipfs/go-graphsync v0.8.0 // indirect
|
||||
github.com/ipfs/go-ipfs v0.10.0 // indirect
|
||||
github.com/ipfs/go-ipfs-chunker v0.0.5 // indirect
|
||||
github.com/ipfs/go-ipfs-config v0.16.0 // indirect
|
||||
github.com/ipfs/go-ipfs-delay v0.0.1 // indirect
|
||||
github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect
|
||||
github.com/ipfs/go-ipfs-pq v0.0.3 // indirect
|
||||
github.com/ipfs/go-ipfs-redirects-file v0.1.1 // indirect
|
||||
github.com/ipfs/go-ipfs-util v0.0.3 // indirect
|
||||
github.com/ipfs/go-ipld-cbor v0.1.0 // indirect
|
||||
github.com/ipfs/go-ipld-format v0.6.0 // indirect
|
||||
github.com/ipfs/go-ipld-legacy v0.2.1 // indirect
|
||||
github.com/ipfs/go-ipfs-exchange-interface v0.0.1 // indirect
|
||||
github.com/ipfs/go-ipfs-exchange-offline v0.0.1 // indirect
|
||||
github.com/ipfs/go-ipfs-files v0.0.8 // indirect
|
||||
github.com/ipfs/go-ipfs-keystore v0.0.2 // indirect
|
||||
github.com/ipfs/go-ipfs-pinner v0.1.2 // indirect
|
||||
github.com/ipfs/go-ipfs-posinfo v0.0.1 // indirect
|
||||
github.com/ipfs/go-ipfs-pq v0.0.2 // indirect
|
||||
github.com/ipfs/go-ipfs-provider v0.6.1 // indirect
|
||||
github.com/ipfs/go-ipfs-routing v0.1.0 // indirect
|
||||
github.com/ipfs/go-ipfs-util v0.0.2 // indirect
|
||||
github.com/ipfs/go-ipld-cbor v0.0.5 // indirect
|
||||
github.com/ipfs/go-ipld-format v0.2.0 // indirect
|
||||
github.com/ipfs/go-ipld-legacy v0.1.0 // indirect
|
||||
github.com/ipfs/go-ipns v0.1.2 // indirect
|
||||
github.com/ipfs/go-log v1.0.5 // indirect
|
||||
github.com/ipfs/go-log/v2 v2.5.1 // indirect
|
||||
github.com/ipfs/go-log/v2 v2.3.0 // indirect
|
||||
github.com/ipfs/go-merkledag v0.4.0 // indirect
|
||||
github.com/ipfs/go-metrics-interface v0.0.1 // indirect
|
||||
github.com/ipfs/go-peertaskqueue v0.8.1 // indirect
|
||||
github.com/ipfs/go-unixfsnode v1.9.0 // indirect
|
||||
github.com/ipfs/kubo v0.27.0 // indirect
|
||||
github.com/ipld/go-car/v2 v2.13.1 // indirect
|
||||
github.com/ipld/go-codec-dagpb v1.6.0 // indirect
|
||||
github.com/ipld/go-ipld-prime v0.21.0 // indirect
|
||||
github.com/ipfs/go-mfs v0.1.2 // indirect
|
||||
github.com/ipfs/go-namesys v0.3.1 // indirect
|
||||
github.com/ipfs/go-path v0.1.2 // indirect
|
||||
github.com/ipfs/go-peertaskqueue v0.4.0 // indirect
|
||||
github.com/ipfs/go-unixfs v0.2.5 // indirect
|
||||
github.com/ipfs/go-unixfsnode v1.1.3 // indirect
|
||||
github.com/ipfs/go-verifcid v0.0.1 // indirect
|
||||
github.com/ipfs/interface-go-ipfs-core v0.5.1 // indirect
|
||||
github.com/ipld/go-codec-dagpb v1.3.0 // indirect
|
||||
github.com/ipld/go-ipld-prime v0.12.2 // indirect
|
||||
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
|
||||
github.com/jackc/pgconn v1.14.3 // indirect
|
||||
github.com/jackc/pgconn v1.12.1 // indirect
|
||||
github.com/jackc/pgio v1.0.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgproto3/v2 v2.3.3 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
|
||||
github.com/jackc/pgtype v1.14.0 // indirect
|
||||
github.com/jackc/pgx/v4 v4.18.3 // indirect
|
||||
github.com/jackc/puddle v1.3.0 // indirect
|
||||
github.com/jackc/pgproto3/v2 v2.3.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
|
||||
github.com/jackc/pgtype v1.11.0 // indirect
|
||||
github.com/jackc/pgx/v4 v4.16.1 // indirect
|
||||
github.com/jackc/puddle v1.2.1 // indirect
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
|
||||
github.com/jbenet/goprocess v0.1.4 // indirect
|
||||
github.com/jinzhu/copier v0.2.4 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.4 // indirect
|
||||
github.com/klauspost/compress v1.17.6 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
|
||||
github.com/koron/go-ssdp v0.0.4 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
|
||||
github.com/klauspost/compress v1.11.7 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
|
||||
github.com/koron/go-ssdp v0.0.2 // indirect
|
||||
github.com/libp2p/go-addr-util v0.1.0 // indirect
|
||||
github.com/libp2p/go-buffer-pool v0.0.2 // indirect
|
||||
github.com/libp2p/go-cidranger v1.1.0 // indirect
|
||||
github.com/libp2p/go-doh-resolver v0.4.0 // indirect
|
||||
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
|
||||
github.com/libp2p/go-libp2p v0.33.0 // indirect
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
|
||||
github.com/libp2p/go-libp2p-kad-dht v0.24.4 // indirect
|
||||
github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect
|
||||
github.com/libp2p/go-libp2p-pubsub v0.10.0 // indirect
|
||||
github.com/libp2p/go-libp2p-pubsub-router v0.6.0 // indirect
|
||||
github.com/libp2p/go-libp2p-record v0.2.0 // indirect
|
||||
github.com/libp2p/go-libp2p-routing-helpers v0.7.3 // indirect
|
||||
github.com/libp2p/go-libp2p-xor v0.1.0 // indirect
|
||||
github.com/libp2p/go-msgio v0.3.0 // indirect
|
||||
github.com/libp2p/go-nat v0.2.0 // indirect
|
||||
github.com/libp2p/go-netroute v0.2.1 // indirect
|
||||
github.com/libp2p/go-reuseport v0.4.0 // indirect
|
||||
github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
|
||||
github.com/libp2p/zeroconf/v2 v2.2.0 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/libp2p/go-conn-security-multistream v0.2.1 // indirect
|
||||
github.com/libp2p/go-doh-resolver v0.3.1 // indirect
|
||||
github.com/libp2p/go-eventbus v0.2.1 // indirect
|
||||
github.com/libp2p/go-flow-metrics v0.0.3 // indirect
|
||||
github.com/libp2p/go-libp2p v0.15.0 // indirect
|
||||
github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 // indirect
|
||||
github.com/libp2p/go-libp2p-autonat v0.4.2 // indirect
|
||||
github.com/libp2p/go-libp2p-blankhost v0.2.0 // indirect
|
||||
github.com/libp2p/go-libp2p-circuit v0.4.0 // indirect
|
||||
github.com/libp2p/go-libp2p-connmgr v0.2.4 // indirect
|
||||
github.com/libp2p/go-libp2p-core v0.9.0 // indirect
|
||||
github.com/libp2p/go-libp2p-discovery v0.5.1 // indirect
|
||||
github.com/libp2p/go-libp2p-kad-dht v0.13.1 // indirect
|
||||
github.com/libp2p/go-libp2p-kbucket v0.4.7 // indirect
|
||||
github.com/libp2p/go-libp2p-loggables v0.1.0 // indirect
|
||||
github.com/libp2p/go-libp2p-mplex v0.4.1 // indirect
|
||||
github.com/libp2p/go-libp2p-nat v0.0.6 // indirect
|
||||
github.com/libp2p/go-libp2p-noise v0.2.2 // indirect
|
||||
github.com/libp2p/go-libp2p-peerstore v0.2.8 // indirect
|
||||
github.com/libp2p/go-libp2p-pnet v0.2.0 // indirect
|
||||
github.com/libp2p/go-libp2p-pubsub v0.5.4 // indirect
|
||||
github.com/libp2p/go-libp2p-pubsub-router v0.4.0 // indirect
|
||||
github.com/libp2p/go-libp2p-quic-transport v0.12.0 // indirect
|
||||
github.com/libp2p/go-libp2p-record v0.1.3 // indirect
|
||||
github.com/libp2p/go-libp2p-routing-helpers v0.2.3 // indirect
|
||||
github.com/libp2p/go-libp2p-swarm v0.5.3 // indirect
|
||||
github.com/libp2p/go-libp2p-tls v0.2.0 // indirect
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.4.6 // indirect
|
||||
github.com/libp2p/go-libp2p-xor v0.0.0-20210714161855-5c005aca55db // indirect
|
||||
github.com/libp2p/go-libp2p-yamux v0.5.4 // indirect
|
||||
github.com/libp2p/go-maddr-filter v0.1.0 // indirect
|
||||
github.com/libp2p/go-mplex v0.3.0 // indirect
|
||||
github.com/libp2p/go-msgio v0.0.6 // indirect
|
||||
github.com/libp2p/go-nat v0.0.5 // indirect
|
||||
github.com/libp2p/go-netroute v0.1.6 // indirect
|
||||
github.com/libp2p/go-openssl v0.0.7 // indirect
|
||||
github.com/libp2p/go-reuseport v0.0.2 // indirect
|
||||
github.com/libp2p/go-reuseport-transport v0.0.5 // indirect
|
||||
github.com/libp2p/go-sockaddr v0.1.1 // indirect
|
||||
github.com/libp2p/go-stream-muxer-multistream v0.3.0 // indirect
|
||||
github.com/libp2p/go-tcp-transport v0.2.8 // indirect
|
||||
github.com/libp2p/go-ws-transport v0.5.0 // indirect
|
||||
github.com/libp2p/go-yamux/v2 v2.2.0 // indirect
|
||||
github.com/libp2p/zeroconf/v2 v2.0.0 // indirect
|
||||
github.com/lucas-clemente/quic-go v0.26.0 // indirect
|
||||
github.com/magiconair/properties v1.8.6 // indirect
|
||||
github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
|
||||
github.com/marten-seemann/qtls-go1-17 v0.1.1 // indirect
|
||||
github.com/marten-seemann/qtls-go1-18 v0.1.1 // indirect
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
|
||||
github.com/matryer/is v1.4.1 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/miekg/dns v1.1.58 // indirect
|
||||
github.com/matryer/is v1.4.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.12 // indirect
|
||||
github.com/mattn/go-isatty v0.0.14 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/miekg/dns v1.1.43 // indirect
|
||||
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
|
||||
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
|
||||
github.com/minio/sha256-simd v1.0.1 // indirect
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
|
||||
github.com/minio/sha256-simd v1.0.0 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/mitchellh/pointerstructure v1.2.1 // indirect
|
||||
github.com/mmcloughlin/addchain v0.4.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.4.3 // indirect
|
||||
github.com/mitchellh/pointerstructure v1.2.0 // indirect
|
||||
github.com/mr-tron/base58 v1.2.0 // indirect
|
||||
github.com/multiformats/go-base32 v0.1.0 // indirect
|
||||
github.com/multiformats/go-base36 v0.2.0 // indirect
|
||||
github.com/multiformats/go-multiaddr v0.12.2 // indirect
|
||||
github.com/multiformats/go-base32 v0.0.3 // indirect
|
||||
github.com/multiformats/go-base36 v0.1.0 // indirect
|
||||
github.com/multiformats/go-multiaddr v0.4.0 // indirect
|
||||
github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
|
||||
github.com/multiformats/go-multibase v0.2.0 // indirect
|
||||
github.com/multiformats/go-multicodec v0.9.0 // indirect
|
||||
github.com/multiformats/go-multihash v0.2.3 // indirect
|
||||
github.com/multiformats/go-multistream v0.5.0 // indirect
|
||||
github.com/multiformats/go-varint v0.0.7 // indirect
|
||||
github.com/multiformats/go-multibase v0.0.3 // indirect
|
||||
github.com/multiformats/go-multicodec v0.3.0 // indirect
|
||||
github.com/multiformats/go-multistream v0.2.2 // indirect
|
||||
github.com/multiformats/go-varint v0.0.6 // indirect
|
||||
github.com/nxadm/tail v1.4.8 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
github.com/opencontainers/runtime-spec v1.2.0 // indirect
|
||||
github.com/openrelayxyz/plugeth-utils v1.5.0 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
|
||||
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect
|
||||
github.com/pelletier/go-toml v1.9.4 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect
|
||||
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
|
||||
github.com/pganalyze/pg_query_go/v4 v4.2.1 // indirect
|
||||
github.com/pion/datachannel v1.5.5 // indirect
|
||||
github.com/pion/dtls/v2 v2.2.8 // indirect
|
||||
github.com/pion/ice/v2 v2.3.11 // indirect
|
||||
github.com/pion/interceptor v0.1.25 // indirect
|
||||
github.com/pion/logging v0.2.2 // indirect
|
||||
github.com/pion/mdns v0.0.9 // indirect
|
||||
github.com/pion/randutil v0.1.0 // indirect
|
||||
github.com/pion/rtcp v1.2.13 // indirect
|
||||
github.com/pion/rtp v1.8.3 // indirect
|
||||
github.com/pion/sctp v1.8.9 // indirect
|
||||
github.com/pion/sdp/v3 v3.0.6 // indirect
|
||||
github.com/pion/srtp/v2 v2.0.18 // indirect
|
||||
github.com/pion/stun v0.6.1 // indirect
|
||||
github.com/pion/transport/v2 v2.2.4 // indirect
|
||||
github.com/pion/turn/v2 v2.1.4 // indirect
|
||||
github.com/pion/webrtc/v3 v3.2.23 // indirect
|
||||
github.com/pganalyze/pg_query_go/v2 v2.1.0 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/polydawn/refmt v0.89.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.0 // indirect
|
||||
github.com/prometheus/common v0.47.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/quic-go/qpack v0.4.0 // indirect
|
||||
github.com/quic-go/quic-go v0.41.0 // indirect
|
||||
github.com/quic-go/webtransport-go v0.6.0 // indirect
|
||||
github.com/raulk/go-watchdog v1.3.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/rogpeppe/go-internal v1.12.0 // indirect
|
||||
github.com/rs/cors v1.9.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||
github.com/samber/lo v1.39.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.30.0 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/prometheus/tsdb v0.7.1 // indirect
|
||||
github.com/rjeczalik/notify v0.9.1 // indirect
|
||||
github.com/rs/cors v1.7.0 // indirect
|
||||
github.com/segmentio/fasthash v1.0.3 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/shopspring/decimal v1.2.0 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect
|
||||
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
|
||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||
github.com/spf13/afero v1.11.0 // indirect
|
||||
github.com/spf13/cast v1.6.0 // indirect
|
||||
github.com/spf13/afero v1.8.2 // indirect
|
||||
github.com/spf13/cast v1.4.1 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/status-im/keycard-go v0.2.0 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/stretchr/testify v1.9.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/supranational/blst v0.3.11 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect
|
||||
github.com/thoas/go-funk v0.9.3 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
|
||||
github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb // indirect
|
||||
github.com/urfave/cli/v2 v2.25.7 // indirect
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
|
||||
github.com/stretchr/objx v0.2.0 // indirect
|
||||
github.com/stretchr/testify v1.7.1 // indirect
|
||||
github.com/subosito/gotenv v1.2.0 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
||||
github.com/thoas/go-funk v0.9.2 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.5 // indirect
|
||||
github.com/tklauser/numcpus v0.2.2 // indirect
|
||||
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
|
||||
github.com/valyala/fastjson v1.6.3 // indirect
|
||||
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect
|
||||
github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20240109153615-66e95c3e8a87 // indirect
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2 // indirect
|
||||
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect
|
||||
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
|
||||
github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9 // indirect
|
||||
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.3 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/otel v1.25.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.25.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.25.0 // indirect
|
||||
go.uber.org/dig v1.17.1 // indirect
|
||||
go.uber.org/fx v1.20.1 // indirect
|
||||
go.uber.org/mock v0.4.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
|
||||
golang.org/x/crypto v0.22.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 // indirect
|
||||
golang.org/x/mod v0.17.0 // indirect
|
||||
golang.org/x/net v0.24.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.19.0 // indirect
|
||||
golang.org/x/term v0.19.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
golang.org/x/tools v0.20.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
|
||||
gonum.org/v1/gonum v0.14.0 // indirect
|
||||
google.golang.org/protobuf v1.32.0 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
go.opentelemetry.io/otel v0.20.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v0.20.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v0.20.0 // indirect
|
||||
go.uber.org/atomic v1.9.0 // indirect
|
||||
go.uber.org/dig v1.10.0 // indirect
|
||||
go.uber.org/fx v1.13.1 // indirect
|
||||
go.uber.org/multierr v1.7.0 // indirect
|
||||
go.uber.org/zap v1.19.0 // indirect
|
||||
go4.org v0.0.0-20200411211856-f5505b9728dd // indirect
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
|
||||
golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 // indirect
|
||||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5 // indirect
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
|
||||
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
|
||||
google.golang.org/protobuf v1.28.0 // indirect
|
||||
gopkg.in/ini.v1 v1.66.4 // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
lukechampine.com/blake3 v1.2.2 // indirect
|
||||
rsc.io/tmplfunc v0.0.3 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
lukechampine.com/blake3 v1.1.6 // indirect
|
||||
)
|
||||
|
||||
replace github.com/ethereum/go-ethereum v1.10.19 => github.com/vulcanize/go-ethereum v1.10.19-statediff-4.1.0-alpha
|
||||
|
||||
@ -1,72 +0,0 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net/http"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
type ContractDeployed struct {
|
||||
Address common.Address `json:"address"`
|
||||
TransactionHash common.Hash `json:"txHash"`
|
||||
BlockNumber int64 `json:"blockNumber"`
|
||||
BlockHash common.Hash `json:"blockHash"`
|
||||
}
|
||||
|
||||
type ContractDestroyed struct {
|
||||
BlockNumber int64 `json:"blockNumber"`
|
||||
}
|
||||
|
||||
type Tx struct {
|
||||
From string `json:"from"`
|
||||
To string `json:"to"`
|
||||
Value *big.Int `json:"value"`
|
||||
TransactionHash string `json:"txHash"`
|
||||
BlockNumber int64 `json:"blockNumber"`
|
||||
BlockHash string `json:"blockHash"`
|
||||
}
|
||||
|
||||
type StorageKey struct {
|
||||
Key string `json:"key"`
|
||||
}
|
||||
|
||||
type CountIncremented struct {
|
||||
BlockNumber *big.Int `json:"blockNumber"`
|
||||
}
|
||||
|
||||
const ContractServerUrl = "http://localhost:3000"
|
||||
|
||||
// Factory which creates endpoint functions
|
||||
func MakeGetAndDecodeFunc[R any](format string) func(...interface{}) (*R, error) {
|
||||
return func(params ...interface{}) (*R, error) {
|
||||
params = append([]interface{}{ContractServerUrl}, params...)
|
||||
url := fmt.Sprintf(format, params...)
|
||||
res, err := http.Get(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
if res.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("%s: %s", url, res.Status)
|
||||
}
|
||||
|
||||
var data R
|
||||
decoder := json.NewDecoder(res.Body)
|
||||
return &data, decoder.Decode(&data)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
DeployContract = MakeGetAndDecodeFunc[ContractDeployed]("%s/v1/deployContract")
|
||||
DestroyContract = MakeGetAndDecodeFunc[ContractDestroyed]("%s/v1/destroyContract?addr=%s")
|
||||
DeploySLVContract = MakeGetAndDecodeFunc[ContractDeployed]("%s/v1/deploySLVContract")
|
||||
DestroySLVContract = MakeGetAndDecodeFunc[ContractDestroyed]("%s/v1/destroySLVContract?addr=%s")
|
||||
SendEth = MakeGetAndDecodeFunc[Tx]("%s/v1/sendEth?to=%s&value=%s")
|
||||
GetStorageSlotKey = MakeGetAndDecodeFunc[StorageKey]("%s/v1/getStorageKey?contract=%s&label=%s")
|
||||
IncrementCount = MakeGetAndDecodeFunc[CountIncremented]("%s/v1/incrementCount%s?addr=%s")
|
||||
Create2Contract = MakeGetAndDecodeFunc[ContractDeployed]("%s/v1/create2Contract?contract=%s&salt=%s")
|
||||
)
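A brief usage sketch of these generated helpers, mirroring the integration tests that follow; the recipient address is a placeholder and the fmt, log, and common imports are assumed:

// Each helper issues a GET against the contract server and decodes the JSON body.
contract, err := integration.DeployContract() // GET http://localhost:3000/v1/deployContract
if err != nil {
	log.Fatal(err)
}
fmt.Println(contract.Address.Hex(), contract.BlockNumber)

// Extra arguments fill the %s placeholders that follow the server URL.
var to common.Address // placeholder recipient
tx, err := integration.SendEth(to, "0.01") // GET .../v1/sendEth?to=<addr>&value=0.01
if err != nil {
	log.Fatal(err)
}
fmt.Println(tx.TransactionHash, tx.BlockNumber)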
|
||||
@ -1,378 +0,0 @@
|
||||
package integration_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/integration"
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
|
||||
)
|
||||
|
||||
var _ = Describe("Direct proxy integration test", Label("proxy"), func() {
|
||||
ctx := context.Background()
|
||||
|
||||
var contract *integration.ContractDeployed
|
||||
var tx *integration.Tx
|
||||
var contractErr error
|
||||
var txErr error
|
||||
|
||||
Describe("Get Block", func() {
|
||||
BeforeEach(func() {
|
||||
contract, contractErr = integration.DeployContract()
|
||||
Expect(contractErr).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("gets not existing block by number", func() {
|
||||
blockNum := contract.BlockNumber + 100
|
||||
|
||||
gethBlock, err := gethClient.BlockByNumber(ctx, big.NewInt(blockNum))
|
||||
Expect(err).To(MatchError(ethereum.NotFound))
|
||||
Expect(gethBlock).To(BeZero())
|
||||
|
||||
ipldBlock, err := ipldClient.BlockByNumber(ctx, big.NewInt(blockNum))
|
||||
Expect(err).To(MatchError(ethereum.NotFound))
|
||||
Expect(ipldBlock).To(BeZero())
|
||||
})
|
||||
|
||||
It("gets not existing block by hash", func() {
|
||||
gethBlock, err := gethClient.BlockByHash(ctx, nonExistingBlockHash)
|
||||
Expect(err).To(MatchError(ethereum.NotFound))
|
||||
Expect(gethBlock).To(BeZero())
|
||||
|
||||
ipldBlock, err := ipldClient.BlockByHash(ctx, nonExistingBlockHash)
|
||||
Expect(err).To(MatchError(ethereum.NotFound))
|
||||
Expect(ipldBlock).To(BeZero())
|
||||
})
|
||||
|
||||
It("gets block by number", func() {
|
||||
blockNum := contract.BlockNumber
|
||||
|
||||
_, err := gethClient.BlockByNumber(ctx, big.NewInt(blockNum))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
_, err = ipldClient.BlockByNumber(ctx, big.NewInt(blockNum))
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
|
||||
It("gets block by hash", func() {
|
||||
_, err := gethClient.BlockByHash(ctx, contract.BlockHash)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
_, err = ipldClient.BlockByHash(ctx, contract.BlockHash)
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Transaction", func() {
|
||||
BeforeEach(func() {
|
||||
contract, contractErr = integration.DeployContract()
|
||||
Expect(contractErr).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Get tx by hash", func() {
|
||||
_, _, err := gethClient.TransactionByHash(ctx, contract.TransactionHash)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
_, _, err = ipldClient.TransactionByHash(ctx, contract.TransactionHash)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("not found"))
|
||||
})
|
||||
|
||||
It("Get tx by block hash and index", func() {
|
||||
_, err := gethClient.TransactionInBlock(ctx, contract.BlockHash, 0)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
_, err = ipldClient.TransactionInBlock(ctx, contract.BlockHash, 0)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("not found"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Receipt", func() {
|
||||
BeforeEach(func() {
|
||||
contract, contractErr = integration.DeployContract()
|
||||
Expect(contractErr).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Get tx receipt", func() {
|
||||
_, err := gethClient.TransactionReceipt(ctx, contract.TransactionHash)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
_, err = ipldClient.TransactionReceipt(ctx, contract.TransactionHash)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("not found"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("FilterLogs", func() {
|
||||
BeforeEach(func() {
|
||||
contract, contractErr = integration.DeployContract()
|
||||
Expect(contractErr).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("with blockhash", func() {
|
||||
blockHash := contract.BlockHash
|
||||
filterQuery := ethereum.FilterQuery{
|
||||
//Addresses: addresses,
|
||||
BlockHash: &blockHash,
|
||||
Topics: [][]common.Hash{},
|
||||
}
|
||||
|
||||
gethLogs, err := gethClient.FilterLogs(ctx, filterQuery)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldLogs, err := ipldClient.FilterLogs(ctx, filterQuery)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// not empty list
|
||||
Expect(gethLogs).ToNot(BeEmpty())
|
||||
// empty list
|
||||
Expect(ipldLogs).To(BeEmpty())
|
||||
})
|
||||
})
|
||||
|
||||
Describe("CodeAt", func() {
|
||||
BeforeEach(func() {
|
||||
contract, contractErr = integration.DeployContract()
|
||||
Expect(contractErr).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("gets code of deployed contract with block number", func() {
|
||||
_, err := gethClient.CodeAt(ctx, contract.Address, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldCode, err := ipldClient.CodeAt(ctx, contract.Address, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(ipldCode).To(BeEmpty())
|
||||
})
|
||||
It("gets code of contract that doesn't exist at this height", func() {
|
||||
gethCode, err := gethClient.CodeAt(ctx, contract.Address, big.NewInt(contract.BlockNumber-1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldCode, err := ipldClient.CodeAt(ctx, contract.Address, big.NewInt(contract.BlockNumber-1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethCode).To(BeEmpty())
|
||||
Expect(gethCode).To(Equal(ipldCode))
|
||||
})
|
||||
|
||||
It("gets code at non-existing address without block number", func() {
|
||||
gethCode, err := gethClient.CodeAt(ctx, nonExistingAddress, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldCode, err := ipldClient.CodeAt(ctx, nonExistingAddress, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethCode).To(BeEmpty())
|
||||
Expect(gethCode).To(Equal(ipldCode))
|
||||
})
|
||||
It("gets code of deployed contract without block number", func() {
|
||||
_, err := gethClient.CodeAt(ctx, contract.Address, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldCode, err := ipldClient.CodeAt(ctx, contract.Address, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(ipldCode).To(BeEmpty())
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Get balance", func() {
|
||||
var newAddress common.Address
|
||||
rand.Read(newAddress[:])
|
||||
|
||||
BeforeEach(func() {
|
||||
tx, txErr = integration.SendEth(newAddress, "0.01")
|
||||
Expect(txErr).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("gets balance for an account with eth without block number", func() {
|
||||
gethBalance, err := gethClient.BalanceAt(ctx, newAddress, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(gethBalance.String()).To(Equal("10000000000000000"))
|
||||
|
||||
ipldBalance, err := ipldClient.BalanceAt(ctx, newAddress, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(ipldBalance.String()).To(Equal("0"))
|
||||
})
|
||||
It("gets balance for an account with eth with block number", func() {
|
||||
_, err := gethClient.BalanceAt(ctx, newAddress, big.NewInt(tx.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
_, err = ipldClient.BalanceAt(ctx, newAddress, big.NewInt(tx.BlockNumber))
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
})
|
||||
It("gets historical balance for an account with eth with block number", func() {
|
||||
_, err := gethClient.BalanceAt(ctx, newAddress, big.NewInt(tx.BlockNumber-1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
_, err = ipldClient.BalanceAt(ctx, newAddress, big.NewInt(tx.BlockNumber-1))
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
})
|
||||
|
||||
It("gets balance for a non-existing account without block number", func() {
|
||||
gethBalance, err := gethClient.BalanceAt(ctx, nonExistingAddress, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldBalance, err := ipldClient.BalanceAt(ctx, nonExistingAddress, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethBalance).To(Equal(ipldBalance))
|
||||
})
|
||||
It("gets balance for an non-existing block number", func() {
|
||||
gethBalance, err := gethClient.BalanceAt(ctx, newAddress, big.NewInt(tx.BlockNumber+3))
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
|
||||
ipldBalance, err := ipldClient.BalanceAt(ctx, nonExistingAddress, big.NewInt(tx.BlockNumber+3))
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
|
||||
Expect(gethBalance).To(Equal(ipldBalance))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Get Storage", func() {
|
||||
BeforeEach(func() {
|
||||
contract, contractErr = integration.DeployContract()
|
||||
Expect(contractErr).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("gets ERC20 total supply (without block number)", func() {
|
||||
gethStorage, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
gethTotalSupply := new(big.Int).SetBytes(gethStorage)
|
||||
Expect(gethTotalSupply).To(Equal(erc20TotalSupply))
|
||||
|
||||
ipldStorage, err := ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(ipldStorage).To(Equal(make([]byte, 32)))
|
||||
})
|
||||
|
||||
It("gets ERC20 total supply (with block number)", func() {
|
||||
gethStorage, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
gethTotalSupply := new(big.Int).SetBytes(gethStorage)
|
||||
Expect(gethTotalSupply).To(Equal(erc20TotalSupply))
|
||||
|
||||
_, err = ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
})
|
||||
|
||||
It("gets storage for non-existing account", func() {
|
||||
_, err := gethClient.StorageAt(ctx, nonExistingAddress, ercTotalSupplyIndex, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
_, err = ipldClient.StorageAt(ctx, nonExistingAddress, ercTotalSupplyIndex, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
})
|
||||
|
||||
It("gets storage for non-existing contract slot", func() {
|
||||
_, err := gethClient.StorageAt(ctx, contract.Address, randomHash, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
_, err = ipldClient.StorageAt(ctx, contract.Address, randomHash, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
})
|
||||
|
||||
It("gets storage for non-existing contract", func() {
|
||||
gethStorage, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(0))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldStorage, err := ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(0))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(gethStorage).To(Equal(ipldStorage))
|
||||
})
|
||||
|
||||
It("gets storage for non-existing block number", func() {
|
||||
blockNum := contract.BlockNumber + 100
|
||||
|
||||
gethStorage, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(blockNum))
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
|
||||
ipldStorage, err := ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(blockNum))
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
Expect(gethStorage).To(Equal(ipldStorage))
|
||||
})
|
||||
|
||||
It("get storage after self destruct", func() {
|
||||
tx, err := integration.DestroyContract(contract.Address)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
gethStorage1, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(tx.BlockNumber-1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
gethStorage2, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(tx.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethStorage1).NotTo(Equal(gethStorage2))
|
||||
Expect(gethStorage2).To(Equal(eth.EmptyNodeValue))
|
||||
|
||||
_, err = ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(tx.BlockNumber-1))
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
|
||||
_, err = ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(tx.BlockNumber))
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
|
||||
// Query the current block
|
||||
ipldStorage3, err := ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(eth.EmptyNodeValue).To(Equal(ipldStorage3))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("eth call", func() {
|
||||
var msg ethereum.CallMsg
|
||||
|
||||
BeforeEach(func() {
|
||||
contract, contractErr = integration.DeployContract()
|
||||
Expect(contractErr).ToNot(HaveOccurred())
|
||||
|
||||
msg = ethereum.CallMsg{
|
||||
To: &contract.Address,
|
||||
Data: common.Hex2Bytes("18160ddd"), // totalSupply()
|
||||
}
|
||||
})
|
||||
|
||||
It("calls totalSupply() without block number", func() {
|
||||
gethResult, err := gethClient.CallContract(ctx, msg, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
gethTotalSupply := new(big.Int).SetBytes(gethResult)
|
||||
Expect(gethTotalSupply).To(Equal(erc20TotalSupply))
|
||||
|
||||
ipldResult, err := ipldClient.CallContract(ctx, msg, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethResult).To(Equal(ipldResult))
|
||||
})
|
||||
|
||||
It("calls totalSupply() with block number", func() {
|
||||
gethResult, err := gethClient.CallContract(ctx, msg, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
gethTotalSupply := new(big.Int).SetBytes(gethResult)
|
||||
Expect(gethTotalSupply).To(Equal(erc20TotalSupply))
|
||||
|
||||
ipldResult, err := ipldClient.CallContract(ctx, msg, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethResult).To(Equal(ipldResult))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Chain ID", func() {
|
||||
It("Check chain id", func() {
|
||||
_, err := gethClient.ChainID(ctx)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
_, err = ipldClient.ChainID(ctx)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
})
|
||||
})
|
||||
@ -1,53 +0,0 @@
package integration_test

import (
	"os"
	"strconv"
	"testing"

	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/rpc"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestIntegration(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "integration test suite")
}

var (
	gethHttpPath    = "http://127.0.0.1:8545"
	ipldEthHttpPath = "http://127.0.0.1:8081"

	gethClient    *ethclient.Client
	ipldClient    *ethclient.Client
	gethRPCClient *rpc.Client
	ipldRPCClient *rpc.Client

	testChainId int64 = 99
)

var _ = BeforeSuite(func() {
	var err error

	envChainID := os.Getenv("ETH_CHAIN_ID")
	if len(envChainID) == 0 {
		panic("ETH_CHAIN_ID must be set")
	}
	testChainId, err = strconv.ParseInt(envChainID, 10, 64)
	Expect(err).ToNot(HaveOccurred())

	if path := os.Getenv("ETH_HTTP_PATH"); len(path) != 0 {
		gethHttpPath = "http://" + path
	}
	if path := os.Getenv("SERVER_HTTP_PATH"); len(path) != 0 {
		ipldEthHttpPath = "http://" + path
	}

	gethClient, err = ethclient.Dial(gethHttpPath)
	Expect(err).ToNot(HaveOccurred())

	ipldClient, err = ethclient.Dial(ipldEthHttpPath)
	Expect(err).ToNot(HaveOccurred())
})
@ -1,532 +0,0 @@
|
||||
package integration_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
integration "github.com/cerc-io/ipld-eth-server/v5/integration"
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
|
||||
)
|
||||
|
||||
var (
|
||||
nonExistingBlockHash = common.HexToHash("0x111111111111111111111111111111111111111111111111111111111111111")
|
||||
nonExistingAddress = common.HexToAddress("0x1111111111111111111111111111111111111111")
|
||||
randomAddr = common.HexToAddress("0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f")
|
||||
randomHash = crypto.Keccak256Hash(randomAddr.Bytes())
|
||||
|
||||
erc20TotalSupply, _ = new(big.Int).SetString("1000000000000000000000", 10)
|
||||
ercTotalSupplyIndex = common.HexToHash("0x2")
|
||||
)
|
||||
|
||||
var _ = Describe("Basic integration test", func() {
|
||||
ctx := context.Background()
|
||||
|
||||
var contract *integration.ContractDeployed
|
||||
var tx *integration.Tx
|
||||
var contractErr error
|
||||
var txErr error
|
||||
|
||||
Describe("Get Block", func() {
|
||||
BeforeEach(func() {
|
||||
contract, contractErr = integration.DeployContract()
|
||||
Expect(contractErr).ToNot(HaveOccurred())
|
||||
|
||||
err := waitForBlock(ctx, ipldClient, contract.BlockNumber)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("gets not existing block by number", func() {
|
||||
blockNum := big.NewInt(contract.BlockNumber + 100)
|
||||
|
||||
gethBlock, err := gethClient.BlockByNumber(ctx, blockNum)
|
||||
Expect(err).To(MatchError(ethereum.NotFound))
|
||||
Expect(gethBlock).To(BeZero())
|
||||
|
||||
ipldBlock, err := ipldClient.BlockByNumber(ctx, blockNum)
|
||||
Expect(err).To(MatchError(ethereum.NotFound))
|
||||
Expect(ipldBlock).To(BeZero())
|
||||
})
|
||||
|
||||
It("gets not existing block by hash", func() {
|
||||
gethBlock, err := gethClient.BlockByHash(ctx, nonExistingBlockHash)
|
||||
Expect(err).To(MatchError(ethereum.NotFound))
|
||||
Expect(gethBlock).To(BeZero())
|
||||
|
||||
ipldBlock, err := ipldClient.BlockByHash(ctx, nonExistingBlockHash)
|
||||
Expect(err).To(MatchError(ethereum.NotFound))
|
||||
Expect(ipldBlock).To(BeZero())
|
||||
})
|
||||
|
||||
It("gets block by number", func() {
|
||||
blockNum := big.NewInt(contract.BlockNumber)
|
||||
|
||||
gethBlock, err := gethClient.BlockByNumber(ctx, blockNum)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldBlock, err := ipldClient.BlockByNumber(ctx, blockNum)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// check headers are equals
|
||||
Expect(gethBlock.Header()).To(Equal(ipldBlock.Header()))
|
||||
|
||||
gethTxs := gethBlock.Transactions()
|
||||
ipldTxs := ipldBlock.Transactions()
|
||||
|
||||
Expect(gethTxs.Len()).To(Equal(ipldTxs.Len()))
|
||||
Expect(types.TxDifference(gethTxs, ipldTxs).Len()).To(Equal(0))
|
||||
})
|
||||
|
||||
It("gets block by hash", func() {
|
||||
gethBlock, err := gethClient.BlockByHash(ctx, contract.BlockHash)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldBlock, err := ipldClient.BlockByHash(ctx, contract.BlockHash)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// check headers are equals
|
||||
compareBlocks(gethBlock, ipldBlock)
|
||||
|
||||
gethTxs := gethBlock.Transactions()
|
||||
ipldTxs := ipldBlock.Transactions()
|
||||
|
||||
Expect(gethTxs.Len()).To(Equal(ipldTxs.Len()))
|
||||
Expect(types.TxDifference(gethTxs, ipldTxs).Len()).To(Equal(0))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Transaction", func() {
|
||||
BeforeEach(func() {
|
||||
contract, contractErr = integration.DeployContract()
|
||||
Expect(contractErr).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Get tx by hash", func() {
|
||||
gethTx, _, err := gethClient.TransactionByHash(ctx, contract.TransactionHash)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldTx, _, err := ipldClient.TransactionByHash(ctx, contract.TransactionHash)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
compareTxs(gethTx, ipldTx)
|
||||
|
||||
Expect(gethTx.Hash()).To(Equal(ipldTx.Hash()))
|
||||
})
|
||||
|
||||
It("Get tx by block hash and index", func() {
|
||||
gethTx, err := gethClient.TransactionInBlock(ctx, contract.BlockHash, 0)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldTx, err := ipldClient.TransactionInBlock(ctx, contract.BlockHash, 0)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
compareTxs(gethTx, ipldTx)
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Receipt", func() {
|
||||
BeforeEach(func() {
|
||||
contract, contractErr = integration.DeployContract()
|
||||
Expect(contractErr).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Get tx receipt", func() {
|
||||
gethReceipt, err := gethClient.TransactionReceipt(ctx, contract.TransactionHash)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldReceipt, err := ipldClient.TransactionReceipt(ctx, contract.TransactionHash)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethReceipt).To(Equal(ipldReceipt))
|
||||
|
||||
rlpGeth, err := gethReceipt.MarshalBinary()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
rlpIpld, err := ipldReceipt.MarshalBinary()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(rlpGeth).To(Equal(rlpIpld))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("FilterLogs", func() {
|
||||
BeforeEach(func() {
|
||||
contract, contractErr = integration.DeployContract()
|
||||
Expect(contractErr).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("with blockhash", func() {
|
||||
blockHash := contract.BlockHash
|
||||
filterQuery := ethereum.FilterQuery{
|
||||
//Addresses: addresses,
|
||||
BlockHash: &blockHash,
|
||||
Topics: [][]common.Hash{},
|
||||
}
|
||||
|
||||
gethLogs, err := gethClient.FilterLogs(ctx, filterQuery)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldLogs, err := ipldClient.FilterLogs(ctx, filterQuery)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// not empty list
|
||||
Expect(gethLogs).ToNot(BeEmpty())
|
||||
|
||||
Expect(len(gethLogs)).To(Equal(len(ipldLogs)))
|
||||
Expect(gethLogs).To(Equal(ipldLogs))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("CodeAt", func() {
|
||||
BeforeEach(func() {
|
||||
contract, contractErr = integration.DeployContract()
|
||||
Expect(contractErr).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("gets code at non-existing address without block number", func() {
|
||||
gethCode, err := gethClient.CodeAt(ctx, nonExistingAddress, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldCode, err := ipldClient.CodeAt(ctx, nonExistingAddress, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethCode).To(BeEmpty())
|
||||
Expect(gethCode).To(Equal(ipldCode))
|
||||
})
|
||||
It("gets code of deployed contract without block number", func() {
|
||||
gethCode, err := gethClient.CodeAt(ctx, contract.Address, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldCode, err := ipldClient.CodeAt(ctx, contract.Address, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(gethCode).To(Equal(ipldCode))
|
||||
})
|
||||
It("gets code of deployed contract with block number", func() {
|
||||
gethCode, err := gethClient.CodeAt(ctx, contract.Address, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldCode, err := ipldClient.CodeAt(ctx, contract.Address, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(gethCode).To(Equal(ipldCode))
|
||||
})
|
||||
It("gets code of contract that doesn't exist at this height", func() {
|
||||
gethCode, err := gethClient.CodeAt(ctx, contract.Address, big.NewInt(contract.BlockNumber-1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldCode, err := ipldClient.CodeAt(ctx, contract.Address, big.NewInt(contract.BlockNumber-1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethCode).To(BeEmpty())
|
||||
Expect(gethCode).To(Equal(ipldCode))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Get balance", func() {
|
||||
var newAddress common.Address
|
||||
rand.Read(newAddress[:])
|
||||
|
||||
BeforeEach(func() {
|
||||
tx, txErr = integration.SendEth(newAddress, "0.01")
|
||||
Expect(txErr).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("gets balance for an account with eth with block number", func() {
|
||||
gethBalance, err := gethClient.BalanceAt(ctx, newAddress, big.NewInt(tx.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldBalance, err := ipldClient.BalanceAt(ctx, newAddress, big.NewInt(tx.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethBalance).To(Equal(ipldBalance))
|
||||
})
|
||||
It("gets balance for an account with eth without block number", func() {
|
||||
gethBalance, err := gethClient.BalanceAt(ctx, newAddress, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldBalance, err := ipldClient.BalanceAt(ctx, newAddress, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethBalance).To(Equal(ipldBalance))
|
||||
})
|
||||
|
||||
It("gets historical balance for an account with eth with block number", func() {
|
||||
gethBalance, err := gethClient.BalanceAt(ctx, newAddress, big.NewInt(tx.BlockNumber-1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldBalance, err := ipldClient.BalanceAt(ctx, newAddress, big.NewInt(tx.BlockNumber-1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethBalance).To(Equal(ipldBalance))
|
||||
})
|
||||
It("gets balance for a non-existing account without block number", func() {
|
||||
gethBalance, err := gethClient.BalanceAt(ctx, nonExistingAddress, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldBalance, err := ipldClient.BalanceAt(ctx, nonExistingAddress, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethBalance).To(Equal(ipldBalance))
|
||||
})
|
||||
It("gets balance for an non-existing block number", func() {
|
||||
gethBalance, err := gethClient.BalanceAt(ctx, newAddress, big.NewInt(tx.BlockNumber+3))
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
|
||||
ipldBalance, err := ipldClient.BalanceAt(ctx, newAddress, big.NewInt(tx.BlockNumber+3))
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
|
||||
Expect(gethBalance).To(Equal(ipldBalance))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Get Storage", func() {
|
||||
var contractSalt string
|
||||
countAIndex := common.HexToHash("0x5")
|
||||
|
||||
BeforeEach(func() {
|
||||
contract, contractErr = integration.DeployContract()
|
||||
Expect(contractErr).ToNot(HaveOccurred())
|
||||
Expect(contract.BlockNumber).ToNot(BeZero())
|
||||
|
||||
contractSalt = common.Bytes2Hex(contract.BlockHash[:10])
|
||||
|
||||
err := waitForBlock(ctx, ipldClient, contract.BlockNumber)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("gets ERC20 total supply (with block number)", func() {
|
||||
gethStorage, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
gethTotalSupply := new(big.Int).SetBytes(gethStorage)
|
||||
Expect(gethTotalSupply).To(Equal(erc20TotalSupply))
|
||||
|
||||
ipldStorage, err := ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(gethStorage).To(Equal(ipldStorage))
|
||||
})
|
||||
|
||||
It("gets ERC20 total supply (without block number)", func() {
|
||||
gethStorage, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
gethTotalSupply := new(big.Int).SetBytes(gethStorage)
|
||||
Expect(gethTotalSupply).To(Equal(erc20TotalSupply))
|
||||
|
||||
ipldStorage, err := ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethStorage).To(Equal(ipldStorage))
|
||||
})
|
||||
|
||||
It("gets storage for non-existing account", func() {
|
||||
gethStorage, err := gethClient.StorageAt(ctx, nonExistingAddress, ercTotalSupplyIndex, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldStorage, err := ipldClient.StorageAt(ctx, nonExistingAddress, ercTotalSupplyIndex, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(gethStorage).To(Equal(ipldStorage))
|
||||
})
|
||||
|
||||
It("gets storage for non-existing contract slot", func() {
|
||||
gethStorage, err := gethClient.StorageAt(ctx, contract.Address, randomHash, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldStorage, err := ipldClient.StorageAt(ctx, contract.Address, randomHash, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(gethStorage).To(Equal(ipldStorage))
|
||||
})
|
||||
|
||||
It("gets storage for non-existing contract", func() {
|
||||
gethStorage, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(0))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldStorage, err := ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(0))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(gethStorage).To(Equal(ipldStorage))
|
||||
})
|
||||
|
||||
It("gets storage for non-existing block number", func() {
|
||||
blockNum := contract.BlockNumber + 100
|
||||
gethStorage, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(blockNum))
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
|
||||
ipldStorage, err := ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(blockNum))
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
Expect(gethStorage).To(Equal(ipldStorage))
|
||||
})
|
||||
|
||||
It("gets storage for SLV countA after tx", func() {
|
||||
slvContract, contractErr := integration.Create2Contract("SLVToken", contractSalt)
|
||||
Expect(contractErr).ToNot(HaveOccurred())
|
||||
|
||||
gethStorage, err := gethClient.StorageAt(ctx, slvContract.Address, countAIndex, big.NewInt(slvContract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
slvCountA := new(big.Int).SetBytes(gethStorage)
|
||||
|
||||
err = waitForBlock(ctx, ipldClient, slvContract.BlockNumber)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldStorage, err := ipldClient.StorageAt(ctx, slvContract.Address, countAIndex, big.NewInt(slvContract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldCountA := new(big.Int).SetBytes(ipldStorage)
|
||||
Expect(ipldCountA).To(Equal(slvCountA))
|
||||
|
||||
inc, err := integration.IncrementCount("A", slvContract.Address)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
slvCountA.Add(slvCountA, big.NewInt(1))
|
||||
|
||||
ipldStorage, err = ipldClient.StorageAt(ctx, slvContract.Address, countAIndex, inc.BlockNumber)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldCountA = new(big.Int).SetBytes(ipldStorage)
|
||||
Expect(ipldCountA).To(Equal(slvCountA))
|
||||
})
|
||||
|
||||
It("gets storage after destruction", func() {
|
||||
slvContract, contractErr := integration.Create2Contract("SLVToken", contractSalt)
|
||||
Expect(contractErr).ToNot(HaveOccurred())
|
||||
|
||||
tx, err := integration.DestroyContract(contract.Address)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
slvTx, err := integration.DestroyContract(slvContract.Address)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
gethStorage1, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(tx.BlockNumber-1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
gethStorage2, err := gethClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(tx.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethStorage1).NotTo(Equal(gethStorage2))
|
||||
Expect(gethStorage2).To(Equal(eth.EmptyNodeValue))
|
||||
|
||||
ipldStorage1, err := ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(tx.BlockNumber-1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
ipldStorage2, err := ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, big.NewInt(tx.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(ipldStorage1).To(Equal(gethStorage1))
|
||||
Expect(ipldStorage2).To(Equal(gethStorage2))
|
||||
|
||||
// Query the current block
|
||||
ipldStorage3, err := ipldClient.StorageAt(ctx, contract.Address, ercTotalSupplyIndex, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(ipldStorage2).To(Equal(ipldStorage3))
|
||||
|
||||
// Check for SLV contract
|
||||
gethStorage, err := gethClient.StorageAt(ctx, slvContract.Address, countAIndex, big.NewInt(slvTx.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(gethStorage).To(Equal(eth.EmptyNodeValue))
|
||||
|
||||
ipldStorage, err := ipldClient.StorageAt(ctx, slvContract.Address, countAIndex, big.NewInt(slvTx.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(ipldStorage).To(Equal(gethStorage))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("eth call", func() {
|
||||
var msg ethereum.CallMsg
|
||||
|
||||
BeforeEach(func() {
|
||||
contract, contractErr = integration.DeployContract()
|
||||
Expect(contractErr).ToNot(HaveOccurred())
|
||||
|
||||
msg = ethereum.CallMsg{
|
||||
To: &contract.Address,
|
||||
Data: common.Hex2Bytes("18160ddd"), // totalSupply()
|
||||
}
|
||||
})
|
||||
|
||||
It("calls totalSupply() without block number", func() {
|
||||
gethResult, err := gethClient.CallContract(ctx, msg, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
gethTotalSupply := new(big.Int).SetBytes(gethResult)
|
||||
Expect(gethTotalSupply).To(Equal(erc20TotalSupply))
|
||||
|
||||
ipldResult, err := ipldClient.CallContract(ctx, msg, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethResult).To(Equal(ipldResult))
|
||||
})
|
||||
|
||||
It("calls totalSupply() with block number", func() {
|
||||
gethResult, err := gethClient.CallContract(ctx, msg, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
gethTotalSupply := new(big.Int).SetBytes(gethResult)
|
||||
Expect(gethTotalSupply).To(Equal(erc20TotalSupply))
|
||||
|
||||
ipldResult, err := ipldClient.CallContract(ctx, msg, big.NewInt(contract.BlockNumber))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethResult).To(Equal(ipldResult))
|
||||
})
|
||||
|
||||
It("calls totalSupply() with block hash", func() {
|
||||
gethResult, err := gethClient.CallContractAtHash(ctx, msg, contract.BlockHash)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
gethTotalSupply := new(big.Int).SetBytes(gethResult)
|
||||
Expect(gethTotalSupply).To(Equal(erc20TotalSupply))
|
||||
|
||||
ipldResult, err := ipldClient.CallContractAtHash(ctx, msg, contract.BlockHash)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethResult).To(Equal(ipldResult))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Chain ID", func() {
|
||||
It("Check chain id", func() {
|
||||
gethChainId, err := gethClient.ChainID(ctx)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldChainId, err := ipldClient.ChainID(ctx)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethChainId).To(Equal(ipldChainId))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func compareBlocks(block1 *types.Block, block2 *types.Block) {
|
||||
GinkgoHelper()
|
||||
Expect(block1.Header()).To(Equal(block2.Header()))
|
||||
Expect(block1.Uncles()).To(Equal(block2.Uncles()))
|
||||
|
||||
txs1 := block1.Transactions()
|
||||
txs2 := block2.Transactions()
|
||||
|
||||
Expect(len(txs1)).To(Equal(len(txs2)))
|
||||
for i, tx := range txs1 {
|
||||
compareTxs(tx, txs2[i])
|
||||
}
|
||||
}
|
||||
|
||||
func compareTxs(tx1 *types.Transaction, tx2 *types.Transaction) {
|
||||
GinkgoHelper()
|
||||
Expect(tx1.Data()).To(Equal(tx2.Data()))
|
||||
Expect(tx1.Hash()).To(Equal(tx2.Hash()))
|
||||
Expect(tx1.Size()).To(Equal(tx2.Size()))
|
||||
|
||||
signer := types.NewLondonSigner(big.NewInt(testChainId))
|
||||
|
||||
gethSender, err := types.Sender(signer, tx1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ipldSender, err := types.Sender(signer, tx2)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(gethSender).To(Equal(ipldSender))
|
||||
}
|
||||
@ -1,28 +0,0 @@
package integration_test

import (
	"context"
	"errors"
	"time"

	"github.com/ethereum/go-ethereum/ethclient"
)

func waitForBlock(ctx context.Context, client *ethclient.Client, target int64) error {
	timeout := 10 * time.Second
	for {
		select {
		case <-time.After(timeout):
			return errors.New("timed out")
		default:
			latest, err := client.BlockNumber(ctx)
			if err != nil {
				return err
			}
			if uint64(target) <= latest {
				return nil
			}
			time.Sleep(time.Second)
		}
	}
}
7
main.go
7
main.go
@ -16,9 +16,14 @@
package main

import (
	"github.com/cerc-io/ipld-eth-server/v5/cmd"
	"github.com/sirupsen/logrus"

	"github.com/vulcanize/ipld-eth-server/v4/cmd"
)

func main() {
	logrus.SetFormatter(&logrus.TextFormatter{
		FullTimestamp: true,
	})
	cmd.Execute()
}

BIN
pkg/.DS_Store
vendored
Normal file
BIN
pkg/.DS_Store
vendored
Normal file
Binary file not shown.
44
pkg/client/client.go
Normal file
44
pkg/client/client.go
Normal file
@ -0,0 +1,44 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

// Client is used by watchers to stream chain IPLD data from a vulcanizedb ipld-eth-server
package client

import (
	"context"

	"github.com/ethereum/go-ethereum/rpc"

	"github.com/vulcanize/ipld-eth-server/v4/pkg/eth"
	"github.com/vulcanize/ipld-eth-server/v4/pkg/serve"
)

// Client is used to subscribe to the ipld-eth-server ipld data stream
type Client struct {
	c *rpc.Client
}

// NewClient creates a new Client
func NewClient(c *rpc.Client) *Client {
	return &Client{
		c: c,
	}
}

// Stream is the main loop for subscribing to iplds from an ipld-eth-server server
func (c *Client) Stream(payloadChan chan serve.SubscriptionPayload, params eth.SubscriptionSettings) (*rpc.ClientSubscription, error) {
	return c.c.Subscribe(context.Background(), "vdb", payloadChan, "stream", params)
}
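A minimal usage sketch of the client above, not taken from the diff: the import paths follow the v4 module path shown in this file, the websocket address is illustrative, and eth.SubscriptionSettings{} is passed only as a placeholder for real filter settings.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"

	"github.com/vulcanize/ipld-eth-server/v4/pkg/client"
	"github.com/vulcanize/ipld-eth-server/v4/pkg/eth"
	"github.com/vulcanize/ipld-eth-server/v4/pkg/serve"
)

func main() {
	// Dial the ipld-eth-server RPC endpoint; subscriptions need a ws or ipc transport (address is an assumption).
	rpcClient, err := rpc.Dial("ws://127.0.0.1:8081")
	if err != nil {
		panic(err)
	}

	// Wrap the raw RPC client and open the stream subscription in the "vdb" namespace.
	c := client.NewClient(rpcClient)
	payloads := make(chan serve.SubscriptionPayload)
	sub, err := c.Stream(payloads, eth.SubscriptionSettings{})
	if err != nil {
		panic(err)
	}
	defer sub.Unsubscribe()

	// Drain payloads until the subscription reports an error.
	for {
		select {
		case payload := <-payloads:
			fmt.Printf("received payload: %+v\n", payload)
		case err := <-sub.Err():
			fmt.Println("subscription closed:", err)
			return
		}
	}
}
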
@ -1,51 +0,0 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package debug

import (
	"context"
	"errors"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers"

	"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
)

var _ tracers.Backend = &Backend{}

var (
	errMethodNotSupported = errors.New("backend method not supported")
)

// Backend implements tracers.Backend interface
type Backend struct {
	eth.Backend
}

// StateAtBlock retrieves the state database associated with a certain block
func (b *Backend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive, preferDisk bool) (*state.StateDB, tracers.StateReleaseFunc, error) {
	return nil, func() {}, errMethodNotSupported
}

// StateAtTransaction returns the execution environment of a certain transaction
func (b *Backend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) {
	return nil, vm.BlockContext{}, nil, func() {}, errMethodNotSupported
}
577
pkg/eth/api.go
577
pkg/eth/api.go
File diff suppressed because it is too large
Load Diff
@ -14,15 +14,13 @@
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth_api_test
|
||||
package eth_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/ipld"
|
||||
sdtypes "github.com/cerc-io/plugeth-statediff/types"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
@ -30,29 +28,30 @@ import (
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth/filters"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
|
||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
||||
"github.com/jmoiron/sqlx"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth/test_helpers"
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/shared"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth/test_helpers"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/shared"
|
||||
ethServerShared "github.com/vulcanize/ipld-eth-server/v4/pkg/shared"
|
||||
)
|
||||
|
||||
var (
|
||||
randomAddr = common.HexToAddress("0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f")
|
||||
randomHash = crypto.Keccak256Hash(randomAddr.Bytes())
|
||||
number = rpc.BlockNumber(test_helpers.BlockNumber1)
|
||||
blockTime = test_helpers.BlockTime1
|
||||
londonBlockNum = rpc.BlockNumber(test_helpers.LondonBlockNum)
|
||||
number = rpc.BlockNumber(test_helpers.BlockNumber.Int64())
|
||||
londonBlockNum = rpc.BlockNumber(test_helpers.LondonBlockNum.Int64())
|
||||
wrongNumber = number + 1
|
||||
blockHash = test_helpers.MockBlock.Header().Hash()
|
||||
baseFee = test_helpers.MockLondonBlock.BaseFee()
|
||||
ctx = context.Background()
|
||||
chainConfig = &*params.MergedTestChainConfig
|
||||
|
||||
expectedBlock = map[string]interface{}{
|
||||
expectedBlock = map[string]interface{}{
|
||||
"number": (*hexutil.Big)(test_helpers.MockBlock.Number()),
|
||||
"hash": test_helpers.MockBlock.Hash(),
|
||||
"parentHash": test_helpers.MockBlock.ParentHash(),
|
||||
@ -84,6 +83,7 @@ var (
|
||||
"miner": test_helpers.MockBlock.Header().Coinbase,
|
||||
"difficulty": (*hexutil.Big)(test_helpers.MockBlock.Header().Difficulty),
|
||||
"extraData": hexutil.Bytes(test_helpers.MockBlock.Header().Extra),
|
||||
"size": hexutil.Uint64(test_helpers.MockBlock.Header().Size()),
|
||||
"gasLimit": hexutil.Uint64(test_helpers.MockBlock.Header().GasLimit),
|
||||
"gasUsed": hexutil.Uint64(test_helpers.MockBlock.Header().GasUsed),
|
||||
"timestamp": hexutil.Uint64(test_helpers.MockBlock.Header().Time),
|
||||
@ -131,22 +131,14 @@ var (
|
||||
"receiptsRoot": test_helpers.MockUncles[1].ReceiptHash,
|
||||
"uncles": []common.Hash{},
|
||||
}
|
||||
expectedTransaction = eth.NewRPCTransaction(test_helpers.MockTransactions[0], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), blockTime, 0, test_helpers.MockBlock.BaseFee(), chainConfig)
|
||||
expectedTransaction2 = eth.NewRPCTransaction(test_helpers.MockTransactions[1], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), blockTime, 1, test_helpers.MockBlock.BaseFee(), chainConfig)
|
||||
expectedTransaction3 = eth.NewRPCTransaction(test_helpers.MockTransactions[2], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), blockTime, 2, test_helpers.MockBlock.BaseFee(), chainConfig)
|
||||
expectedLondonTransaction = eth.NewRPCTransaction(
|
||||
test_helpers.MockLondonTransactions[0],
|
||||
test_helpers.MockLondonBlock.Hash(),
|
||||
test_helpers.MockLondonBlock.NumberU64(),
|
||||
test_helpers.MockLondonBlock.Time(),
|
||||
0,
|
||||
test_helpers.MockLondonBlock.BaseFee(),
|
||||
chainConfig,
|
||||
)
|
||||
expectRawTx, _ = test_helpers.MockTransactions[0].MarshalBinary()
|
||||
expectRawTx2, _ = test_helpers.MockTransactions[1].MarshalBinary()
|
||||
expectRawTx3, _ = test_helpers.MockTransactions[2].MarshalBinary()
|
||||
expectedReceipt = map[string]interface{}{
|
||||
expectedTransaction = eth.NewRPCTransaction(test_helpers.MockTransactions[0], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 0, test_helpers.MockBlock.BaseFee())
|
||||
expectedTransaction2 = eth.NewRPCTransaction(test_helpers.MockTransactions[1], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 1, test_helpers.MockBlock.BaseFee())
|
||||
expectedTransaction3 = eth.NewRPCTransaction(test_helpers.MockTransactions[2], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 2, test_helpers.MockBlock.BaseFee())
|
||||
expectedLondonTransaction = eth.NewRPCTransaction(test_helpers.MockLondonTransactions[0], test_helpers.MockLondonBlock.Hash(), test_helpers.MockLondonBlock.NumberU64(), 0, test_helpers.MockLondonBlock.BaseFee())
|
||||
expectRawTx, _ = rlp.EncodeToBytes(test_helpers.MockTransactions[0])
|
||||
expectRawTx2, _ = rlp.EncodeToBytes(test_helpers.MockTransactions[1])
|
||||
expectRawTx3, _ = rlp.EncodeToBytes(test_helpers.MockTransactions[2])
|
||||
expectedReceipt = map[string]interface{}{
|
||||
"blockHash": blockHash,
|
||||
"blockNumber": hexutil.Uint64(uint64(number.Int64())),
|
||||
"transactionHash": expectedTransaction.Hash,
|
||||
@ -159,8 +151,6 @@ var (
|
||||
"logs": test_helpers.MockReceipts[0].Logs,
|
||||
"logsBloom": test_helpers.MockReceipts[0].Bloom,
|
||||
"status": hexutil.Uint(test_helpers.MockReceipts[0].Status),
|
||||
"effectiveGasPrice": (*hexutil.Big)(big.NewInt(100)),
|
||||
"type": hexutil.Uint64(types.LegacyTxType),
|
||||
}
|
||||
expectedReceipt2 = map[string]interface{}{
|
||||
"blockHash": blockHash,
|
||||
@ -175,8 +165,6 @@ var (
|
||||
"logs": test_helpers.MockReceipts[1].Logs,
|
||||
"logsBloom": test_helpers.MockReceipts[1].Bloom,
|
||||
"root": hexutil.Bytes(test_helpers.MockReceipts[1].PostState),
|
||||
"effectiveGasPrice": (*hexutil.Big)(big.NewInt(200)),
|
||||
"type": hexutil.Uint64(types.LegacyTxType),
|
||||
}
|
||||
expectedReceipt3 = map[string]interface{}{
|
||||
"blockHash": blockHash,
|
||||
@ -191,81 +179,80 @@ var (
|
||||
"logs": test_helpers.MockReceipts[2].Logs,
|
||||
"logsBloom": test_helpers.MockReceipts[2].Bloom,
|
||||
"root": hexutil.Bytes(test_helpers.MockReceipts[2].PostState),
|
||||
"effectiveGasPrice": (*hexutil.Big)(big.NewInt(150)),
|
||||
"type": hexutil.Uint64(types.LegacyTxType),
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
db *sqlx.DB
|
||||
api *eth.PublicEthAPI
|
||||
)
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
var (
|
||||
err error
|
||||
tx interfaces.Batch
|
||||
)
|
||||
|
||||
db = shared.SetupDB()
|
||||
indexAndPublisher := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
|
||||
|
||||
backend, err := eth.NewEthBackend(db, ð.Config{
|
||||
ChainConfig: chainConfig,
|
||||
VMConfig: vm.Config{},
|
||||
RPCGasCap: big.NewInt(10000000000), // Max gas capacity for a rpc call.
|
||||
GroupCacheConfig: &shared.GroupCacheConfig{
|
||||
StateDB: shared.GroupConfig{
|
||||
Name: "api_test",
|
||||
CacheSizeInMB: 8,
|
||||
CacheExpiryInMins: 60,
|
||||
LogStatsIntervalInSecs: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
api, _ = eth.NewPublicEthAPI(backend, nil, eth.APIConfig{StateDiffTimeout: shared.DefaultStateDiffTimeout})
|
||||
tx, err = indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer tx.RollbackOnFailure(err)
|
||||
|
||||
ipld := sdtypes.IPLD{
|
||||
CID: ipld.Keccak256ToCid(ipld.RawBinary, test_helpers.CodeHash.Bytes()).String(),
|
||||
Content: test_helpers.ContractCode,
|
||||
}
|
||||
err = indexAndPublisher.PushIPLD(tx, ipld)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
for _, node := range test_helpers.MockStateNodes {
|
||||
err = indexAndPublisher.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
err = tx.Submit()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
uncles := test_helpers.MockBlock.Uncles()
|
||||
uncleHashes := make([]common.Hash, len(uncles))
|
||||
for i, uncle := range uncles {
|
||||
uncleHashes[i] = uncle.Hash()
|
||||
}
|
||||
expectedBlock["uncles"] = uncleHashes
|
||||
|
||||
// setting chain config to for london block
|
||||
chainConfig.LondonBlock = big.NewInt(2)
|
||||
indexAndPublisher = shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
|
||||
|
||||
tx, err = indexAndPublisher.PushBlock(test_helpers.MockLondonBlock, test_helpers.MockLondonReceipts, test_helpers.MockLondonBlock.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer tx.RollbackOnFailure(err)
|
||||
|
||||
err = tx.Submit()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
var _ = AfterSuite(func() { shared.TearDownDB(db) })
|
||||
|
||||
var _ = Describe("API", func() {
|
||||
var (
|
||||
db *sqlx.DB
|
||||
api *eth.PublicEthAPI
|
||||
chainConfig = params.TestChainConfig
|
||||
)
|
||||
// Test db setup, rather than using BeforeEach we only need to setup once since the tests do not mutate the database
|
||||
// Note: if you focus one of the tests be sure to focus this and the defered It()
|
||||
It("test init", func() {
|
||||
var (
|
||||
err error
|
||||
tx interfaces.Batch
|
||||
)
|
||||
|
||||
db = shared.SetupDB()
|
||||
indexAndPublisher := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
|
||||
|
||||
backend, err := eth.NewEthBackend(db, ð.Config{
|
||||
ChainConfig: chainConfig,
|
||||
VMConfig: vm.Config{},
|
||||
RPCGasCap: big.NewInt(10000000000), // Max gas capacity for a rpc call.
|
||||
GroupCacheConfig: ðServerShared.GroupCacheConfig{
|
||||
StateDB: ethServerShared.GroupConfig{
|
||||
Name: "api_test",
|
||||
CacheSizeInMB: 8,
|
||||
CacheExpiryInMins: 60,
|
||||
LogStatsIntervalInSecs: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
api, _ = eth.NewPublicEthAPI(backend, nil, false, false, false)
|
||||
tx, err = indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ccHash := sdtypes.CodeAndCodeHash{
|
||||
Hash: test_helpers.ContractCodeHash,
|
||||
Code: test_helpers.ContractCode,
|
||||
}
|
||||
|
||||
err = indexAndPublisher.PushCodeAndCodeHash(tx, ccHash)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
for _, node := range test_helpers.MockStateNodes {
|
||||
err = indexAndPublisher.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
uncles := test_helpers.MockBlock.Uncles()
|
||||
uncleHashes := make([]common.Hash, len(uncles))
|
||||
for i, uncle := range uncles {
|
||||
uncleHashes[i] = uncle.Hash()
|
||||
}
|
||||
expectedBlock["uncles"] = uncleHashes
|
||||
|
||||
// setting chain config to for london block
|
||||
chainConfig.LondonBlock = big.NewInt(2)
|
||||
indexAndPublisher = shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
|
||||
|
||||
tx, err = indexAndPublisher.PushBlock(test_helpers.MockLondonBlock, test_helpers.MockLondonReceipts, test_helpers.MockLondonBlock.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
// Single test db tear down at end of all tests
|
||||
defer It("test teardown", func() { shared.TearDownDB(db) })
|
||||
/*
|
||||
|
||||
Headers and blocks
|
||||
@ -301,7 +288,9 @@ var _ = Describe("API", func() {
|
||||
Describe("eth_blockNumber", func() {
|
||||
It("Retrieves the head block number", func() {
|
||||
bn := api.BlockNumber()
|
||||
Expect(bn).To(Equal(hexutil.Uint64(test_helpers.LondonBlockNum)))
|
||||
ubn := (uint64)(bn)
|
||||
subn := strconv.FormatUint(ubn, 10)
|
||||
Expect(subn).To(Equal(test_helpers.LondonBlockNum.String()))
|
||||
})
|
||||
})
|
||||
|
||||
@ -323,7 +312,7 @@ var _ = Describe("API", func() {
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
transactions := make([]interface{}, len(test_helpers.MockBlock.Transactions()))
|
||||
for i, trx := range test_helpers.MockBlock.Transactions() {
|
||||
transactions[i] = eth.NewRPCTransactionFromBlockHash(test_helpers.MockBlock, trx.Hash(), chainConfig)
|
||||
transactions[i] = eth.NewRPCTransactionFromBlockHash(test_helpers.MockBlock, trx.Hash())
|
||||
}
|
||||
expectedBlock["transactions"] = transactions
|
||||
for key, val := range expectedBlock {
|
||||
@ -338,24 +327,12 @@ var _ = Describe("API", func() {
|
||||
It("Fetch BaseFee from london block by block number, returns `nil` for legacy block", func() {
|
||||
block, err := api.GetBlockByNumber(ctx, number, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
_, ok := block["baseFeePerGas"]
|
||||
_, ok := block["baseFee"]
|
||||
Expect(ok).To(Equal(false))
|
||||
|
||||
block, err = api.GetBlockByNumber(ctx, londonBlockNum, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(block["baseFeePerGas"]).To(Equal((*hexutil.Big)(baseFee)))
|
||||
})
|
||||
It("Retrieves a block by number with uncles in correct order", func() {
|
||||
block, err := api.GetBlockByNumber(ctx, londonBlockNum, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedUncles := []common.Hash{
|
||||
test_helpers.MockLondonUncles[0].Hash(),
|
||||
test_helpers.MockLondonUncles[1].Hash(),
|
||||
}
|
||||
Expect(block["uncles"]).To(Equal(expectedUncles))
|
||||
Expect(block["sha3Uncles"]).To(Equal(test_helpers.MockLondonBlock.UncleHash()))
|
||||
Expect(block["hash"]).To(Equal(test_helpers.MockLondonBlock.Hash()))
|
||||
Expect(block["baseFee"].(*big.Int)).To(Equal(baseFee))
|
||||
})
|
||||
})
|
||||
|
||||
@ -377,7 +354,7 @@ var _ = Describe("API", func() {
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
transactions := make([]interface{}, len(test_helpers.MockBlock.Transactions()))
|
||||
for i, trx := range test_helpers.MockBlock.Transactions() {
|
||||
transactions[i] = eth.NewRPCTransactionFromBlockHash(test_helpers.MockBlock, trx.Hash(), chainConfig)
|
||||
transactions[i] = eth.NewRPCTransactionFromBlockHash(test_helpers.MockBlock, trx.Hash())
|
||||
}
|
||||
expectedBlock["transactions"] = transactions
|
||||
for key, val := range expectedBlock {
|
||||
@ -390,25 +367,13 @@ var _ = Describe("API", func() {
|
||||
Expect(block).To(BeZero())
|
||||
})
|
||||
It("Fetch BaseFee from london block by block hash, returns `nil` for legacy block", func() {
|
||||
block, err := api.GetBlockByHash(ctx, test_helpers.MockBlock.Hash(), false)
|
||||
block, err := api.GetBlockByHash(ctx, test_helpers.MockBlock.Hash(), true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
_, ok := block["baseFeePerGas"]
|
||||
_, ok := block["baseFee"]
|
||||
Expect(ok).To(Equal(false))
|
||||
block, err = api.GetBlockByHash(ctx, test_helpers.MockLondonBlock.Hash(), false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(block["baseFeePerGas"]).To(Equal((*hexutil.Big)(baseFee)))
|
||||
})
|
||||
It("Retrieves a block by hash with uncles in correct order", func() {
|
||||
block, err := api.GetBlockByHash(ctx, test_helpers.MockLondonBlock.Hash(), false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedUncles := []common.Hash{
|
||||
test_helpers.MockLondonUncles[0].Hash(),
|
||||
test_helpers.MockLondonUncles[1].Hash(),
|
||||
}
|
||||
Expect(block["uncles"]).To(Equal(expectedUncles))
|
||||
Expect(block["sha3Uncles"]).To(Equal(test_helpers.MockLondonBlock.UncleHash()))
|
||||
Expect(block["hash"]).To(Equal(test_helpers.MockLondonBlock.Hash()))
|
||||
Expect(block["baseFee"].(*big.Int)).To(Equal(baseFee))
|
||||
})
|
||||
})
|
||||
|
||||
@ -1110,20 +1075,20 @@ var _ = Describe("API", func() {
|
||||
It("Retrieves the eth balance for the provided account address at the block with the provided number", func() {
|
||||
bal, err := api.GetBalance(ctx, test_helpers.AccountAddresss, rpc.BlockNumberOrHashWithNumber(number))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal((*hexutil.Big)(test_helpers.AccountBalance.ToBig())))
|
||||
Expect(bal).To(Equal((*hexutil.Big)(test_helpers.AccountBalance)))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddress, rpc.BlockNumberOrHashWithNumber(number))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal.ToInt().Cmp(common.Big0)).To(Equal(0))
|
||||
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||
})
|
||||
It("Retrieves the eth balance for the provided account address at the block with the provided hash", func() {
|
||||
bal, err := api.GetBalance(ctx, test_helpers.AccountAddresss, rpc.BlockNumberOrHashWithHash(blockHash, true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal((*hexutil.Big)(test_helpers.AccountBalance.ToBig())))
|
||||
Expect(bal).To(Equal((*hexutil.Big)(test_helpers.AccountBalance)))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddress, rpc.BlockNumberOrHashWithHash(blockHash, true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal.ToInt().Cmp(common.Big0)).To(Equal(0))
|
||||
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||
})
|
||||
It("Retrieves the eth balance for the non-existing account address at the block with the provided hash", func() {
|
||||
bal, err := api.GetBalance(ctx, randomAddr, rpc.BlockNumberOrHashWithHash(blockHash, true))
|
||||
1011
pkg/eth/backend.go
1011
pkg/eth/backend.go
File diff suppressed because it is too large
Load Diff
@ -17,35 +17,24 @@
|
||||
package eth
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
nodeiter "github.com/cerc-io/eth-iterator-utils"
|
||||
"github.com/cerc-io/plugeth-statediff/utils"
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-statedb/trie_by_cid/state"
|
||||
)
|
||||
|
||||
var nullHashBytes = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")
|
||||
var emptyCodeHash = crypto.Keccak256([]byte{})
|
||||
|
||||
// These marshalling functions are from internal/ethapi so we have to make our own versions here:
|
||||
|
||||
// RPCMarshalHeader converts the given header to the RPC output.
|
||||
// This function is eth/internal so we have to make our own version here...
|
||||
func RPCMarshalHeader(head *types.Header) map[string]interface{} {
|
||||
result := map[string]interface{}{
|
||||
headerMap := map[string]interface{}{
|
||||
"number": (*hexutil.Big)(head.Number),
|
||||
"hash": head.Hash(),
|
||||
"parentHash": head.ParentHash,
|
||||
@ -57,67 +46,24 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} {
|
||||
"miner": head.Coinbase,
|
||||
"difficulty": (*hexutil.Big)(head.Difficulty),
|
||||
"extraData": hexutil.Bytes(head.Extra),
|
||||
"size": hexutil.Uint64(head.Size()),
|
||||
"gasLimit": hexutil.Uint64(head.GasLimit),
|
||||
"gasUsed": hexutil.Uint64(head.GasUsed),
|
||||
"timestamp": hexutil.Uint64(head.Time),
|
||||
"transactionsRoot": head.TxHash,
|
||||
"receiptsRoot": head.ReceiptHash,
|
||||
}
|
||||
|
||||
if head.BaseFee != nil {
|
||||
result["baseFeePerGas"] = (*hexutil.Big)(head.BaseFee)
|
||||
headerMap["baseFee"] = head.BaseFee
|
||||
}
|
||||
if head.WithdrawalsHash != nil {
|
||||
result["withdrawalsRoot"] = head.WithdrawalsHash
|
||||
}
|
||||
if head.BlobGasUsed != nil {
|
||||
result["blobGasUsed"] = hexutil.Uint64(*head.BlobGasUsed)
|
||||
}
|
||||
if head.ExcessBlobGas != nil {
|
||||
result["excessBlobGas"] = hexutil.Uint64(*head.ExcessBlobGas)
|
||||
}
|
||||
if head.ParentBeaconRoot != nil {
|
||||
result["parentBeaconBlockRoot"] = head.ParentBeaconRoot
|
||||
}
|
||||
return result
|
||||
return headerMap
|
||||
}
|
||||
|
||||
// RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are
|
||||
// returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain
|
||||
// transaction hashes.
|
||||
func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *params.ChainConfig) map[string]interface{} {
|
||||
fields := RPCMarshalHeader(block.Header())
|
||||
fields["size"] = hexutil.Uint64(block.Size())
|
||||
|
||||
if inclTx {
|
||||
formatTx := func(idx int, tx *types.Transaction) interface{} {
|
||||
return tx.Hash()
|
||||
}
|
||||
if fullTx {
|
||||
formatTx = func(idx int, tx *types.Transaction) interface{} {
|
||||
return newRPCTransactionFromBlockIndex(block, uint64(idx), config)
|
||||
}
|
||||
}
|
||||
txs := block.Transactions()
|
||||
transactions := make([]interface{}, len(txs))
|
||||
for i, tx := range txs {
|
||||
transactions[i] = formatTx(i, tx)
|
||||
}
|
||||
fields["transactions"] = transactions
|
||||
}
|
||||
uncles := block.Uncles()
|
||||
uncleHashes := make([]common.Hash, len(uncles))
|
||||
for i, uncle := range uncles {
|
||||
uncleHashes[i] = uncle.Hash()
|
||||
}
|
||||
fields["uncles"] = uncleHashes
|
||||
if block.Header().WithdrawalsHash != nil {
|
||||
fields["withdrawals"] = block.Withdrawals()
|
||||
}
|
||||
return fields
|
||||
}
|
||||
|
||||
// RPCMarshalBlockWithUncleHashes marshals the block with the provided uncle hashes
|
||||
func RPCMarshalBlockWithUncleHashes(block *types.Block, uncleHashes []common.Hash, inclTx bool, fullTx bool, config *params.ChainConfig) (map[string]interface{}, error) {
|
||||
func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) {
|
||||
fields := RPCMarshalHeader(block.Header())
|
||||
fields["size"] = hexutil.Uint64(block.Size())
|
||||
|
||||
@ -127,7 +73,41 @@ func RPCMarshalBlockWithUncleHashes(block *types.Block, uncleHashes []common.Has
|
||||
}
|
||||
if fullTx {
|
||||
formatTx = func(tx *types.Transaction) (interface{}, error) {
|
||||
return NewRPCTransactionFromBlockHash(block, tx.Hash(), config), nil
|
||||
return NewRPCTransactionFromBlockHash(block, tx.Hash()), nil
|
||||
}
|
||||
}
|
||||
txs := block.Transactions()
|
||||
transactions := make([]interface{}, len(txs))
|
||||
var err error
|
||||
for i, tx := range txs {
|
||||
if transactions[i], err = formatTx(tx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
fields["transactions"] = transactions
|
||||
}
|
||||
uncles := block.Uncles()
|
||||
uncleHashes := make([]common.Hash, len(uncles))
|
||||
for i, uncle := range uncles {
|
||||
uncleHashes[i] = uncle.Hash()
|
||||
}
|
||||
fields["uncles"] = uncleHashes
|
||||
|
||||
return fields, nil
|
||||
}
|
||||
|
||||
// RPCMarshalBlockWithUncleHashes marshals the block with the provided uncle hashes
|
||||
func RPCMarshalBlockWithUncleHashes(block *types.Block, uncleHashes []common.Hash, inclTx bool, fullTx bool) (map[string]interface{}, error) {
|
||||
fields := RPCMarshalHeader(block.Header())
|
||||
fields["size"] = hexutil.Uint64(block.Size())
|
||||
|
||||
if inclTx {
|
||||
formatTx := func(tx *types.Transaction) (interface{}, error) {
|
||||
return tx.Hash(), nil
|
||||
}
|
||||
if fullTx {
|
||||
formatTx = func(tx *types.Transaction) (interface{}, error) {
|
||||
return NewRPCTransactionFromBlockHash(block, tx.Hash()), nil
|
||||
}
|
||||
}
|
||||
txs := block.Transactions()
|
||||
@ -145,35 +125,28 @@ func RPCMarshalBlockWithUncleHashes(block *types.Block, uncleHashes []common.Has
|
||||
return fields, nil
|
||||
}
|
||||
|
||||
// newRPCTransactionFromBlockHash returns a transaction that will serialize to the RPC representation.
|
||||
func NewRPCTransactionFromBlockHash(b *types.Block, hash common.Hash, config *params.ChainConfig) *RPCTransaction {
|
||||
// NewRPCTransactionFromBlockHash returns a transaction that will serialize to the RPC representation.
|
||||
func NewRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransaction {
|
||||
for idx, tx := range b.Transactions() {
|
||||
if tx.Hash() == hash {
|
||||
return newRPCTransactionFromBlockIndex(b, uint64(idx), config)
|
||||
return newRPCTransactionFromBlockIndex(b, uint64(idx))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SignerForTx returns an appropriate Signer for this Transaction
|
||||
func SignerForTx(tx *types.Transaction) types.Signer {
|
||||
// NewRPCTransaction returns a transaction that will serialize to the RPC
|
||||
// representation, with the given location metadata set (if available).
|
||||
func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64, baseFee *big.Int) *RPCTransaction {
|
||||
var signer types.Signer
|
||||
if tx.Protected() {
|
||||
signer = types.LatestSignerForChainID(tx.ChainId())
|
||||
} else {
|
||||
signer = types.HomesteadSigner{}
|
||||
}
|
||||
return signer
|
||||
}
|
||||
|
||||
// NewRPCTransaction returns a transaction that will serialize to the RPC
|
||||
// representation, with the given location metadata set (if available).
|
||||
func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, blockTime uint64, index uint64, baseFee *big.Int, config *params.ChainConfig) *RPCTransaction {
|
||||
signer := types.MakeSigner(config, new(big.Int).SetUint64(blockNumber), blockTime)
|
||||
from, _ := types.Sender(signer, tx)
|
||||
v, r, s := tx.RawSignatureValues()
|
||||
result := &RPCTransaction{
|
||||
Type: hexutil.Uint64(tx.Type()),
|
||||
From: from,
|
||||
Gas: hexutil.Uint64(tx.Gas()),
|
||||
GasPrice: (*hexutil.Big)(tx.GasPrice()),
|
||||
@ -182,6 +155,7 @@ func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
|
||||
Nonce: hexutil.Uint64(tx.Nonce()),
|
||||
To: tx.To(),
|
||||
Value: (*hexutil.Big)(tx.Value()),
|
||||
Type: hexutil.Uint64(tx.Type()),
|
||||
V: (*hexutil.Big)(v),
|
||||
R: (*hexutil.Big)(r),
|
||||
S: (*hexutil.Big)(s),
|
||||
@ -191,69 +165,29 @@ func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
|
||||
result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber))
|
||||
result.TransactionIndex = (*hexutil.Uint64)(&index)
|
||||
}
|
||||
|
||||
switch tx.Type() {
|
||||
case types.LegacyTxType:
|
||||
// if a legacy transaction has an EIP-155 chain id, include it explicitly
|
||||
if id := tx.ChainId(); id.Sign() != 0 {
|
||||
result.ChainID = (*hexutil.Big)(id)
|
||||
}
|
||||
|
||||
case types.AccessListTxType:
|
||||
al := tx.AccessList()
|
||||
yparity := hexutil.Uint64(v.Sign())
|
||||
result.Accesses = &al
|
||||
result.ChainID = (*hexutil.Big)(tx.ChainId())
|
||||
result.YParity = &yparity
|
||||
|
||||
case types.DynamicFeeTxType:
|
||||
al := tx.AccessList()
|
||||
yparity := hexutil.Uint64(v.Sign())
|
||||
result.Accesses = &al
|
||||
result.ChainID = (*hexutil.Big)(tx.ChainId())
|
||||
result.YParity = &yparity
|
||||
result.GasFeeCap = (*hexutil.Big)(tx.GasFeeCap())
|
||||
result.GasTipCap = (*hexutil.Big)(tx.GasTipCap())
|
||||
// if the transaction has been mined, compute the effective gas price
|
||||
if baseFee != nil && blockHash != (common.Hash{}) {
|
||||
// price = min(gasTipCap + baseFee, gasFeeCap)
|
||||
result.GasPrice = (*hexutil.Big)(effectiveGasPrice(tx, baseFee))
|
||||
// price = min(tip, gasFeeCap - baseFee) + baseFee
|
||||
price := math.BigMin(new(big.Int).Add(tx.GasTipCap(), baseFee), tx.GasFeeCap())
|
||||
result.GasPrice = (*hexutil.Big)(price)
|
||||
} else {
|
||||
result.GasPrice = (*hexutil.Big)(tx.GasFeeCap())
|
||||
result.GasPrice = nil
|
||||
}
|
||||
|
||||
case types.BlobTxType:
|
||||
al := tx.AccessList()
|
||||
yparity := hexutil.Uint64(v.Sign())
|
||||
result.Accesses = &al
|
||||
result.ChainID = (*hexutil.Big)(tx.ChainId())
|
||||
result.YParity = &yparity
|
||||
result.GasFeeCap = (*hexutil.Big)(tx.GasFeeCap())
|
||||
result.GasTipCap = (*hexutil.Big)(tx.GasTipCap())
|
||||
// if the transaction has been mined, compute the effective gas price
|
||||
if baseFee != nil && blockHash != (common.Hash{}) {
|
||||
result.GasPrice = (*hexutil.Big)(effectiveGasPrice(tx, baseFee))
|
||||
} else {
|
||||
result.GasPrice = (*hexutil.Big)(tx.GasFeeCap())
|
||||
}
|
||||
result.MaxFeePerBlobGas = (*hexutil.Big)(tx.BlobGasFeeCap())
|
||||
result.BlobVersionedHashes = tx.BlobHashes()
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// effectiveGasPrice computes the transaction gas fee, based on the given basefee value.
//
//	price = min(gasTipCap + baseFee, gasFeeCap)
func effectiveGasPrice(tx *types.Transaction, baseFee *big.Int) *big.Int {
	fee := tx.GasTipCap()
	fee = fee.Add(fee, baseFee)
	if tx.GasFeeCapIntCmp(fee) < 0 {
		return tx.GasFeeCap()
	}
	return fee
}

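As a quick check of the formula above (price = min(gasTipCap + baseFee, gasFeeCap)), a minimal sketch with made-up numbers; it assumes it sits in the same package as effectiveGasPrice:

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

func exampleEffectiveGasPrice() {
	tx := types.NewTx(&types.DynamicFeeTx{
		GasTipCap: big.NewInt(2_000_000_000),  // 2 gwei tip
		GasFeeCap: big.NewInt(30_000_000_000), // 30 gwei cap
	})
	baseFee := big.NewInt(25_000_000_000) // 25 gwei
	// tip + baseFee = 27 gwei, which is below the 30 gwei cap, so 27 gwei wins.
	fmt.Println(effectiveGasPrice(tx, baseFee)) // 27000000000
}
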
type rpcBlock struct {
|
||||
Hash common.Hash `json:"hash"`
|
||||
Transactions []rpcTransaction `json:"transactions"`
|
||||
@ -315,25 +249,25 @@ func getBlockAndUncleHashes(cli *rpc.Client, ctx context.Context, method string,
|
||||
return types.NewBlockWithHeader(head).WithBody(txs, nil), body.UncleHashes, nil
|
||||
}
|
||||
|
||||
// newRPCTransactionFromBlockIndex returns a transaction that will serialize to the RPC representation.
|
||||
func newRPCTransactionFromBlockIndex(b *types.Block, index uint64, config *params.ChainConfig) *RPCTransaction {
|
||||
txs := b.Transactions()
|
||||
if index >= uint64(len(txs)) {
|
||||
return nil
|
||||
}
|
||||
return NewRPCTransaction(txs[index], b.Hash(), b.NumberU64(), b.Time(), index, b.BaseFee(), config)
|
||||
}
|
||||
|
||||
// newRPCRawTransactionFromBlockIndex returns the bytes of a transaction given a block and a transaction index.
func newRPCRawTransactionFromBlockIndex(b *types.Block, index uint64) hexutil.Bytes {
	txs := b.Transactions()
	if index >= uint64(len(txs)) {
		return nil
	}
	blob, _ := txs[index].MarshalBinary()
	blob, _ := rlp.EncodeToBytes(txs[index])
	return blob
}

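The two encodings shown above are not interchangeable for typed (EIP-2718) transactions: MarshalBinary yields the canonical typed-transaction envelope, while plain RLP encoding only matches it for legacy transactions. A small hedged sketch comparing the two (assumed to live alongside the code above):

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func compareTxEncodings(tx *types.Transaction) {
	canonical, _ := tx.MarshalBinary()      // typed-tx envelope (identical to RLP for legacy txs)
	legacyStyle, _ := rlp.EncodeToBytes(tx) // legacy-style RLP encoding
	fmt.Printf("MarshalBinary: %d bytes, rlp.EncodeToBytes: %d bytes\n", len(canonical), len(legacyStyle))
}
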
// newRPCTransactionFromBlockIndex returns a transaction that will serialize to the RPC representation.
|
||||
func newRPCTransactionFromBlockIndex(b *types.Block, index uint64) *RPCTransaction {
|
||||
txs := b.Transactions()
|
||||
if index >= uint64(len(txs)) {
|
||||
return nil
|
||||
}
|
||||
return NewRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index, b.BaseFee())
|
||||
}
|
||||
|
||||
func toFilterArg(q ethereum.FilterQuery) (interface{}, error) {
|
||||
arg := map[string]interface{}{
|
||||
"address": q.Addresses,
|
||||
@ -361,110 +295,3 @@ func toBlockNumArg(number *big.Int) string {
|
||||
}
|
||||
return hexutil.EncodeBig(number)
|
||||
}
|
||||
|
||||
func getIteratorAtPath(t state.Trie, startKey []byte) (trie.NodeIterator, int64, error) {
|
||||
startTime := makeTimestamp()
|
||||
var it trie.NodeIterator
|
||||
var err error
|
||||
|
||||
if len(startKey)%2 != 0 {
|
||||
// Zero-pad for odd-length keys, required by HexToKeyBytes()
|
||||
startKey = append(startKey, 0)
|
||||
it, err = t.NodeIterator(nodeiter.HexToKeyBytes(startKey))
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
} else {
|
||||
it, err = t.NodeIterator(nodeiter.HexToKeyBytes(startKey))
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
// Step to the required node (not required if original startKey was odd-length)
|
||||
it.Next(true)
|
||||
}
|
||||
|
||||
return it, makeTimestamp() - startTime, nil
|
||||
}
|
||||
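A minimal, self-contained illustration of the zero-padding performed above (assumption: the key conversion expects an even number of nibbles); the helper name is made up:

// padNibblePath mirrors the odd-length handling in getIteratorAtPath:
// odd-length nibble paths get a trailing zero nibble appended.
func padNibblePath(path []byte) []byte {
	if len(path)%2 != 0 {
		padded := make([]byte, len(path)+1)
		copy(padded, path)
		return padded
	}
	return path
}
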
|
||||
func fillSliceNodeData(
|
||||
sdb state.Database,
|
||||
nodesMap map[string]string,
|
||||
leavesMap map[string]GetSliceResponseAccount,
|
||||
node StateNode,
|
||||
nodeElements []interface{},
|
||||
storage bool,
|
||||
) (int64, error) {
|
||||
// Populate the nodes map
|
||||
nodeValHash := crypto.Keccak256Hash(node.NodeValue)
|
||||
nodesMap[common.Bytes2Hex(nodeValHash.Bytes())] = common.Bytes2Hex(node.NodeValue)
|
||||
|
||||
// Extract account data if it's a Leaf node
|
||||
leafStartTime := makeTimestamp()
|
||||
if node.NodeType == Leaf && !storage {
|
||||
stateLeafKey, storageRoot, code, err := extractContractAccountInfo(sdb, node, nodeElements)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("GetSlice account lookup error: %w", err)
|
||||
}
|
||||
|
||||
if len(code) > 0 {
|
||||
// Populate the leaves map
|
||||
leavesMap[stateLeafKey] = GetSliceResponseAccount{
|
||||
StorageRoot: storageRoot,
|
||||
EVMCode: common.Bytes2Hex(code),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return makeTimestamp() - leafStartTime, nil
|
||||
}
|
||||
|
||||
func extractContractAccountInfo(sdb state.Database, node StateNode, nodeElements []interface{}) (string, string, []byte, error) {
|
||||
var account types.StateAccount
|
||||
if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
|
||||
return "", "", nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", node.Path, err)
|
||||
}
|
||||
|
||||
if bytes.Equal(account.CodeHash, emptyCodeHash) {
|
||||
return "", "", nil, nil
|
||||
}
|
||||
|
||||
// Extract state leaf key
|
||||
partialPath := utils.CompactToHex(nodeElements[0].([]byte))
|
||||
valueNodePath := append(node.Path, partialPath...)
|
||||
encodedPath := utils.HexToCompact(valueNodePath)
|
||||
leafKey := encodedPath[1:]
|
||||
stateLeafKeyString := common.BytesToHash(leafKey).String()
|
||||
|
||||
storageRootString := account.Root.String()
|
||||
|
||||
// Extract codeHash and get code
|
||||
codeHash := common.BytesToHash(account.CodeHash)
|
||||
codeBytes, err := sdb.ContractCode(common.Address{}, codeHash)
|
||||
if err != nil {
|
||||
return "", "", nil, err
|
||||
}
|
||||
|
||||
return stateLeafKeyString, storageRootString, codeBytes, nil
|
||||
}
|
||||
|
||||
// IsLeaf checks if the node we are at is a leaf
func IsLeaf(elements []interface{}) (bool, error) {
	if len(elements) > 2 {
		return false, nil
	}
	if len(elements) < 2 {
		return false, fmt.Errorf("node cannot be less than two elements in length")
	}
	switch elements[0].([]byte)[0] / 16 {
	case '\x00':
		return false, nil
	case '\x01':
		return false, nil
	case '\x02':
		return true, nil
	case '\x03':
		return true, nil
	default:
		return false, fmt.Errorf("unknown hex prefix")
	}
}

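As a rough usage sketch (the element slices are hand-built for illustration and assume fmt is imported): the high nibble of a node's compact-encoded partial path is what IsLeaf inspects, with prefixes 2 and 3 marking leaves and 0 and 1 marking extensions.

func exampleIsLeaf() {
	// Two-element node whose partial path starts with prefix nibble 0x2 (even-length leaf).
	leafNode := []interface{}{[]byte{0x20, 0xab}, []byte("value")}
	isLeaf, err := IsLeaf(leafNode)
	fmt.Println(isLeaf, err) // true <nil>

	// Prefix nibble 0x1 marks an odd-length extension node.
	extNode := []interface{}{[]byte{0x1a, 0xbc}, []byte("hash")}
	isLeaf, err = IsLeaf(extNode)
	fmt.Println(isLeaf, err) // false <nil>
}
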
759
pkg/eth/cid_retriever.go
Normal file
@ -0,0 +1,759 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/lib/pq"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/shared"
|
||||
)
|
||||
|
||||
// Retriever interface for substituting mocks in tests
|
||||
type Retriever interface {
|
||||
RetrieveFirstBlockNumber() (int64, error)
|
||||
RetrieveLastBlockNumber() (int64, error)
|
||||
Retrieve(filter SubscriptionSettings, blockNumber int64) ([]CIDWrapper, bool, error)
|
||||
}
|
||||
|
||||
// CIDRetriever satisfies the Retriever interface for Ethereum
|
||||
type CIDRetriever struct {
|
||||
db *sqlx.DB
|
||||
gormDB *gorm.DB
|
||||
}
|
||||
|
||||
type IPLDModelRecord struct {
|
||||
models.IPLDModel
|
||||
}
|
||||
|
||||
// TableName overrides the table name used by IPLD
|
||||
func (IPLDModelRecord) TableName() string {
|
||||
return "public.blocks"
|
||||
}
|
||||
|
||||
type HeaderCIDRecord struct {
|
||||
CID string `gorm:"column:cid"`
|
||||
BlockHash string `gorm:"primaryKey"`
|
||||
BlockNumber string `gorm:"primaryKey"`
|
||||
ParentHash string
|
||||
Timestamp uint64
|
||||
StateRoot string
|
||||
TotalDifficulty string `gorm:"column:td"`
|
||||
TxRoot string
|
||||
RctRoot string `gorm:"column:receipt_root"`
|
||||
UncleRoot string
|
||||
Bloom []byte
|
||||
MhKey string
|
||||
|
||||
	// gorm doesn't check if the foreign key exists in the database;
	// the tag is required to eager load relations using Preload.
|
||||
TransactionCIDs []TransactionCIDRecord `gorm:"foreignKey:HeaderID,BlockNumber;references:BlockHash,BlockNumber"`
|
||||
IPLD IPLDModelRecord `gorm:"foreignKey:MhKey,BlockNumber;references:Key,BlockNumber"`
|
||||
}
|
||||
|
||||
// TableName overrides the table name used by HeaderCIDRecord
|
||||
func (HeaderCIDRecord) TableName() string {
|
||||
return "eth.header_cids"
|
||||
}
|
||||
|
||||
type TransactionCIDRecord struct {
|
||||
CID string `gorm:"column:cid"`
|
||||
TxHash string `gorm:"primaryKey"`
|
||||
BlockNumber string `gorm:"primaryKey"`
|
||||
HeaderID string `gorm:"column:header_id"`
|
||||
Index int64
|
||||
Src string
|
||||
Dst string
|
||||
MhKey string
|
||||
IPLD IPLDModelRecord `gorm:"foreignKey:MhKey,BlockNumber;references:Key,BlockNumber"`
|
||||
}
|
||||
|
||||
// TableName overrides the table name used by TransactionCIDRecord
|
||||
func (TransactionCIDRecord) TableName() string {
|
||||
return "eth.transaction_cids"
|
||||
}
|
||||
|
||||
// NewCIDRetriever returns a pointer to a new CIDRetriever which satisfies the Retriever interface
|
||||
func NewCIDRetriever(db *sqlx.DB) *CIDRetriever {
|
||||
gormDB, err := gorm.Open(postgres.New(postgres.Config{
|
||||
Conn: db,
|
||||
}), &gorm.Config{})
|
||||
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return nil
|
||||
}
|
||||
|
||||
return &CIDRetriever{
|
||||
db: db,
|
||||
gormDB: gormDB,
|
||||
}
|
||||
}
|
||||
|
||||
// RetrieveFirstBlockNumber is used to retrieve the first block number in the db
func (ecr *CIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
	var blockNumber int64
	err := ecr.db.Get(&blockNumber, "SELECT block_number FROM eth.header_cids ORDER BY block_number ASC LIMIT 1")
	return blockNumber, err
}

// RetrieveLastBlockNumber is used to retrieve the latest block number in the db
func (ecr *CIDRetriever) RetrieveLastBlockNumber() (int64, error) {
	var blockNumber int64
	err := ecr.db.Get(&blockNumber, "SELECT block_number FROM eth.header_cids ORDER BY block_number DESC LIMIT 1")
	return blockNumber, err
}

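A hedged usage sketch tying the constructor and the two range queries together (the DSN is a placeholder, error handling is abbreviated, and it is assumed to run inside this package):

import (
	"fmt"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

func exampleBlockRange() error {
	db, err := sqlx.Connect("postgres", "postgres://user:pass@localhost:5432/vulcanize_testing?sslmode=disable")
	if err != nil {
		return err
	}
	retriever := NewCIDRetriever(db)
	first, err := retriever.RetrieveFirstBlockNumber()
	if err != nil {
		return err
	}
	last, err := retriever.RetrieveLastBlockNumber()
	if err != nil {
		return err
	}
	fmt.Printf("indexed block range: %d to %d\n", first, last)
	return nil
}
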
// Retrieve is used to retrieve all of the CIDs which conform to the passed StreamFilters
|
||||
func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64) ([]CIDWrapper, bool, error) {
|
||||
log.Debug("retrieving cids")
|
||||
|
||||
// Begin new db tx
|
||||
tx, err := ecr.db.Beginx()
|
||||
if err != nil {
|
||||
return nil, true, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
// Retrieve cached header CIDs at this block height
|
||||
var headers []models.HeaderModel
|
||||
headers, err = ecr.RetrieveHeaderCIDs(tx, blockNumber)
|
||||
if err != nil {
|
||||
log.Error("header cid retrieval error", err)
|
||||
return nil, true, err
|
||||
}
|
||||
cws := make([]CIDWrapper, len(headers))
|
||||
empty := true
|
||||
for i, header := range headers {
|
||||
cw := new(CIDWrapper)
|
||||
cw.BlockNumber = big.NewInt(blockNumber)
|
||||
if !filter.HeaderFilter.Off {
|
||||
cw.Header = header
|
||||
empty = false
|
||||
if filter.HeaderFilter.Uncles {
|
||||
// Retrieve uncle cids for this header id
|
||||
var uncleCIDs []models.UncleModel
|
||||
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, header.BlockHash)
|
||||
if err != nil {
|
||||
log.Error("uncle cid retrieval error")
|
||||
return nil, true, err
|
||||
}
|
||||
cw.Uncles = uncleCIDs
|
||||
}
|
||||
}
|
||||
// Retrieve cached trx CIDs
|
||||
if !filter.TxFilter.Off {
|
||||
cw.Transactions, err = ecr.RetrieveTxCIDs(tx, filter.TxFilter, header.BlockHash)
|
||||
if err != nil {
|
||||
log.Error("transaction cid retrieval error")
|
||||
return nil, true, err
|
||||
}
|
||||
if len(cw.Transactions) > 0 {
|
||||
empty = false
|
||||
}
|
||||
}
|
||||
trxHashes := make([]string, len(cw.Transactions))
|
||||
for j, t := range cw.Transactions {
|
||||
trxHashes[j] = t.TxHash
|
||||
}
|
||||
// Retrieve cached receipt CIDs
|
||||
if !filter.ReceiptFilter.Off {
|
||||
cw.Receipts, err = ecr.RetrieveRctCIDs(tx, filter.ReceiptFilter, 0, header.BlockHash, trxHashes)
|
||||
if err != nil {
|
||||
log.Error("receipt cid retrieval error")
|
||||
return nil, true, err
|
||||
}
|
||||
if len(cw.Receipts) > 0 {
|
||||
empty = false
|
||||
}
|
||||
}
|
||||
// Retrieve cached state CIDs
|
||||
if !filter.StateFilter.Off {
|
||||
cw.StateNodes, err = ecr.RetrieveStateCIDs(tx, filter.StateFilter, header.BlockHash)
|
||||
if err != nil {
|
||||
log.Error("state cid retrieval error")
|
||||
return nil, true, err
|
||||
}
|
||||
if len(cw.StateNodes) > 0 {
|
||||
empty = false
|
||||
}
|
||||
}
|
||||
// Retrieve cached storage CIDs
|
||||
if !filter.StorageFilter.Off {
|
||||
cw.StorageNodes, err = ecr.RetrieveStorageCIDs(tx, filter.StorageFilter, header.BlockHash)
|
||||
if err != nil {
|
||||
log.Error("storage cid retrieval error")
|
||||
return nil, true, err
|
||||
}
|
||||
if len(cw.StorageNodes) > 0 {
|
||||
empty = false
|
||||
}
|
||||
}
|
||||
cws[i] = *cw
|
||||
}
|
||||
|
||||
return cws, empty, err
|
||||
}
|
||||
|
||||
// RetrieveHeaderCIDs retrieves and returns all of the header cids at the provided blockheight
|
||||
func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]models.HeaderModel, error) {
|
||||
log.Debug("retrieving header cids for block ", blockNumber)
|
||||
headers := make([]models.HeaderModel, 0)
|
||||
pgStr := `SELECT CAST(block_number as Text), block_hash, parent_hash, cid, mh_key, CAST(td as Text), node_id,
|
||||
CAST(reward as Text), state_root, uncle_root,tx_root, receipt_root, bloom, timestamp, times_validated, coinbase
|
||||
FROM eth.header_cids
|
||||
WHERE block_number = $1`
|
||||
return headers, tx.Select(&headers, pgStr, blockNumber)
|
||||
}
|
||||
|
||||
// RetrieveUncleCIDsByHeaderID retrieves and returns all of the uncle cids for the provided header
|
||||
func (ecr *CIDRetriever) RetrieveUncleCIDsByHeaderID(tx *sqlx.Tx, headerID string) ([]models.UncleModel, error) {
|
||||
log.Debug("retrieving uncle cids for block id ", headerID)
|
||||
headers := make([]models.UncleModel, 0)
|
||||
pgStr := `SELECT CAST(block_number as Text), header_id, block_hash, parent_hash, cid, mh_key, CAST(reward as text)
|
||||
FROM eth.uncle_cids
|
||||
WHERE header_id = $1`
|
||||
return headers, tx.Select(&headers, pgStr, headerID)
|
||||
}
|
||||
|
||||
// RetrieveTxCIDs retrieves and returns all of the tx CIDs for the provided header ID that conform to the provided filter parameters
|
||||
// also returns the ids for the returned transaction cids
|
||||
func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID string) ([]models.TxModel, error) {
|
||||
log.Debug("retrieving transaction cids for header id ", headerID)
|
||||
args := make([]interface{}, 0, 3)
|
||||
results := make([]models.TxModel, 0)
|
||||
id := 1
|
||||
pgStr := fmt.Sprintf(`SELECT CAST(transaction_cids.block_number as Text), transaction_cids.tx_hash,
|
||||
transaction_cids.header_id, transaction_cids.cid, transaction_cids.mh_key, transaction_cids.dst,
|
||||
transaction_cids.src, transaction_cids.index, transaction_cids.tx_data, transaction_cids.tx_type
|
||||
FROM eth.transaction_cids
|
||||
INNER JOIN eth.header_cids ON (
|
||||
transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
)
|
||||
WHERE header_cids.block_hash = $%d`, id)
|
||||
args = append(args, headerID)
|
||||
id++
|
||||
if len(txFilter.Dst) > 0 {
|
||||
pgStr += fmt.Sprintf(` AND transaction_cids.dst = ANY($%d::VARCHAR(66)[])`, id)
|
||||
args = append(args, pq.Array(txFilter.Dst))
|
||||
id++
|
||||
}
|
||||
if len(txFilter.Src) > 0 {
|
||||
pgStr += fmt.Sprintf(` AND transaction_cids.src = ANY($%d::VARCHAR(66)[])`, id)
|
||||
args = append(args, pq.Array(txFilter.Src))
|
||||
}
|
||||
pgStr += ` ORDER BY transaction_cids.index`
|
||||
return results, tx.Select(&results, pgStr, args...)
|
||||
}
|
||||
|
||||
func topicFilterCondition(id *int, topics [][]string, args []interface{}, pgStr string, first bool) (string, []interface{}) {
|
||||
for i, topicSet := range topics {
|
||||
if len(topicSet) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if !first {
|
||||
pgStr += " AND"
|
||||
} else {
|
||||
first = false
|
||||
}
|
||||
pgStr += fmt.Sprintf(` eth.log_cids.topic%d = ANY ($%d)`, i, *id)
|
||||
args = append(args, pq.Array(topicSet))
|
||||
*id++
|
||||
}
|
||||
return pgStr, args
|
||||
}
|
||||
|
||||
func logFilterCondition(id *int, pgStr string, args []interface{}, rctFilter ReceiptFilter) (string, []interface{}) {
|
||||
if len(rctFilter.LogAddresses) > 0 {
|
||||
pgStr += fmt.Sprintf(` AND eth.log_cids.address = ANY ($%d)`, *id)
|
||||
args = append(args, pq.Array(rctFilter.LogAddresses))
|
||||
*id++
|
||||
}
|
||||
|
||||
// Filter on topics if there are any
|
||||
if hasTopics(rctFilter.Topics) {
|
||||
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, false)
|
||||
}
|
||||
|
||||
return pgStr, args
|
||||
}
|
||||
|
||||
func receiptFilterConditions(id *int, pgStr string, args []interface{}, rctFilter ReceiptFilter, txHashes []string) (string, []interface{}) {
|
||||
rctCond := " AND (receipt_cids.tx_id = ANY ( "
|
||||
logQuery := "SELECT rct_id FROM eth.log_cids WHERE"
|
||||
if len(rctFilter.LogAddresses) > 0 {
|
||||
// Filter on log contract addresses if there are any
|
||||
pgStr += fmt.Sprintf(`%s %s eth.log_cids.address = ANY ($%d)`, rctCond, logQuery, *id)
|
||||
args = append(args, pq.Array(rctFilter.LogAddresses))
|
||||
*id++
|
||||
|
||||
// Filter on topics if there are any
|
||||
if hasTopics(rctFilter.Topics) {
|
||||
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, false)
|
||||
}
|
||||
|
||||
pgStr += ")"
|
||||
|
||||
// Filter on txHashes if there are any, and we are matching txs
|
||||
if rctFilter.MatchTxs && len(txHashes) > 0 {
|
||||
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d)`, *id)
|
||||
args = append(args, pq.Array(txHashes))
|
||||
}
|
||||
pgStr += ")"
|
||||
} else { // If there are no contract addresses to filter on
|
||||
// Filter on topics if there are any
|
||||
if hasTopics(rctFilter.Topics) {
|
||||
pgStr += rctCond + logQuery
|
||||
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, true)
|
||||
pgStr += ")"
|
||||
// Filter on txHashes if there are any, and we are matching txs
|
||||
if rctFilter.MatchTxs && len(txHashes) > 0 {
|
||||
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d)`, *id)
|
||||
args = append(args, pq.Array(txHashes))
|
||||
}
|
||||
pgStr += ")"
|
||||
} else if rctFilter.MatchTxs && len(txHashes) > 0 {
|
||||
// If there are no contract addresses or topics to filter on,
|
||||
// Filter on txHashes if there are any, and we are matching txs
|
||||
pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d)`, *id)
|
||||
args = append(args, pq.Array(txHashes))
|
||||
}
|
||||
}
|
||||
|
||||
return pgStr, args
|
||||
}
|
||||
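To make the generated SQL easier to picture, a small in-package sketch of what receiptFilterConditions appends for a filter with one log address and one topic (the values are made up, and the placeholder numbering assumes no earlier query parameters):

func exampleReceiptFilterSQL() {
	id := 1
	filter := ReceiptFilter{
		LogAddresses: []string{"0x0000000000000000000000000000000000000001"},
		Topics:       [][]string{{"0x00000000000000000000000000000000000000000000000000000000000000aa"}},
	}
	pgStr, args := receiptFilterConditions(&id, "", nil, filter, nil)
	// Roughly: " AND (receipt_cids.tx_id = ANY ( SELECT rct_id FROM eth.log_cids WHERE
	//           eth.log_cids.address = ANY ($1) AND eth.log_cids.topic0 = ANY ($2)))"
	fmt.Println(pgStr, len(args)) // appended condition and its 2 bound arguments
}
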
|
||||
// RetrieveFilteredGQLLogs retrieves and returns all the log CIDs for the provided blockHash that conform to the provided
|
||||
// filter parameters.
|
||||
func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockHash *common.Hash) ([]LogResult, error) {
|
||||
log.Debug("retrieving log cids for receipt ids")
|
||||
args := make([]interface{}, 0, 4)
|
||||
id := 1
|
||||
pgStr := `SELECT CAST(eth.log_cids.block_number as Text), eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
|
||||
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
|
||||
eth.log_cids.log_data, eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index, data,
|
||||
eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status, header_cids.block_hash
|
||||
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids, public.blocks
|
||||
WHERE eth.log_cids.rct_id = receipt_cids.tx_id
|
||||
AND eth.log_cids.header_id = eth.receipt_cids.header_id
|
||||
AND eth.log_cids.block_number = eth.receipt_cids.block_number
|
||||
AND receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND receipt_cids.header_id = transaction_cids.header_id
|
||||
AND receipt_cids.block_number = transaction_cids.block_number
|
||||
AND transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
AND log_cids.leaf_mh_key = blocks.key
|
||||
AND log_cids.block_number = blocks.block_number
|
||||
AND header_cids.block_hash = $1`
|
||||
|
||||
args = append(args, blockHash.String())
|
||||
id++
|
||||
|
||||
pgStr, args = logFilterCondition(&id, pgStr, args, rctFilter)
|
||||
pgStr += ` ORDER BY log_cids.index`
|
||||
|
||||
logCIDs := make([]LogResult, 0)
|
||||
err := tx.Select(&logCIDs, pgStr, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return logCIDs, nil
|
||||
}
|
||||
|
||||
// RetrieveFilteredLog retrieves and returns all the log CIDs for the provided blockHeight or blockHash that conform to the provided
|
||||
// filter parameters.
|
||||
func (ecr *CIDRetriever) RetrieveFilteredLog(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash) ([]LogResult, error) {
|
||||
log.Debug("retrieving log cids for receipt ids")
|
||||
args := make([]interface{}, 0, 4)
|
||||
pgStr := `SELECT CAST(eth.log_cids.block_number as Text), eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
|
||||
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
|
||||
eth.log_cids.log_data, eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index,
|
||||
eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status, header_cids.block_hash
|
||||
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
||||
WHERE eth.log_cids.rct_id = receipt_cids.tx_id
|
||||
AND eth.log_cids.header_id = eth.receipt_cids.header_id
|
||||
AND eth.log_cids.block_number = eth.receipt_cids.block_number
|
||||
AND receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND receipt_cids.header_id = transaction_cids.header_id
|
||||
AND receipt_cids.block_number = transaction_cids.block_number
|
||||
AND transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number`
|
||||
id := 1
|
||||
if blockNumber > 0 {
|
||||
pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
|
||||
args = append(args, blockNumber)
|
||||
id++
|
||||
}
|
||||
if blockHash != nil {
|
||||
pgStr += fmt.Sprintf(` AND header_cids.block_hash = $%d`, id)
|
||||
args = append(args, blockHash.String())
|
||||
id++
|
||||
}
|
||||
|
||||
pgStr, args = logFilterCondition(&id, pgStr, args, rctFilter)
|
||||
pgStr += ` ORDER BY log_cids.index`
|
||||
|
||||
logCIDs := make([]LogResult, 0)
|
||||
err := tx.Select(&logCIDs, pgStr, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return logCIDs, nil
|
||||
}
|
||||
|
||||
// RetrieveRctCIDs retrieves and returns all of the rct cids at the provided blockheight or block hash that conform to the provided
|
||||
// filter parameters and correspond to the provided tx ids
|
||||
func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash string, txHashes []string) ([]models.ReceiptModel, error) {
|
||||
log.Debug("retrieving receipt cids for block ", blockNumber)
|
||||
args := make([]interface{}, 0, 5)
|
||||
pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.header_id, receipt_cids.tx_id,
|
||||
receipt_cids.leaf_cid, receipt_cids.leaf_mh_key, receipt_cids.contract, receipt_cids.contract_hash
|
||||
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
||||
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND receipt_cids.header_id = transaction_cids.header_id
|
||||
AND receipt_cids.block_number = transaction_cids.block_number
|
||||
AND transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number`
|
||||
id := 1
|
||||
if blockNumber > 0 {
|
||||
pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
|
||||
args = append(args, blockNumber)
|
||||
id++
|
||||
}
|
||||
if blockHash != "" {
|
||||
pgStr += fmt.Sprintf(` AND header_cids.block_hash = $%d`, id)
|
||||
args = append(args, blockHash)
|
||||
id++
|
||||
}
|
||||
|
||||
pgStr, args = receiptFilterConditions(&id, pgStr, args, rctFilter, txHashes)
|
||||
|
||||
pgStr += ` ORDER BY transaction_cids.index`
|
||||
receiptCIDs := make([]models.ReceiptModel, 0)
|
||||
return receiptCIDs, tx.Select(&receiptCIDs, pgStr, args...)
|
||||
}
|
||||
|
||||
func hasTopics(topics [][]string) bool {
|
||||
for _, topicSet := range topics {
|
||||
if len(topicSet) > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// RetrieveStateCIDs retrieves and returns all of the state node cids at the provided header ID that conform to the provided filter parameters
|
||||
func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, headerID string) ([]models.StateNodeModel, error) {
|
||||
log.Debug("retrieving state cids for header id ", headerID)
|
||||
args := make([]interface{}, 0, 2)
|
||||
pgStr := `SELECT CAST(state_cids.block_number as Text), state_cids.header_id,
|
||||
state_cids.state_leaf_key, state_cids.node_type, state_cids.cid, state_cids.mh_key, state_cids.state_path
|
||||
FROM eth.state_cids
|
||||
INNER JOIN eth.header_cids ON (
|
||||
state_cids.header_id = header_cids.block_hash
|
||||
AND state_cids.block_number = header_cids.block_number
|
||||
)
|
||||
WHERE header_cids.block_hash = $1`
|
||||
args = append(args, headerID)
|
||||
addrLen := len(stateFilter.Addresses)
|
||||
if addrLen > 0 {
|
||||
keys := make([]string, addrLen)
|
||||
for i, addr := range stateFilter.Addresses {
|
||||
keys[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes()).String()
|
||||
}
|
||||
pgStr += ` AND state_cids.state_leaf_key = ANY($2::VARCHAR(66)[])`
|
||||
args = append(args, pq.Array(keys))
|
||||
}
|
||||
if !stateFilter.IntermediateNodes {
|
||||
pgStr += ` AND state_cids.node_type = 2`
|
||||
}
|
||||
stateNodeCIDs := make([]models.StateNodeModel, 0)
|
||||
return stateNodeCIDs, tx.Select(&stateNodeCIDs, pgStr, args...)
|
||||
}
|
||||
|
||||
// RetrieveStorageCIDs retrieves and returns all of the storage node cids at the provided header id that conform to the provided filter parameters
|
||||
func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, headerID string) ([]models.StorageNodeWithStateKeyModel, error) {
|
||||
log.Debug("retrieving storage cids for header id ", headerID)
|
||||
args := make([]interface{}, 0, 3)
|
||||
pgStr := `SELECT CAST(storage_cids.block_number as Text), storage_cids.header_id, storage_cids.storage_leaf_key,
|
||||
storage_cids.node_type, storage_cids.cid, storage_cids.mh_key, storage_cids.storage_path, storage_cids.state_path,
|
||||
state_cids.state_leaf_key
|
||||
FROM eth.storage_cids, eth.state_cids, eth.header_cids
|
||||
WHERE storage_cids.header_id = state_cids.header_id
|
||||
AND storage_cids.state_path = state_cids.state_path
|
||||
AND storage_cids.block_number = state_cids.block_number
|
||||
AND state_cids.header_id = header_cids.block_hash
|
||||
AND state_cids.block_number = header_cids.block_number
|
||||
AND header_cids.block_hash = $1`
|
||||
args = append(args, headerID)
|
||||
id := 2
|
||||
addrLen := len(storageFilter.Addresses)
|
||||
if addrLen > 0 {
|
||||
keys := make([]string, addrLen)
|
||||
for i, addr := range storageFilter.Addresses {
|
||||
keys[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes()).String()
|
||||
}
|
||||
pgStr += fmt.Sprintf(` AND state_cids.state_leaf_key = ANY($%d::VARCHAR(66)[])`, id)
|
||||
args = append(args, pq.Array(keys))
|
||||
id++
|
||||
}
|
||||
if len(storageFilter.StorageKeys) > 0 {
|
||||
pgStr += fmt.Sprintf(` AND storage_cids.storage_leaf_key = ANY($%d::VARCHAR(66)[])`, id)
|
||||
args = append(args, pq.Array(storageFilter.StorageKeys))
|
||||
}
|
||||
if !storageFilter.IntermediateNodes {
|
||||
pgStr += ` AND storage_cids.node_type = 2`
|
||||
}
|
||||
storageNodeCIDs := make([]models.StorageNodeWithStateKeyModel, 0)
|
||||
return storageNodeCIDs, tx.Select(&storageNodeCIDs, pgStr, args...)
|
||||
}
|
||||
|
||||
// RetrieveBlockByHash returns all of the CIDs needed to compose an entire block, for a given block hash
|
||||
func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (models.HeaderModel, []models.UncleModel, []models.TxModel, []models.ReceiptModel, error) {
|
||||
log.Debug("retrieving block cids for block hash ", blockHash.String())
|
||||
|
||||
// Begin new db tx
|
||||
tx, err := ecr.db.Beginx()
|
||||
if err != nil {
|
||||
return models.HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
var headerCID models.HeaderModel
|
||||
headerCID, err = ecr.RetrieveHeaderCIDByHash(tx, blockHash)
|
||||
if err != nil {
|
||||
log.Error("header cid retrieval error")
|
||||
return models.HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
blockNumber, err := strconv.ParseInt(headerCID.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return models.HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
var uncleCIDs []models.UncleModel
|
||||
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID.BlockHash)
|
||||
if err != nil {
|
||||
log.Error("uncle cid retrieval error")
|
||||
return models.HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
var txCIDs []models.TxModel
|
||||
txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID.BlockHash, blockNumber)
|
||||
if err != nil {
|
||||
log.Error("tx cid retrieval error")
|
||||
return models.HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
txHashes := make([]string, len(txCIDs))
|
||||
for i, txCID := range txCIDs {
|
||||
txHashes[i] = txCID.TxHash
|
||||
}
|
||||
var rctCIDs []models.ReceiptModel
|
||||
rctCIDs, err = ecr.RetrieveReceiptCIDsByByHeaderIDAndTxIDs(tx, headerCID.BlockHash, txHashes, blockNumber)
|
||||
if err != nil {
|
||||
log.Error("rct cid retrieval error")
|
||||
}
|
||||
return headerCID, uncleCIDs, txCIDs, rctCIDs, err
|
||||
}
|
||||
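For orientation, a short hypothetical sketch of consuming the block-level lookup above (the hash value is a placeholder):

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func exampleRetrieveBlock(retriever *CIDRetriever) error {
	hash := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000") // placeholder
	header, uncles, txs, rcts, err := retriever.RetrieveBlockByHash(hash)
	if err != nil {
		return err
	}
	fmt.Printf("header CID %s: %d uncles, %d txs, %d receipts\n",
		header.CID, len(uncles), len(txs), len(rcts))
	return nil
}
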
|
||||
// RetrieveBlockByNumber returns all of the CIDs needed to compose an entire block, for a given block number
|
||||
func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (models.HeaderModel, []models.UncleModel, []models.TxModel, []models.ReceiptModel, error) {
|
||||
log.Debug("retrieving block cids for block number ", blockNumber)
|
||||
|
||||
// Begin new db tx
|
||||
tx, err := ecr.db.Beginx()
|
||||
if err != nil {
|
||||
return models.HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
var headerCID []models.HeaderModel
|
||||
headerCID, err = ecr.RetrieveHeaderCIDs(tx, blockNumber)
|
||||
if err != nil {
|
||||
log.Error("header cid retrieval error")
|
||||
return models.HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
if len(headerCID) < 1 {
|
||||
return models.HeaderModel{}, nil, nil, nil, fmt.Errorf("header cid retrieval error, no header CIDs found at block %d", blockNumber)
|
||||
}
|
||||
var uncleCIDs []models.UncleModel
|
||||
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID[0].BlockHash)
|
||||
if err != nil {
|
||||
log.Error("uncle cid retrieval error")
|
||||
return models.HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
var txCIDs []models.TxModel
|
||||
txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].BlockHash, blockNumber)
|
||||
if err != nil {
|
||||
log.Error("tx cid retrieval error")
|
||||
return models.HeaderModel{}, nil, nil, nil, err
|
||||
}
|
||||
txHashes := make([]string, len(txCIDs))
|
||||
for i, txCID := range txCIDs {
|
||||
txHashes[i] = txCID.TxHash
|
||||
}
|
||||
var rctCIDs []models.ReceiptModel
|
||||
rctCIDs, err = ecr.RetrieveReceiptCIDsByByHeaderIDAndTxIDs(tx, headerCID[0].BlockHash, txHashes, blockNumber)
|
||||
if err != nil {
|
||||
log.Error("rct cid retrieval error")
|
||||
}
|
||||
return headerCID[0], uncleCIDs, txCIDs, rctCIDs, err
|
||||
}
|
||||
|
||||
// RetrieveHeaderCIDByHash returns the header for the given block hash
|
||||
func (ecr *CIDRetriever) RetrieveHeaderCIDByHash(tx *sqlx.Tx, blockHash common.Hash) (models.HeaderModel, error) {
|
||||
log.Debug("retrieving header cids for block hash ", blockHash.String())
|
||||
pgStr := `SELECT block_hash, CAST(block_number as Text), parent_hash, cid, mh_key, CAST(td as Text),
|
||||
state_root, uncle_root, tx_root, receipt_root, bloom, timestamp FROM eth.header_cids
|
||||
WHERE block_hash = $1`
|
||||
var headerCID models.HeaderModel
|
||||
return headerCID, tx.Get(&headerCID, pgStr, blockHash.String())
|
||||
}
|
||||
|
||||
// RetrieveTxCIDsByHeaderID retrieves all tx CIDs for the given header id
|
||||
func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID string, blockNumber int64) ([]models.TxModel, error) {
|
||||
log.Debug("retrieving tx cids for block id ", headerID)
|
||||
pgStr := `SELECT CAST(block_number as Text), header_id, index, tx_hash, cid, mh_key,
|
||||
dst, src, tx_data, tx_type, value
|
||||
FROM eth.transaction_cids
|
||||
WHERE header_id = $1 AND block_number = $2
|
||||
ORDER BY index`
|
||||
var txCIDs []models.TxModel
|
||||
return txCIDs, tx.Select(&txCIDs, pgStr, headerID, blockNumber)
|
||||
}
|
||||
|
||||
// RetrieveReceiptCIDsByByHeaderIDAndTxIDs retrieves receipt CIDs by their associated tx IDs for the given header id
|
||||
func (ecr *CIDRetriever) RetrieveReceiptCIDsByByHeaderIDAndTxIDs(tx *sqlx.Tx, headerID string, txHashes []string, blockNumber int64) ([]models.ReceiptModel, error) {
|
||||
log.Debugf("retrieving receipt cids for tx hashes %v", txHashes)
|
||||
pgStr := `SELECT CAST(receipt_cids.block_number as Text), receipt_cids.header_id, receipt_cids.tx_id, receipt_cids.leaf_cid,
|
||||
receipt_cids.leaf_mh_key, receipt_cids.contract, receipt_cids.contract_hash
|
||||
FROM eth.receipt_cids, eth.transaction_cids
|
||||
WHERE tx_id = ANY($2)
|
||||
AND receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND receipt_cids.header_id = transaction_cids.header_id
|
||||
AND receipt_cids.block_number = transaction_cids.block_number
|
||||
AND transaction_cids.header_id = $1
|
||||
AND transaction_cids.block_number = $3
|
||||
ORDER BY transaction_cids.index`
|
||||
var rctCIDs []models.ReceiptModel
|
||||
return rctCIDs, tx.Select(&rctCIDs, pgStr, headerID, pq.Array(txHashes), blockNumber)
|
||||
}
|
||||
|
||||
// RetrieveHeaderAndTxCIDsByBlockNumber retrieves header CIDs and their associated tx CIDs by block number
|
||||
func (ecr *CIDRetriever) RetrieveHeaderAndTxCIDsByBlockNumber(blockNumber int64) ([]HeaderCIDRecord, error) {
|
||||
log.Debug("retrieving header cids and tx cids for block number ", blockNumber)
|
||||
|
||||
var headerCIDs []HeaderCIDRecord
|
||||
|
||||
// https://github.com/go-gorm/gorm/issues/4083#issuecomment-778883283
|
||||
// Will use join for TransactionCIDs once preload for 1:N is supported.
|
||||
err := ecr.gormDB.Preload("TransactionCIDs", func(tx *gorm.DB) *gorm.DB {
|
||||
return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id", "block_number")
|
||||
}).Joins("IPLD").Find(&headerCIDs, "header_cids.block_number = ?", blockNumber).Error
|
||||
|
||||
if err != nil {
|
||||
log.Error("header cid retrieval error")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return headerCIDs, nil
|
||||
}
|
||||
|
||||
// RetrieveHeaderAndTxCIDsByBlockHash retrieves the header CID and its associated tx CIDs by block hash
|
||||
func (ecr *CIDRetriever) RetrieveHeaderAndTxCIDsByBlockHash(blockHash common.Hash) (HeaderCIDRecord, error) {
|
||||
log.Debug("retrieving header cid and tx cids for block hash ", blockHash.String())
|
||||
|
||||
var headerCIDs []HeaderCIDRecord
|
||||
|
||||
// https://github.com/go-gorm/gorm/issues/4083#issuecomment-778883283
|
||||
// Will use join for TransactionCIDs once preload for 1:N is supported.
|
||||
err := ecr.gormDB.Preload("TransactionCIDs", func(tx *gorm.DB) *gorm.DB {
|
||||
return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id", "block_number")
|
||||
}).Joins("IPLD").Find(&headerCIDs, "block_hash = ?", blockHash.String()).Error
|
||||
if err != nil {
|
||||
log.Error("header cid retrieval error")
|
||||
return HeaderCIDRecord{}, err
|
||||
}
|
||||
|
||||
if len(headerCIDs) == 0 {
|
||||
return HeaderCIDRecord{}, errHeaderHashNotFound
|
||||
} else if len(headerCIDs) > 1 {
|
||||
return HeaderCIDRecord{}, errMultipleHeadersForHash
|
||||
}
|
||||
|
||||
return headerCIDs[0], nil
|
||||
}
|
||||
|
||||
// RetrieveTxCIDByHash returns the tx for the given tx hash
|
||||
func (ecr *CIDRetriever) RetrieveTxCIDByHash(txHash string) (TransactionCIDRecord, error) {
|
||||
log.Debug("retrieving tx cid for tx hash ", txHash)
|
||||
|
||||
var txCIDs []TransactionCIDRecord
|
||||
|
||||
err := ecr.gormDB.Joins("IPLD").Find(&txCIDs, "tx_hash = ? AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))", txHash).Error
|
||||
if err != nil {
|
||||
log.Error("header cid retrieval error")
|
||||
return TransactionCIDRecord{}, err
|
||||
}
|
||||
|
||||
if len(txCIDs) == 0 {
|
||||
return TransactionCIDRecord{}, errTxHashNotFound
|
||||
} else if len(txCIDs) > 1 {
|
||||
// a transaction can be part of only one canonical block
|
||||
return TransactionCIDRecord{}, errTxHashInMultipleBlocks
|
||||
}
|
||||
|
||||
return txCIDs[0], nil
|
||||
}
|
||||
539
pkg/eth/cid_retriever_test.go
Normal file
@ -0,0 +1,539 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth_test
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"github.com/jmoiron/sqlx"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth/test_helpers"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/shared"
|
||||
)
|
||||
|
||||
var (
|
||||
openFilter = eth.SubscriptionSettings{
|
||||
Start: big.NewInt(0),
|
||||
End: big.NewInt(1),
|
||||
HeaderFilter: eth.HeaderFilter{},
|
||||
TxFilter: eth.TxFilter{},
|
||||
ReceiptFilter: eth.ReceiptFilter{},
|
||||
StateFilter: eth.StateFilter{},
|
||||
StorageFilter: eth.StorageFilter{},
|
||||
}
|
||||
rctAddressFilter = eth.SubscriptionSettings{
|
||||
Start: big.NewInt(0),
|
||||
End: big.NewInt(1),
|
||||
HeaderFilter: eth.HeaderFilter{
|
||||
Off: true,
|
||||
},
|
||||
TxFilter: eth.TxFilter{
|
||||
Off: true,
|
||||
},
|
||||
ReceiptFilter: eth.ReceiptFilter{
|
||||
LogAddresses: []string{test_helpers.Address.String()},
|
||||
},
|
||||
StateFilter: eth.StateFilter{
|
||||
Off: true,
|
||||
},
|
||||
StorageFilter: eth.StorageFilter{
|
||||
Off: true,
|
||||
},
|
||||
}
|
||||
rctTopicsFilter = eth.SubscriptionSettings{
|
||||
Start: big.NewInt(0),
|
||||
End: big.NewInt(1),
|
||||
HeaderFilter: eth.HeaderFilter{
|
||||
Off: true,
|
||||
},
|
||||
TxFilter: eth.TxFilter{
|
||||
Off: true,
|
||||
},
|
||||
ReceiptFilter: eth.ReceiptFilter{
|
||||
Topics: [][]string{{"0x0000000000000000000000000000000000000000000000000000000000000004"}},
|
||||
},
|
||||
StateFilter: eth.StateFilter{
|
||||
Off: true,
|
||||
},
|
||||
StorageFilter: eth.StorageFilter{
|
||||
Off: true,
|
||||
},
|
||||
}
|
||||
rctTopicsAndAddressFilter = eth.SubscriptionSettings{
|
||||
Start: big.NewInt(0),
|
||||
End: big.NewInt(1),
|
||||
HeaderFilter: eth.HeaderFilter{
|
||||
Off: true,
|
||||
},
|
||||
TxFilter: eth.TxFilter{
|
||||
Off: true,
|
||||
},
|
||||
ReceiptFilter: eth.ReceiptFilter{
|
||||
Topics: [][]string{
|
||||
{"0x0000000000000000000000000000000000000000000000000000000000000004"},
|
||||
{"0x0000000000000000000000000000000000000000000000000000000000000006"},
|
||||
},
|
||||
LogAddresses: []string{test_helpers.Address.String()},
|
||||
},
|
||||
StateFilter: eth.StateFilter{
|
||||
Off: true,
|
||||
},
|
||||
StorageFilter: eth.StorageFilter{
|
||||
Off: true,
|
||||
},
|
||||
}
|
||||
rctTopicsAndAddressFilterFail = eth.SubscriptionSettings{
|
||||
Start: big.NewInt(0),
|
||||
End: big.NewInt(1),
|
||||
HeaderFilter: eth.HeaderFilter{
|
||||
Off: true,
|
||||
},
|
||||
TxFilter: eth.TxFilter{
|
||||
Off: true,
|
||||
},
|
||||
ReceiptFilter: eth.ReceiptFilter{
|
||||
Topics: [][]string{
|
||||
{"0x0000000000000000000000000000000000000000000000000000000000000004"},
|
||||
{"0x0000000000000000000000000000000000000000000000000000000000000007"}, // This topic won't match on the mocks.Address.String() contract receipt
|
||||
},
|
||||
LogAddresses: []string{test_helpers.Address.String()},
|
||||
},
|
||||
StateFilter: eth.StateFilter{
|
||||
Off: true,
|
||||
},
|
||||
StorageFilter: eth.StorageFilter{
|
||||
Off: true,
|
||||
},
|
||||
}
|
||||
rctAddressesAndTopicFilter = eth.SubscriptionSettings{
|
||||
Start: big.NewInt(0),
|
||||
End: big.NewInt(1),
|
||||
HeaderFilter: eth.HeaderFilter{
|
||||
Off: true,
|
||||
},
|
||||
TxFilter: eth.TxFilter{
|
||||
Off: true,
|
||||
},
|
||||
ReceiptFilter: eth.ReceiptFilter{
|
||||
Topics: [][]string{{"0x0000000000000000000000000000000000000000000000000000000000000005"}},
|
||||
LogAddresses: []string{test_helpers.Address.String(), test_helpers.AnotherAddress.String()},
|
||||
},
|
||||
StateFilter: eth.StateFilter{
|
||||
Off: true,
|
||||
},
|
||||
StorageFilter: eth.StorageFilter{
|
||||
Off: true,
|
||||
},
|
||||
}
|
||||
rctsForAllCollectedTrxs = eth.SubscriptionSettings{
|
||||
Start: big.NewInt(0),
|
||||
End: big.NewInt(1),
|
||||
HeaderFilter: eth.HeaderFilter{
|
||||
Off: true,
|
||||
},
|
||||
TxFilter: eth.TxFilter{}, // Trx filter open so we will collect all trxs, therefore we will also collect all corresponding rcts despite rct filter
|
||||
ReceiptFilter: eth.ReceiptFilter{
|
||||
MatchTxs: true,
|
||||
Topics: [][]string{{"0x0000000000000000000000000000000000000000000000000000000000000006"}}, // Topic0 isn't one of the topic0s we have
|
||||
LogAddresses: []string{"0x0000000000000000000000000000000000000002"}, // Contract isn't one of the contracts we have
|
||||
},
|
||||
StateFilter: eth.StateFilter{
|
||||
Off: true,
|
||||
},
|
||||
StorageFilter: eth.StorageFilter{
|
||||
Off: true,
|
||||
},
|
||||
}
|
||||
rctsForSelectCollectedTrxs = eth.SubscriptionSettings{
|
||||
Start: big.NewInt(0),
|
||||
End: big.NewInt(1),
|
||||
HeaderFilter: eth.HeaderFilter{
|
||||
Off: true,
|
||||
},
|
||||
TxFilter: eth.TxFilter{
|
||||
Dst: []string{test_helpers.AnotherAddress.String()}, // We only filter for one of the trxs so we will only get the one corresponding receipt
|
||||
},
|
||||
ReceiptFilter: eth.ReceiptFilter{
|
||||
MatchTxs: true,
|
||||
Topics: [][]string{{"0x0000000000000000000000000000000000000000000000000000000000000006"}}, // Topic0 isn't one of the topic0s we have
|
||||
LogAddresses: []string{"0x0000000000000000000000000000000000000002"}, // Contract isn't one of the contracts we have
|
||||
},
|
||||
StateFilter: eth.StateFilter{
|
||||
Off: true,
|
||||
},
|
||||
StorageFilter: eth.StorageFilter{
|
||||
Off: true,
|
||||
},
|
||||
}
|
||||
stateFilter = eth.SubscriptionSettings{
|
||||
Start: big.NewInt(0),
|
||||
End: big.NewInt(1),
|
||||
HeaderFilter: eth.HeaderFilter{
|
||||
Off: true,
|
||||
},
|
||||
TxFilter: eth.TxFilter{
|
||||
Off: true,
|
||||
},
|
||||
ReceiptFilter: eth.ReceiptFilter{
|
||||
Off: true,
|
||||
},
|
||||
StateFilter: eth.StateFilter{
|
||||
Addresses: []string{test_helpers.AccountAddresss.Hex()},
|
||||
},
|
||||
StorageFilter: eth.StorageFilter{
|
||||
Off: true,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
var _ = Describe("Retriever", func() {
|
||||
var (
|
||||
db *sqlx.DB
|
||||
diffIndexer interfaces.StateDiffIndexer
|
||||
retriever *eth.CIDRetriever
|
||||
)
|
||||
BeforeEach(func() {
|
||||
db = shared.SetupDB()
|
||||
diffIndexer = shared.SetupTestStateDiffIndexer(ctx, params.TestChainConfig, test_helpers.Genesis.Hash())
|
||||
|
||||
retriever = eth.NewCIDRetriever(db)
|
||||
})
|
||||
AfterEach(func() {
|
||||
shared.TearDownDB(db)
|
||||
})
|
||||
|
||||
Describe("Retrieve", func() {
|
||||
BeforeEach(func() {
|
||||
tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
for _, node := range test_helpers.MockStateNodes {
|
||||
err = diffIndexer.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
It("Retrieves all CIDs for the given blocknumber when provided an open filter", func() {
|
||||
type rctCIDAndMHKeyResult struct {
|
||||
LeafCID string `db:"leaf_cid"`
|
||||
LeafMhKey string `db:"leaf_mh_key"`
|
||||
}
|
||||
expectedRctCIDsAndLeafNodes := make([]rctCIDAndMHKeyResult, 0)
|
||||
pgStr := `SELECT receipt_cids.leaf_cid, receipt_cids.leaf_mh_key FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
||||
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND transaction_cids.header_id = header_cids.block_hash
|
||||
AND header_cids.block_number = $1
|
||||
ORDER BY transaction_cids.index`
|
||||
err := db.Select(&expectedRctCIDsAndLeafNodes, pgStr, test_helpers.BlockNumber.Uint64())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
cids, empty, err := retriever.Retrieve(openFilter, 1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(empty).ToNot(BeTrue())
|
||||
Expect(len(cids)).To(Equal(1))
|
||||
Expect(cids[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
|
||||
|
||||
expectedHeaderCID := test_helpers.MockCIDWrapper.Header
|
||||
expectedHeaderCID.BlockHash = cids[0].Header.BlockHash
|
||||
expectedHeaderCID.NodeID = cids[0].Header.NodeID
|
||||
Expect(cids[0].Header).To(Equal(expectedHeaderCID))
|
||||
Expect(len(cids[0].Transactions)).To(Equal(4))
|
||||
Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[0].CID)).To(BeTrue())
|
||||
Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[1].CID)).To(BeTrue())
|
||||
Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[2].CID)).To(BeTrue())
|
||||
Expect(len(cids[0].Receipts)).To(Equal(4))
|
||||
Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, expectedRctCIDsAndLeafNodes[0].LeafCID)).To(BeTrue())
|
||||
Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, expectedRctCIDsAndLeafNodes[1].LeafCID)).To(BeTrue())
|
||||
Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, expectedRctCIDsAndLeafNodes[2].LeafCID)).To(BeTrue())
|
||||
Expect(len(cids[0].StateNodes)).To(Equal(2))
|
||||
|
||||
for _, stateNode := range cids[0].StateNodes {
|
||||
if stateNode.CID == test_helpers.State1CID.String() {
|
||||
Expect(stateNode.StateKey).To(Equal(common.BytesToHash(test_helpers.ContractLeafKey).Hex()))
|
||||
Expect(stateNode.NodeType).To(Equal(2))
|
||||
Expect(stateNode.Path).To(Equal([]byte{'\x06'}))
|
||||
}
|
||||
if stateNode.CID == test_helpers.State2CID.String() {
|
||||
Expect(stateNode.StateKey).To(Equal(common.BytesToHash(test_helpers.AccountLeafKey).Hex()))
|
||||
Expect(stateNode.NodeType).To(Equal(2))
|
||||
Expect(stateNode.Path).To(Equal([]byte{'\x0c'}))
|
||||
}
|
||||
}
|
||||
Expect(len(cids[0].StorageNodes)).To(Equal(1))
|
||||
expectedStorageNodeCIDs := test_helpers.MockCIDWrapper.StorageNodes
|
||||
expectedStorageNodeCIDs[0].HeaderID = cids[0].StorageNodes[0].HeaderID
|
||||
expectedStorageNodeCIDs[0].StatePath = cids[0].StorageNodes[0].StatePath
|
||||
Expect(cids[0].StorageNodes).To(Equal(expectedStorageNodeCIDs))
|
||||
})
|
||||
|
||||
It("Applies filters from the provided config.Subscription", func() {
|
||||
type rctCIDAndMHKeyResult struct {
|
||||
LeafCID string `db:"leaf_cid"`
|
||||
LeafMhKey string `db:"leaf_mh_key"`
|
||||
}
|
||||
expectedRctCIDsAndLeafNodes := make([]rctCIDAndMHKeyResult, 0)
|
||||
pgStr := `SELECT receipt_cids.leaf_cid, receipt_cids.leaf_mh_key FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
||||
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND transaction_cids.header_id = header_cids.block_hash
|
||||
AND header_cids.block_number = $1
|
||||
ORDER BY transaction_cids.index`
|
||||
err := db.Select(&expectedRctCIDsAndLeafNodes, pgStr, test_helpers.BlockNumber.Uint64())
Expect(err).ToNot(HaveOccurred())

cids1, empty, err := retriever.Retrieve(rctAddressFilter, 1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(empty).ToNot(BeTrue())
|
||||
Expect(len(cids1)).To(Equal(1))
|
||||
Expect(cids1[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
|
||||
Expect(cids1[0].Header).To(Equal(models.HeaderModel{}))
|
||||
Expect(len(cids1[0].Transactions)).To(Equal(0))
|
||||
Expect(len(cids1[0].StateNodes)).To(Equal(0))
|
||||
Expect(len(cids1[0].StorageNodes)).To(Equal(0))
|
||||
Expect(len(cids1[0].Receipts)).To(Equal(1))
|
||||
expectedReceiptCID := test_helpers.MockCIDWrapper.Receipts[0]
|
||||
expectedReceiptCID.TxID = cids1[0].Receipts[0].TxID
|
||||
expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[0].LeafCID
|
||||
expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[0].LeafMhKey
|
||||
Expect(cids1[0].Receipts[0]).To(Equal(expectedReceiptCID))
|
||||
|
||||
cids2, empty, err := retriever.Retrieve(rctTopicsFilter, 1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(empty).ToNot(BeTrue())
|
||||
Expect(len(cids2)).To(Equal(1))
|
||||
Expect(cids2[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
|
||||
Expect(cids2[0].Header).To(Equal(models.HeaderModel{}))
|
||||
Expect(len(cids2[0].Transactions)).To(Equal(0))
|
||||
Expect(len(cids2[0].StateNodes)).To(Equal(0))
|
||||
Expect(len(cids2[0].StorageNodes)).To(Equal(0))
|
||||
Expect(len(cids2[0].Receipts)).To(Equal(1))
|
||||
expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[0]
|
||||
expectedReceiptCID.TxID = cids2[0].Receipts[0].TxID
|
||||
expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[0].LeafCID
|
||||
expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[0].LeafMhKey
|
||||
Expect(cids2[0].Receipts[0]).To(Equal(expectedReceiptCID))
|
||||
|
||||
cids3, empty, err := retriever.Retrieve(rctTopicsAndAddressFilter, 1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(empty).ToNot(BeTrue())
|
||||
Expect(len(cids3)).To(Equal(1))
|
||||
Expect(cids3[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
|
||||
Expect(cids3[0].Header).To(Equal(models.HeaderModel{}))
|
||||
Expect(len(cids3[0].Transactions)).To(Equal(0))
|
||||
Expect(len(cids3[0].StateNodes)).To(Equal(0))
|
||||
Expect(len(cids3[0].StorageNodes)).To(Equal(0))
|
||||
Expect(len(cids3[0].Receipts)).To(Equal(1))
|
||||
expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[0]
|
||||
expectedReceiptCID.TxID = cids3[0].Receipts[0].TxID
|
||||
expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[0].LeafCID
|
||||
expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[0].LeafMhKey
|
||||
Expect(cids3[0].Receipts[0]).To(Equal(expectedReceiptCID))
|
||||
|
||||
cids4, empty, err := retriever.Retrieve(rctAddressesAndTopicFilter, 1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(empty).ToNot(BeTrue())
|
||||
Expect(len(cids4)).To(Equal(1))
|
||||
Expect(cids4[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
|
||||
Expect(cids4[0].Header).To(Equal(models.HeaderModel{}))
|
||||
Expect(len(cids4[0].Transactions)).To(Equal(0))
|
||||
Expect(len(cids4[0].StateNodes)).To(Equal(0))
|
||||
Expect(len(cids4[0].StorageNodes)).To(Equal(0))
|
||||
Expect(len(cids4[0].Receipts)).To(Equal(1))
|
||||
expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[1]
|
||||
expectedReceiptCID.TxID = cids4[0].Receipts[0].TxID
|
||||
expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[1].LeafCID
|
||||
expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[1].LeafMhKey
|
||||
Expect(cids4[0].Receipts[0]).To(Equal(expectedReceiptCID))
|
||||
|
||||
cids5, empty, err := retriever.Retrieve(rctsForAllCollectedTrxs, 1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(empty).ToNot(BeTrue())
|
||||
Expect(len(cids5)).To(Equal(1))
|
||||
Expect(cids5[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
|
||||
Expect(cids5[0].Header).To(Equal(models.HeaderModel{}))
|
||||
Expect(len(cids5[0].Transactions)).To(Equal(4))
|
||||
Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx1CID.String())).To(BeTrue())
|
||||
Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx2CID.String())).To(BeTrue())
|
||||
Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx3CID.String())).To(BeTrue())
|
||||
Expect(len(cids5[0].StateNodes)).To(Equal(0))
|
||||
Expect(len(cids5[0].StorageNodes)).To(Equal(0))
|
||||
Expect(len(cids5[0].Receipts)).To(Equal(4))
|
||||
Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, expectedRctCIDsAndLeafNodes[0].LeafCID)).To(BeTrue())
|
||||
Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, expectedRctCIDsAndLeafNodes[1].LeafCID)).To(BeTrue())
|
||||
Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, expectedRctCIDsAndLeafNodes[2].LeafCID)).To(BeTrue())
|
||||
|
||||
cids6, empty, err := retriever.Retrieve(rctsForSelectCollectedTrxs, 1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(empty).ToNot(BeTrue())
|
||||
Expect(len(cids6)).To(Equal(1))
|
||||
Expect(cids6[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
|
||||
Expect(cids6[0].Header).To(Equal(models.HeaderModel{}))
|
||||
Expect(len(cids6[0].Transactions)).To(Equal(1))
|
||||
expectedTxCID := test_helpers.MockCIDWrapper.Transactions[1]
|
||||
expectedTxCID.TxHash = cids6[0].Transactions[0].TxHash
|
||||
expectedTxCID.HeaderID = cids6[0].Transactions[0].HeaderID
|
||||
Expect(cids6[0].Transactions[0]).To(Equal(expectedTxCID))
|
||||
Expect(len(cids6[0].StateNodes)).To(Equal(0))
|
||||
Expect(len(cids6[0].StorageNodes)).To(Equal(0))
|
||||
Expect(len(cids6[0].Receipts)).To(Equal(1))
|
||||
expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[1]
|
||||
expectedReceiptCID.TxID = cids6[0].Receipts[0].TxID
|
||||
expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[1].LeafCID
|
||||
expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[1].LeafMhKey
|
||||
Expect(cids6[0].Receipts[0]).To(Equal(expectedReceiptCID))
|
||||
|
||||
cids7, empty, err := retriever.Retrieve(stateFilter, 1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(empty).ToNot(BeTrue())
|
||||
Expect(len(cids7)).To(Equal(1))
|
||||
Expect(cids7[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
|
||||
Expect(cids7[0].Header).To(Equal(models.HeaderModel{}))
|
||||
Expect(len(cids7[0].Transactions)).To(Equal(0))
|
||||
Expect(len(cids7[0].Receipts)).To(Equal(0))
|
||||
Expect(len(cids7[0].StorageNodes)).To(Equal(0))
|
||||
Expect(len(cids7[0].StateNodes)).To(Equal(1))
|
||||
Expect(cids7[0].StateNodes[0]).To(Equal(models.StateNodeModel{
|
||||
BlockNumber: "1",
|
||||
HeaderID: cids7[0].StateNodes[0].HeaderID,
|
||||
NodeType: 2,
|
||||
StateKey: common.BytesToHash(test_helpers.AccountLeafKey).Hex(),
|
||||
CID: test_helpers.State2CID.String(),
|
||||
MhKey: test_helpers.State2MhKey,
|
||||
Path: []byte{'\x0c'},
|
||||
}))
|
||||
|
||||
_, empty, err = retriever.Retrieve(rctTopicsAndAddressFilterFail, 1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(empty).To(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
Describe("RetrieveFirstBlockNumber", func() {
|
||||
It("Throws an error if there are no blocks in the database", func() {
|
||||
_, err := retriever.RetrieveFirstBlockNumber()
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
It("Gets the number of the first block that has data in the database", func() {
|
||||
tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
num, err := retriever.RetrieveFirstBlockNumber()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(num).To(Equal(int64(1)))
|
||||
})
|
||||
|
||||
It("Gets the number of the first block that has data in the database", func() {
|
||||
payload := test_helpers.MockConvertedPayload
|
||||
payload.Block = newMockBlock(1010101)
|
||||
tx, err := diffIndexer.PushBlock(payload.Block, payload.Receipts, payload.Block.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
num, err := retriever.RetrieveFirstBlockNumber()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(num).To(Equal(int64(1010101)))
|
||||
})
|
||||
|
||||
It("Gets the number of the first block that has data in the database", func() {
|
||||
payload1 := test_helpers.MockConvertedPayload
|
||||
payload1.Block = newMockBlock(1010101)
|
||||
payload2 := payload1
|
||||
payload2.Block = newMockBlock(5)
|
||||
tx, err := diffIndexer.PushBlock(payload1.Block, payload1.Receipts, payload1.Block.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
tx, err = diffIndexer.PushBlock(payload2.Block, payload2.Receipts, payload2.Block.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
num, err := retriever.RetrieveFirstBlockNumber()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(num).To(Equal(int64(5)))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("RetrieveLastBlockNumber", func() {
|
||||
It("Throws an error if there are no blocks in the database", func() {
|
||||
_, err := retriever.RetrieveLastBlockNumber()
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
It("Gets the number of the latest block that has data in the database", func() {
|
||||
tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
num, err := retriever.RetrieveLastBlockNumber()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(num).To(Equal(int64(1)))
|
||||
})
|
||||
|
||||
It("Gets the number of the latest block that has data in the database", func() {
|
||||
payload := test_helpers.MockConvertedPayload
|
||||
payload.Block = newMockBlock(1010101)
|
||||
tx, err := diffIndexer.PushBlock(payload.Block, payload.Receipts, payload.Block.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
num, err := retriever.RetrieveLastBlockNumber()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(num).To(Equal(int64(1010101)))
|
||||
})
|
||||
|
||||
It("Gets the number of the latest block that has data in the database", func() {
|
||||
payload1 := test_helpers.MockConvertedPayload
|
||||
payload1.Block = newMockBlock(1010101)
|
||||
payload2 := payload1
|
||||
payload2.Block = newMockBlock(5)
|
||||
tx, err := diffIndexer.PushBlock(payload1.Block, payload1.Receipts, payload1.Block.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
tx, err = diffIndexer.PushBlock(payload2.Block, payload2.Receipts, payload2.Block.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
num, err := retriever.RetrieveLastBlockNumber()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(num).To(Equal(int64(1010101)))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func newMockBlock(blockNumber uint64) *types.Block {
|
||||
header := test_helpers.MockHeader
|
||||
header.Number.SetUint64(blockNumber)
|
||||
return types.NewBlock(&header, test_helpers.MockTransactions, nil, test_helpers.MockReceipts, new(trie.Trie))
|
||||
}
|
||||
@ -1,17 +0,0 @@
|
||||
package eth
|
||||
|
||||
import "fmt"
|
||||
|
||||
type RequiresProxyError struct {
|
||||
method string
|
||||
}
|
||||
|
||||
var _ error = RequiresProxyError{}
|
||||
|
||||
func (e RequiresProxyError) SetMethod(method string) {
|
||||
e.method = method
|
||||
}
|
||||
|
||||
func (e RequiresProxyError) Error() string {
|
||||
return fmt.Sprintf("%s requires a configured proxy geth node", e.method)
|
||||
}
|
||||
533
pkg/eth/eth_state_test.go
Normal file
@ -0,0 +1,533 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/statediff"
|
||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
||||
"github.com/jmoiron/sqlx"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth/test_helpers"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/shared"
|
||||
ethServerShared "github.com/vulcanize/ipld-eth-server/v4/pkg/shared"
|
||||
)
|
||||
|
||||
var (
|
||||
parsedABI abi.ABI
|
||||
)
|
||||
|
||||
func init() {
|
||||
// load abi
|
||||
abiBytes, err := ioutil.ReadFile("./test_helpers/abi.json")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
parsedABI, err = abi.JSON(bytes.NewReader(abiBytes))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
var _ = Describe("eth state reading tests", func() {
|
||||
const chainLength = 5
|
||||
var (
|
||||
blocks []*types.Block
|
||||
receipts []types.Receipts
|
||||
chain *core.BlockChain
|
||||
db *sqlx.DB
|
||||
api *eth.PublicEthAPI
|
||||
backend *eth.Backend
|
||||
chainConfig = params.TestChainConfig
|
||||
mockTD = big.NewInt(1337)
|
||||
expectedCanonicalHeader map[string]interface{}
|
||||
)
|
||||
It("test init", func() {
|
||||
// db and type initializations
|
||||
var err error
|
||||
db = shared.SetupDB()
|
||||
transformer := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
|
||||
|
||||
backend, err = eth.NewEthBackend(db, ð.Config{
|
||||
ChainConfig: chainConfig,
|
||||
VMConfig: vm.Config{},
|
||||
RPCGasCap: big.NewInt(10000000000), // Max gas capacity for an RPC call.
|
||||
GroupCacheConfig: ðServerShared.GroupCacheConfig{
|
||||
StateDB: ethServerShared.GroupConfig{
|
||||
Name: "eth_state_test",
|
||||
CacheSizeInMB: 8,
|
||||
CacheExpiryInMins: 60,
|
||||
LogStatsIntervalInSecs: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
api, _ = eth.NewPublicEthAPI(backend, nil, false, false, false)
|
||||
|
||||
// make the test blockchain (and state)
|
||||
blocks, receipts, chain = test_helpers.MakeChain(chainLength, test_helpers.Genesis, test_helpers.TestChainGen)
|
||||
params := statediff.Params{
|
||||
IntermediateStateNodes: true,
|
||||
IntermediateStorageNodes: true,
|
||||
}
|
||||
canonicalHeader := blocks[1].Header()
|
||||
expectedCanonicalHeader = map[string]interface{}{
|
||||
"number": (*hexutil.Big)(canonicalHeader.Number),
|
||||
"hash": canonicalHeader.Hash(),
|
||||
"parentHash": canonicalHeader.ParentHash,
|
||||
"nonce": canonicalHeader.Nonce,
|
||||
"mixHash": canonicalHeader.MixDigest,
|
||||
"sha3Uncles": canonicalHeader.UncleHash,
|
||||
"logsBloom": canonicalHeader.Bloom,
|
||||
"stateRoot": canonicalHeader.Root,
|
||||
"miner": canonicalHeader.Coinbase,
|
||||
"difficulty": (*hexutil.Big)(canonicalHeader.Difficulty),
|
||||
"extraData": hexutil.Bytes([]byte{}),
|
||||
"size": hexutil.Uint64(canonicalHeader.Size()),
|
||||
"gasLimit": hexutil.Uint64(canonicalHeader.GasLimit),
|
||||
"gasUsed": hexutil.Uint64(canonicalHeader.GasUsed),
|
||||
"timestamp": hexutil.Uint64(canonicalHeader.Time),
|
||||
"transactionsRoot": canonicalHeader.TxHash,
|
||||
"receiptsRoot": canonicalHeader.ReceiptHash,
|
||||
"totalDifficulty": (*hexutil.Big)(mockTD),
|
||||
}
|
||||
// iterate over the blocks, generating statediff payloads, and transforming the data into Postgres
|
||||
builder := statediff.NewBuilder(chain.StateCache())
|
||||
for i, block := range blocks {
|
||||
var args statediff.Args
|
||||
var rcts types.Receipts
|
||||
if i == 0 {
|
||||
args = statediff.Args{
|
||||
OldStateRoot: common.Hash{},
|
||||
NewStateRoot: block.Root(),
|
||||
BlockNumber: block.Number(),
|
||||
BlockHash: block.Hash(),
|
||||
}
|
||||
} else {
|
||||
args = statediff.Args{
|
||||
OldStateRoot: blocks[i-1].Root(),
|
||||
NewStateRoot: block.Root(),
|
||||
BlockNumber: block.Number(),
|
||||
BlockHash: block.Hash(),
|
||||
}
|
||||
rcts = receipts[i-1]
|
||||
}
|
||||
diff, err := builder.BuildStateDiffObject(args, params)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
tx, err := transformer.PushBlock(block, rcts, mockTD)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
for _, node := range diff.Nodes {
|
||||
err = transformer.PushStateNode(tx, node, block.Hash().String())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
// Insert some non-canonical data into the database so that we test our ability to discern canonicity
|
||||
indexAndPublisher := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
|
||||
|
||||
tx, err := indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// The non-canonical header has a child
|
||||
tx, err = indexAndPublisher.PushBlock(test_helpers.MockChild, test_helpers.MockReceipts, test_helpers.MockChild.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
hash := sdtypes.CodeAndCodeHash{
|
||||
Hash: test_helpers.CodeHash,
|
||||
Code: test_helpers.ContractCode,
|
||||
}
|
||||
|
||||
err = indexAndPublisher.PushCodeAndCodeHash(tx, hash)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// wait for tx batch process to complete.
|
||||
time.Sleep(10000 * time.Millisecond)
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
defer It("test teardown", func() {
|
||||
shared.TearDownDB(db)
|
||||
chain.Stop()
|
||||
})
|
||||
|
||||
Describe("eth_call", func() {
|
||||
It("Applies call args (tx data) on top of state, returning the result (e.g. a Getter method call)", func() {
|
||||
data, err := parsedABI.Pack("data")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
bdata := hexutil.Bytes(data)
|
||||
callArgs := eth.CallArgs{
|
||||
To: &test_helpers.ContractAddr,
|
||||
Data: &bdata,
|
||||
}
|
||||
// Before contract deployment, returns nil
|
||||
res, err := api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(0), nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(res).To(BeNil())
|
||||
|
||||
res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(1), nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(res).To(BeNil())
|
||||
|
||||
// After deployment
|
||||
res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(2), nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
expectedRes := hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
Expect(res).To(Equal(expectedRes))
|
||||
|
||||
res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(3), nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
Expect(res).To(Equal(expectedRes))
|
||||
|
||||
res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(4), nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000009"))
|
||||
Expect(res).To(Equal(expectedRes))
|
||||
|
||||
res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(5), nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"))
|
||||
Expect(res).To(Equal(expectedRes))
|
||||
})
|
||||
})
|
||||
|
||||
var (
|
||||
expectedContractBalance = (*hexutil.Big)(common.Big0)
|
||||
expectedBankBalanceBlock0 = (*hexutil.Big)(test_helpers.TestBankFunds)
|
||||
|
||||
expectedAcct1BalanceBlock1 = (*hexutil.Big)(big.NewInt(10000))
|
||||
expectedBankBalanceBlock1 = (*hexutil.Big)(new(big.Int).Sub(test_helpers.TestBankFunds, big.NewInt(10000)))
|
||||
|
||||
expectedAcct2BalanceBlock2 = (*hexutil.Big)(big.NewInt(1000))
|
||||
expectedBankBalanceBlock2 = (*hexutil.Big)(new(big.Int).Sub(expectedBankBalanceBlock1.ToInt(), big.NewInt(1000)))
|
||||
|
||||
expectedAcct2BalanceBlock3 = (*hexutil.Big)(new(big.Int).Add(expectedAcct2BalanceBlock2.ToInt(), test_helpers.MiningReward))
|
||||
|
||||
expectedAcct2BalanceBlock4 = (*hexutil.Big)(new(big.Int).Add(expectedAcct2BalanceBlock3.ToInt(), test_helpers.MiningReward))
|
||||
|
||||
expectedAcct1BalanceBlock5 = (*hexutil.Big)(new(big.Int).Add(expectedAcct1BalanceBlock1.ToInt(), test_helpers.MiningReward))
|
||||
)
|
||||
|
||||
Describe("eth_getBalance", func() {
|
||||
It("Retrieves the eth balance for the provided account address at the block with the provided number", func() {
|
||||
bal, err := api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(0))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedBankBalanceBlock0))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedBankBalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(2))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(2))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct2BalanceBlock2))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(2))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedContractBalance))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(2))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedBankBalanceBlock2))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct2BalanceBlock3))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedContractBalance))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedBankBalanceBlock2))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(4))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(4))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(4))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedContractBalance))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(4))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedBankBalanceBlock2))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(5))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct1BalanceBlock5))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(5))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(5))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedContractBalance))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(5))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedBankBalanceBlock2))
|
||||
})
|
||||
It("Retrieves the eth balance for the provided account address at the block with the provided hash", func() {
|
||||
bal, err := api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedBankBalanceBlock0))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedBankBalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock2))

bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedContractBalance))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedBankBalanceBlock2))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct2BalanceBlock3))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedContractBalance))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedBankBalanceBlock2))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedContractBalance))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedBankBalanceBlock2))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct1BalanceBlock5))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedContractBalance))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal(expectedBankBalanceBlock2))
|
||||
})
|
||||
It("Returns `0` for an account it cannot find the balance for an account at the provided block number", func() {
|
||||
bal, err := api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(0))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(0))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(0))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||
})
|
||||
It("Returns `0` for an error for an account it cannot find the balance for an account at the provided block hash", func() {
|
||||
bal, err := api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||
|
||||
})
|
||||
})
|
||||
|
||||
Describe("eth_getCode", func() {
|
||||
It("Retrieves the code for the provided contract address at the block with the provided number", func() {
|
||||
code, err := api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
|
||||
|
||||
code, err = api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(5))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
|
||||
})
|
||||
It("Retrieves the code for the provided contract address at the block with the provided hash", func() {
|
||||
code, err := api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
|
||||
|
||||
code, err = api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
|
||||
})
|
||||
It("Returns `nil` for an account it cannot find the code for", func() {
|
||||
code, err := api.GetCode(ctx, randomAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(code).To(BeEmpty())
|
||||
})
|
||||
It("Returns `nil` for a contract that doesn't exist at this height", func() {
|
||||
code, err := api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(0))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(code).To(BeEmpty())
|
||||
})
|
||||
})
|
||||
|
||||
Describe("eth_getStorageAt", func() {
|
||||
It("Returns empty slice if it tries to access a contract which does not exist", func() {
|
||||
storage, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
|
||||
|
||||
storage, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(1))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
|
||||
})
|
||||
It("Returns empty slice if it tries to access a contract slot which does not exist", func() {
|
||||
storage, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, randomHash.Hex(), rpc.BlockNumberOrHashWithNumber(2))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
|
||||
})
|
||||
It("Retrieves the storage value at the provided contract address and storage leaf key at the block with the provided hash or number", func() {
|
||||
// After deployment
|
||||
val, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(2))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
expectedRes := hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
Expect(val).To(Equal(expectedRes))
|
||||
|
||||
val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
Expect(val).To(Equal(expectedRes))
|
||||
|
||||
val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(4))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000009"))
|
||||
Expect(val).To(Equal(expectedRes))
|
||||
|
||||
val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(5))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(val).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
|
||||
})
|
||||
It("Throws an error for a non-existing block hash", func() {
|
||||
_, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithHash(randomHash, true))
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(MatchError("header for hash not found"))
|
||||
})
|
||||
It("Throws an error for a non-existing block number", func() {
|
||||
_, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(chainLength+1))
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("eth_getHeaderByNumber", func() {
|
||||
It("Finds the canonical header based on the header's weight relative to others at the provided height", func() {
|
||||
header, err := api.GetHeaderByNumber(ctx, number)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(header).To(Equal(expectedCanonicalHeader))
|
||||
})
|
||||
})
|
||||
})
|
||||
@ -17,13 +17,19 @@
|
||||
package eth_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func TestETHSuite(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "ipld-eth-server/pkg/eth")
|
||||
RunSpecs(t, "eth ipld server eth suite test")
|
||||
}
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
logrus.SetOutput(ioutil.Discard)
|
||||
})
|
||||
|
||||
369
pkg/eth/filterer.go
Normal file
@ -0,0 +1,369 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
// Filterer is the interface for substituting mocks in tests
|
||||
type Filterer interface {
|
||||
Filter(filter SubscriptionSettings, payload ConvertedPayload) (*IPLDs, error)
|
||||
}
|
||||
|
||||
// ResponseFilterer satisfies the Filterer interface for ethereum
|
||||
type ResponseFilterer struct{}
|
||||
|
||||
// NewResponseFilterer creates a new ResponseFilterer, which satisfies the Filterer interface
|
||||
func NewResponseFilterer() *ResponseFilterer {
|
||||
return &ResponseFilterer{}
|
||||
}
|
||||
|
||||
// Filter is used to filter through eth data to extract and package requested data into a Payload
|
||||
func (s *ResponseFilterer) Filter(filter SubscriptionSettings, payload ConvertedPayload) (*IPLDs, error) {
|
||||
if checkRange(filter.Start.Int64(), filter.End.Int64(), payload.Block.Number().Int64()) {
|
||||
response := new(IPLDs)
|
||||
response.TotalDifficulty = payload.TotalDifficulty
|
||||
if err := s.filterHeaders(filter.HeaderFilter, response, payload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txHashes, err := s.filterTransactions(filter.TxFilter, response, payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var filterTxs []common.Hash
|
||||
if filter.ReceiptFilter.MatchTxs {
|
||||
filterTxs = txHashes
|
||||
}
|
||||
if err := s.filerReceipts(filter.ReceiptFilter, response, payload, filterTxs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.filterStateAndStorage(filter.StateFilter, filter.StorageFilter, response, payload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response.BlockNumber = payload.Block.Number()
|
||||
return response, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
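For illustration only, a minimal sketch of how this Filter entry point might be driven from inside the package; the filter and payload values are assumed to be built elsewhere (e.g., the way the tests construct them), and none of this is part of the diff:

// sketchFilterUsage is a hypothetical helper, not part of this change.
func sketchFilterUsage(filter SubscriptionSettings, payload ConvertedPayload) (*IPLDs, error) {
    f := NewResponseFilterer()
    iplds, err := f.Filter(filter, payload)
    if err != nil {
        return nil, err
    }
    // Filter returns (nil, nil) when the payload's block number falls outside
    // the filter's [Start, End] range (see checkRange below).
    return iplds, nil
}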
|
||||
func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IPLDs, payload ConvertedPayload) error {
|
||||
if !headerFilter.Off {
|
||||
headerRLP, err := rlp.EncodeToBytes(payload.Block.Header())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cid, err := ipld.RawdataToCid(ipld.MEthHeader, headerRLP, multihash.KECCAK_256)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Header = models.IPLDModel{
|
||||
BlockNumber: payload.Block.Number().String(),
|
||||
Data: headerRLP,
|
||||
Key: cid.String(),
|
||||
}
|
||||
if headerFilter.Uncles {
|
||||
response.Uncles = make([]models.IPLDModel, len(payload.Block.Body().Uncles))
|
||||
for i, uncle := range payload.Block.Body().Uncles {
|
||||
uncleRlp, err := rlp.EncodeToBytes(uncle)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cid, err := ipld.RawdataToCid(ipld.MEthHeader, uncleRlp, multihash.KECCAK_256)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Uncles[i] = models.IPLDModel{
|
||||
BlockNumber: uncle.Number.String(),
|
||||
Data: uncleRlp,
|
||||
Key: cid.String(),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkRange(start, end, actual int64) bool {
|
||||
if (end <= 0 || end >= actual) && start <= actual {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
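A few hedged example calls (hypothetical values, shown only to spell out the range semantics above):

func sketchCheckRange() {
    _ = checkRange(0, 0, 100)   // true:  End <= 0 leaves the range open-ended
    _ = checkRange(0, 50, 100)  // false: block 100 lies beyond End = 50
    _ = checkRange(50, 0, 100)  // true:  Start = 50 <= 100 and End is open
    _ = checkRange(150, 0, 100) // false: Start is above the block height
}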
|
||||
func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLDs, payload ConvertedPayload) ([]common.Hash, error) {
|
||||
var trxHashes []common.Hash
|
||||
if !trxFilter.Off {
|
||||
trxLen := len(payload.Block.Body().Transactions)
|
||||
trxHashes = make([]common.Hash, 0, trxLen)
|
||||
response.Transactions = make([]models.IPLDModel, 0, trxLen)
|
||||
for i, trx := range payload.Block.Body().Transactions {
|
||||
// TODO: check if want corresponding receipt and if we do we must include this transaction
|
||||
if checkTransactionAddrs(trxFilter.Src, trxFilter.Dst, payload.TxMetaData[i].Src, payload.TxMetaData[i].Dst) {
|
||||
trxBuffer := new(bytes.Buffer)
|
||||
if err := trx.EncodeRLP(trxBuffer); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data := trxBuffer.Bytes()
|
||||
cid, err := ipld.RawdataToCid(ipld.MEthTx, data, multihash.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response.Transactions = append(response.Transactions, models.IPLDModel{
|
||||
Data: data,
|
||||
Key: cid.String(),
|
||||
})
|
||||
trxHashes = append(trxHashes, trx.Hash())
|
||||
}
|
||||
}
|
||||
}
|
||||
return trxHashes, nil
|
||||
}
|
||||
|
||||
// checkTransactionAddrs returns true if either the transaction src or dst matches one of the wanted src or dst addresses
|
||||
func checkTransactionAddrs(wantedSrc, wantedDst []string, actualSrc, actualDst string) bool {
|
||||
// If we aren't filtering for any addresses, every transaction is a go
|
||||
if len(wantedDst) == 0 && len(wantedSrc) == 0 {
|
||||
return true
|
||||
}
|
||||
for _, src := range wantedSrc {
|
||||
if src == actualSrc {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for _, dst := range wantedDst {
|
||||
if dst == actualDst {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
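To make the OR semantics concrete, a hypothetical sketch (the addresses are made up and not part of the diff):

func sketchCheckTransactionAddrs() {
    _ = checkTransactionAddrs(nil, nil, "0xabc", "0xdef")               // true: no src/dst filter, every tx passes
    _ = checkTransactionAddrs([]string{"0xabc"}, nil, "0xabc", "0xdef") // true: src matches a wanted src
    _ = checkTransactionAddrs(nil, []string{"0x123"}, "0xabc", "0xdef") // false: dst matches no wanted dst
}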
|
||||
func (s *ResponseFilterer) filerReceipts(receiptFilter ReceiptFilter, response *IPLDs, payload ConvertedPayload, trxHashes []common.Hash) error {
|
||||
if !receiptFilter.Off {
|
||||
response.Receipts = make([]models.IPLDModel, 0, len(payload.Receipts))
|
||||
rctLeafCID, rctIPLDData, err := GetRctLeafNodeData(payload.Receipts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for idx, receipt := range payload.Receipts {
|
||||
// topics is always length 4
|
||||
topics := make([][]string, 4)
|
||||
contracts := make([]string, 0, len(receipt.Logs))
|
||||
for _, l := range receipt.Logs {
|
||||
contracts = append(contracts, l.Address.String())
|
||||
for idx, t := range l.Topics {
|
||||
topics[idx] = append(topics[idx], t.String())
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Verify this filter logic.
|
||||
if checkReceipts(receipt, receiptFilter.Topics, topics, receiptFilter.LogAddresses, contracts, trxHashes) {
|
||||
response.Receipts = append(response.Receipts, models.IPLDModel{
|
||||
BlockNumber: payload.Block.Number().String(),
|
||||
Data: rctIPLDData[idx],
|
||||
Key: rctLeafCID[idx].String(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkReceipts(rct *types.Receipt, wantedTopics, actualTopics [][]string, wantedAddresses []string, actualAddresses []string, wantedTrxHashes []common.Hash) bool {
|
||||
// If we aren't filtering for any topics, contracts, or corresponding trxs then all receipts are a go
|
||||
if len(wantedTopics) == 0 && len(wantedAddresses) == 0 && len(wantedTrxHashes) == 0 {
|
||||
return true
|
||||
}
|
||||
// Keep receipts that are from watched txs
|
||||
for _, wantedTrxHash := range wantedTrxHashes {
|
||||
if bytes.Equal(wantedTrxHash.Bytes(), rct.TxHash.Bytes()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
// If there are no wanted contract addresses, we keep all receipts that match the topic filter
|
||||
if len(wantedAddresses) == 0 {
|
||||
if filterMatch(wantedTopics, actualTopics) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
// If there are wanted contract addresses to filter on
|
||||
for _, wantedAddr := range wantedAddresses {
|
||||
// and this is an address of interest
|
||||
for _, actualAddr := range actualAddresses {
|
||||
if wantedAddr == actualAddr {
|
||||
// we keep the receipt if it matches on the topic filter
|
||||
if filterMatch(wantedTopics, actualTopics) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// filterMatch returns true if the actualTopics conform to the wantedTopics filter
|
||||
func filterMatch(wantedTopics, actualTopics [][]string) bool {
|
||||
// actualTopics should always be length 4, but the members can be nil slices
|
||||
matches := 0
|
||||
for i, actualTopicSet := range actualTopics {
|
||||
if i < len(wantedTopics) && len(wantedTopics[i]) > 0 {
|
||||
// If we have topics in this filter slot, count as a match if one of the topics matches
|
||||
matches += slicesShareString(actualTopicSet, wantedTopics[i])
|
||||
} else {
|
||||
// Filter slot is either empty or doesn't exist => not matching any topics at this slot => counts as a match
|
||||
matches++
|
||||
}
|
||||
}
|
||||
if matches == 4 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// returns 1 if the two slices have a string in common, 0 if they do not
|
||||
func slicesShareString(slice1, slice2 []string) int {
|
||||
for _, str1 := range slice1 {
|
||||
for _, str2 := range slice2 {
|
||||
if str1 == str2 {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
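A hedged sketch of the slot-by-slot matching performed by filterMatch and slicesShareString (the topic strings are hypothetical):

func sketchFilterMatch() {
    wanted := [][]string{{"0xaaa"}, {}, {}, {}}          // constrain topic slot 0 only
    actual := [][]string{{"0xaaa", "0xbbb"}, {}, {}, {}} // a receipt's topics, one slice per slot
    _ = filterMatch(wanted, actual) // true: slot 0 shares "0xaaa"; slots 1-3 are unconstrained
}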
|
||||
// filterStateAndStorage filters state and storage nodes into the response according to the provided filters
|
||||
func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storageFilter StorageFilter, response *IPLDs, payload ConvertedPayload) error {
|
||||
response.StateNodes = make([]StateNode, 0, len(payload.StateNodes))
|
||||
response.StorageNodes = make([]StorageNode, 0)
|
||||
stateAddressFilters := make([]common.Hash, len(stateFilter.Addresses))
|
||||
for i, addr := range stateFilter.Addresses {
|
||||
stateAddressFilters[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes())
|
||||
}
|
||||
storageAddressFilters := make([]common.Hash, len(storageFilter.Addresses))
|
||||
for i, addr := range storageFilter.Addresses {
|
||||
storageAddressFilters[i] = crypto.Keccak256Hash(common.HexToAddress(addr).Bytes())
|
||||
}
|
||||
storageKeyFilters := make([]common.Hash, len(storageFilter.StorageKeys))
|
||||
for i, store := range storageFilter.StorageKeys {
|
||||
storageKeyFilters[i] = common.HexToHash(store)
|
||||
}
|
||||
for _, stateNode := range payload.StateNodes {
|
||||
if !stateFilter.Off && checkNodeKeys(stateAddressFilters, stateNode.LeafKey) {
|
||||
if stateNode.NodeType == sdtypes.Leaf || stateFilter.IntermediateNodes {
|
||||
cid, err := ipld.RawdataToCid(ipld.MEthStateTrie, stateNode.NodeValue, multihash.KECCAK_256)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.StateNodes = append(response.StateNodes, StateNode{
|
||||
StateLeafKey: common.BytesToHash(stateNode.LeafKey),
|
||||
Path: stateNode.Path,
|
||||
IPLD: models.IPLDModel{
|
||||
BlockNumber: payload.Block.Number().String(),
|
||||
Data: stateNode.NodeValue,
|
||||
Key: cid.String(),
|
||||
},
|
||||
Type: stateNode.NodeType,
|
||||
})
|
||||
}
|
||||
}
|
||||
if !storageFilter.Off && checkNodeKeys(storageAddressFilters, stateNode.LeafKey) {
|
||||
for _, storageNode := range payload.StorageNodes[common.Bytes2Hex(stateNode.Path)] {
|
||||
if checkNodeKeys(storageKeyFilters, storageNode.LeafKey) {
|
||||
cid, err := ipld.RawdataToCid(ipld.MEthStorageTrie, storageNode.NodeValue, multihash.KECCAK_256)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.StorageNodes = append(response.StorageNodes, StorageNode{
|
||||
StateLeafKey: common.BytesToHash(stateNode.LeafKey),
|
||||
StorageLeafKey: common.BytesToHash(storageNode.LeafKey),
|
||||
IPLD: models.IPLDModel{
|
||||
BlockNumber: payload.Block.Number().String(),
|
||||
Data: storageNode.NodeValue,
|
||||
Key: cid.String(),
|
||||
},
|
||||
Type: storageNode.NodeType,
|
||||
Path: storageNode.Path,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkNodeKeys(wantedKeys []common.Hash, actualKey []byte) bool {
|
||||
// If we aren't filtering for any specific keys, all nodes are a go
|
||||
if len(wantedKeys) == 0 {
|
||||
return true
|
||||
}
|
||||
for _, key := range wantedKeys {
|
||||
if bytes.Equal(key.Bytes(), actualKey) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetRctLeafNodeData converts the receipts to receipt trie and returns the receipt leaf node IPLD data and
|
||||
// corresponding CIDs
|
||||
func GetRctLeafNodeData(rcts types.Receipts) ([]cid.Cid, [][]byte, error) {
|
||||
receiptTrie := ipld.NewRctTrie()
|
||||
for idx, rct := range rcts {
|
||||
ethRct, err := ipld.NewReceipt(rct)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if err = receiptTrie.Add(idx, ethRct.RawData()); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
rctLeafNodes, keys, err := receiptTrie.GetLeafNodes()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
ethRctleafNodeCids := make([]cid.Cid, len(rctLeafNodes))
|
||||
ethRctleafNodeData := make([][]byte, len(rctLeafNodes))
|
||||
for i, rln := range rctLeafNodes {
|
||||
var idx uint
|
||||
|
||||
r := bytes.NewReader(keys[i].TrieKey)
|
||||
err = rlp.Decode(r, &idx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
ethRctleafNodeCids[idx] = rln.Cid()
|
||||
ethRctleafNodeData[idx] = rln.RawData()
|
||||
}
|
||||
|
||||
return ethRctleafNodeCids, ethRctleafNodeData, nil
|
||||
}
|
||||
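For illustration, a hypothetical consumer of GetRctLeafNodeData that pairs each leaf CID with its raw bytes; this is a sketch under the assumption that callers want a CID-keyed lookup, not part of this change:

func sketchRctLeafNodes(rcts types.Receipts) (map[string][]byte, error) {
    cids, data, err := GetRctLeafNodeData(rcts)
    if err != nil {
        return nil, err
    }
    // The returned slices are paired index-for-index.
    byCID := make(map[string][]byte, len(cids))
    for i, c := range cids {
        byCID[c.String()] = data[i]
    }
    return byCID, nil
}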
208
pkg/eth/filterer_test.go
Normal file
@ -0,0 +1,208 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth/test_helpers"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/shared"
|
||||
)
|
||||
|
||||
var (
|
||||
filterer *eth.ResponseFilterer
|
||||
)
|
||||
|
||||
var _ = Describe("Filterer", func() {
|
||||
Describe("FilterResponse", func() {
|
||||
BeforeEach(func() {
|
||||
filterer = eth.NewResponseFilterer()
|
||||
})
|
||||
|
||||
It("Transcribes all the data from the IPLDPayload into the StreamPayload if given an open filter", func() {
|
||||
iplds, err := filterer.Filter(openFilter, test_helpers.MockConvertedPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(iplds).ToNot(BeNil())
|
||||
Expect(iplds.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
|
||||
Expect(iplds.Header).To(Equal(test_helpers.MockIPLDs.Header))
|
||||
var expectedEmptyUncles []models.IPLDModel
|
||||
Expect(iplds.Uncles).To(Equal(expectedEmptyUncles))
|
||||
Expect(len(iplds.Transactions)).To(Equal(4))
|
||||
Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.Tx1)).To(BeTrue())
|
||||
Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.Tx2)).To(BeTrue())
|
||||
Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.Tx3)).To(BeTrue())
|
||||
Expect(len(iplds.Receipts)).To(Equal(4))
|
||||
Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct1IPLD)).To(BeTrue())
|
||||
Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct2IPLD)).To(BeTrue())
|
||||
Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct3IPLD)).To(BeTrue())
|
||||
Expect(len(iplds.StateNodes)).To(Equal(2))
|
||||
for _, stateNode := range iplds.StateNodes {
|
||||
Expect(stateNode.Type).To(Equal(sdtypes.Leaf))
|
||||
if bytes.Equal(stateNode.StateLeafKey.Bytes(), test_helpers.AccountLeafKey) {
|
||||
Expect(stateNode.IPLD).To(Equal(models.IPLDModel{
|
||||
BlockNumber: test_helpers.BlockNumber.String(),
|
||||
Data: test_helpers.State2IPLD.RawData(),
|
||||
Key: test_helpers.State2IPLD.Cid().String(),
|
||||
}))
|
||||
}
|
||||
if bytes.Equal(stateNode.StateLeafKey.Bytes(), test_helpers.ContractLeafKey) {
|
||||
Expect(stateNode.IPLD).To(Equal(models.IPLDModel{
|
||||
BlockNumber: test_helpers.BlockNumber.String(),
|
||||
Data: test_helpers.State1IPLD.RawData(),
|
||||
Key: test_helpers.State1IPLD.Cid().String(),
|
||||
}))
|
||||
}
|
||||
}
|
||||
Expect(iplds.StorageNodes).To(Equal(test_helpers.MockIPLDs.StorageNodes))
|
||||
})
|
||||
|
||||
It("Applies filters from the provided config.Subscription", func() {
|
||||
iplds1, err := filterer.Filter(rctAddressFilter, test_helpers.MockConvertedPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(iplds1).ToNot(BeNil())
|
||||
Expect(iplds1.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
|
||||
Expect(iplds1.Header).To(Equal(models.IPLDModel{}))
|
||||
Expect(len(iplds1.Uncles)).To(Equal(0))
|
||||
Expect(len(iplds1.Transactions)).To(Equal(0))
|
||||
Expect(len(iplds1.StorageNodes)).To(Equal(0))
|
||||
Expect(len(iplds1.StateNodes)).To(Equal(0))
|
||||
Expect(len(iplds1.Receipts)).To(Equal(1))
|
||||
Expect(iplds1.Receipts[0]).To(Equal(models.IPLDModel{
|
||||
BlockNumber: test_helpers.BlockNumber.String(),
|
||||
Data: test_helpers.Rct1IPLD,
|
||||
Key: test_helpers.Rct1CID.String(),
|
||||
}))
|
||||
|
||||
iplds2, err := filterer.Filter(rctTopicsFilter, test_helpers.MockConvertedPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(iplds2).ToNot(BeNil())
|
||||
Expect(iplds2.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
|
||||
Expect(iplds2.Header).To(Equal(models.IPLDModel{}))
|
||||
Expect(len(iplds2.Uncles)).To(Equal(0))
|
||||
Expect(len(iplds2.Transactions)).To(Equal(0))
|
||||
Expect(len(iplds2.StorageNodes)).To(Equal(0))
|
||||
Expect(len(iplds2.StateNodes)).To(Equal(0))
|
||||
Expect(len(iplds2.Receipts)).To(Equal(1))
|
||||
Expect(iplds2.Receipts[0]).To(Equal(models.IPLDModel{
|
||||
BlockNumber: test_helpers.BlockNumber.String(),
|
||||
Data: test_helpers.Rct1IPLD,
|
||||
Key: test_helpers.Rct1CID.String(),
|
||||
}))
|
||||
|
||||
iplds3, err := filterer.Filter(rctTopicsAndAddressFilter, test_helpers.MockConvertedPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(iplds3).ToNot(BeNil())
|
||||
Expect(iplds3.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
|
||||
Expect(iplds3.Header).To(Equal(models.IPLDModel{}))
|
||||
Expect(len(iplds3.Uncles)).To(Equal(0))
|
||||
Expect(len(iplds3.Transactions)).To(Equal(0))
|
||||
Expect(len(iplds3.StorageNodes)).To(Equal(0))
|
||||
Expect(len(iplds3.StateNodes)).To(Equal(0))
|
||||
Expect(len(iplds3.Receipts)).To(Equal(1))
|
||||
Expect(iplds3.Receipts[0]).To(Equal(models.IPLDModel{
|
||||
BlockNumber: test_helpers.BlockNumber.String(),
|
||||
Data: test_helpers.Rct1IPLD,
|
||||
Key: test_helpers.Rct1CID.String(),
|
||||
}))
|
||||
|
||||
iplds4, err := filterer.Filter(rctAddressesAndTopicFilter, test_helpers.MockConvertedPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(iplds4).ToNot(BeNil())
|
||||
Expect(iplds4.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
|
||||
Expect(iplds4.Header).To(Equal(models.IPLDModel{}))
|
||||
Expect(len(iplds4.Uncles)).To(Equal(0))
|
||||
Expect(len(iplds4.Transactions)).To(Equal(0))
|
||||
Expect(len(iplds4.StorageNodes)).To(Equal(0))
|
||||
Expect(len(iplds4.StateNodes)).To(Equal(0))
|
||||
Expect(len(iplds4.Receipts)).To(Equal(1))
|
||||
Expect(iplds4.Receipts[0]).To(Equal(models.IPLDModel{
|
||||
BlockNumber: test_helpers.BlockNumber.String(),
|
||||
Data: test_helpers.Rct2IPLD,
|
||||
Key: test_helpers.Rct2CID.String(),
|
||||
}))
|
||||
|
||||
iplds5, err := filterer.Filter(rctsForAllCollectedTrxs, test_helpers.MockConvertedPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(iplds5).ToNot(BeNil())
|
||||
Expect(iplds5.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
|
||||
Expect(iplds5.Header).To(Equal(models.IPLDModel{}))
|
||||
Expect(len(iplds5.Uncles)).To(Equal(0))
|
||||
Expect(len(iplds5.Transactions)).To(Equal(4))
|
||||
Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx1)).To(BeTrue())
|
||||
Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx2)).To(BeTrue())
|
||||
Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx3)).To(BeTrue())
|
||||
Expect(len(iplds5.StorageNodes)).To(Equal(0))
|
||||
Expect(len(iplds5.StateNodes)).To(Equal(0))
|
||||
Expect(len(iplds5.Receipts)).To(Equal(4))
|
||||
Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct1IPLD)).To(BeTrue())
|
||||
Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct2IPLD)).To(BeTrue())
|
||||
Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct3IPLD)).To(BeTrue())
|
||||
|
||||
iplds6, err := filterer.Filter(rctsForSelectCollectedTrxs, test_helpers.MockConvertedPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(iplds6).ToNot(BeNil())
|
||||
Expect(iplds6.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
|
||||
Expect(iplds6.Header).To(Equal(models.IPLDModel{}))
|
||||
Expect(len(iplds6.Uncles)).To(Equal(0))
|
||||
Expect(len(iplds6.Transactions)).To(Equal(1))
|
||||
Expect(shared.IPLDsContainBytes(iplds6.Transactions, test_helpers.Tx2)).To(BeTrue())
|
||||
Expect(len(iplds6.StorageNodes)).To(Equal(0))
|
||||
Expect(len(iplds6.StateNodes)).To(Equal(0))
|
||||
Expect(len(iplds6.Receipts)).To(Equal(1))
|
||||
Expect(iplds6.Receipts[0]).To(Equal(models.IPLDModel{
|
||||
BlockNumber: test_helpers.BlockNumber.String(),
|
||||
Data: test_helpers.Rct2IPLD,
|
||||
Key: test_helpers.Rct2CID.String(),
|
||||
}))
|
||||
|
||||
iplds7, err := filterer.Filter(stateFilter, test_helpers.MockConvertedPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(iplds7).ToNot(BeNil())
|
||||
Expect(iplds7.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
|
||||
Expect(iplds7.Header).To(Equal(models.IPLDModel{}))
|
||||
Expect(len(iplds7.Uncles)).To(Equal(0))
|
||||
Expect(len(iplds7.Transactions)).To(Equal(0))
|
||||
Expect(len(iplds7.StorageNodes)).To(Equal(0))
|
||||
Expect(len(iplds7.Receipts)).To(Equal(0))
|
||||
Expect(len(iplds7.StateNodes)).To(Equal(1))
|
||||
Expect(iplds7.StateNodes[0].StateLeafKey.Bytes()).To(Equal(test_helpers.AccountLeafKey))
|
||||
Expect(iplds7.StateNodes[0].IPLD).To(Equal(models.IPLDModel{
|
||||
BlockNumber: test_helpers.BlockNumber.String(),
|
||||
Data: test_helpers.State2IPLD.RawData(),
|
||||
Key: test_helpers.State2IPLD.Cid().String(),
|
||||
}))
|
||||
|
||||
iplds8, err := filterer.Filter(rctTopicsAndAddressFilterFail, test_helpers.MockConvertedPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(iplds8).ToNot(BeNil())
|
||||
Expect(iplds8.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
|
||||
Expect(iplds8.Header).To(Equal(models.IPLDModel{}))
|
||||
Expect(len(iplds8.Uncles)).To(Equal(0))
|
||||
Expect(len(iplds8.Transactions)).To(Equal(0))
|
||||
Expect(len(iplds8.StorageNodes)).To(Equal(0))
|
||||
Expect(len(iplds8.StateNodes)).To(Equal(0))
|
||||
Expect(len(iplds8.Receipts)).To(Equal(0))
|
||||
})
|
||||
})
|
||||
})
|
||||
@ -17,10 +17,20 @@
package eth

import (
	"time"
	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
)

// Timestamp in milliseconds
func makeTimestamp() int64 {
	return time.Now().UnixNano() / int64(time.Millisecond)
func ResolveToNodeType(nodeType int) sdtypes.NodeType {
	switch nodeType {
	case 0:
		return sdtypes.Branch
	case 1:
		return sdtypes.Extension
	case 2:
		return sdtypes.Leaf
	case 3:
		return sdtypes.Removed
	default:
		return sdtypes.Unknown
	}
}
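A minimal usage sketch of the helper above, consuming the integer node_type column that the indexer writes (the helper name and value here are illustrative):

// exampleIsLeaf is an illustrative sketch of consuming the integer node_type column
func exampleIsLeaf(nodeTypeColumn int) bool {
	return ResolveToNodeType(nodeTypeColumn) == sdtypes.Leaf
}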
47 pkg/eth/interfaces.go Normal file
@ -0,0 +1,47 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package eth

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/bloombits"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/rpc"
)

// FilterBackend is the geth interface we need to satisfy to use their filters
type FilterBackend interface {
	ChainDb() ethdb.Database
	HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)
	HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error)
	GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
	GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error)

	SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
	SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
	SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
	SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription
	SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription

	BloomStatus() (uint64, uint64)
	ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)
}
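A minimal sketch of the usual compile-time assertion that a concrete backend satisfies this interface; Backend is a hypothetical type, not one defined in this diff:

// compile-time interface check (Backend is assumed to exist elsewhere)
var _ FilterBackend = (*Backend)(nil)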
248 pkg/eth/ipld_fetcher.go Normal file
@ -0,0 +1,248 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
||||
"github.com/jmoiron/sqlx"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/shared"
|
||||
)
|
||||
|
||||
// Fetcher interface for substituting mocks in tests
|
||||
type Fetcher interface {
|
||||
Fetch(cids CIDWrapper) (*IPLDs, error)
|
||||
}
|
||||
|
||||
// IPLDFetcher satisfies the IPLDFetcher interface for ethereum
|
||||
// It interfaces directly with PG-IPFS
|
||||
type IPLDFetcher struct {
|
||||
db *sqlx.DB
|
||||
}
|
||||
|
||||
// NewIPLDFetcher creates a pointer to a new IPLDFetcher
|
||||
func NewIPLDFetcher(db *sqlx.DB) *IPLDFetcher {
|
||||
return &IPLDFetcher{
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch is the exported method for fetching and returning all the IPLDS specified in the CIDWrapper
|
||||
func (f *IPLDFetcher) Fetch(cids CIDWrapper) (*IPLDs, error) {
|
||||
log.Debug("fetching iplds")
|
||||
iplds := new(IPLDs)
|
||||
var ok bool
|
||||
iplds.TotalDifficulty, ok = new(big.Int).SetString(cids.Header.TotalDifficulty, 10)
|
||||
if !ok {
|
||||
return nil, errors.New("eth fetcher: unable to set total difficulty")
|
||||
}
|
||||
iplds.BlockNumber = cids.BlockNumber
|
||||
|
||||
tx, err := f.db.Beginx()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
iplds.Header, err = f.FetchHeader(tx, cids.Header)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("eth pg fetcher: header fetching error: %s", err.Error())
|
||||
}
|
||||
iplds.Uncles, err = f.FetchUncles(tx, cids.Uncles)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("eth pg fetcher: uncle fetching error: %s", err.Error())
|
||||
}
|
||||
iplds.Transactions, err = f.FetchTrxs(tx, cids.Transactions)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("eth pg fetcher: transaction fetching error: %s", err.Error())
|
||||
}
|
||||
iplds.Receipts, err = f.FetchRcts(tx, cids.Receipts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("eth pg fetcher: receipt fetching error: %s", err.Error())
|
||||
}
|
||||
iplds.StateNodes, err = f.FetchState(tx, cids.StateNodes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("eth pg fetcher: state fetching error: %s", err.Error())
|
||||
}
|
||||
iplds.StorageNodes, err = f.FetchStorage(tx, cids.StorageNodes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("eth pg fetcher: storage fetching error: %s", err.Error())
|
||||
}
|
||||
return iplds, err
|
||||
}
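A minimal usage sketch for the fetcher above, assuming a reachable Postgres instance and a CIDWrapper assembled elsewhere; the connection string is a placeholder:

// exampleFetch is an illustrative sketch, not part of the server wiring;
// it assumes a postgres driver (e.g. lib/pq) is registered by another import
func exampleFetch(cids CIDWrapper) (*IPLDs, error) {
	db, err := sqlx.Connect("postgres", "postgres://localhost:5432/vulcanize_testing?sslmode=disable")
	if err != nil {
		return nil, err
	}
	defer db.Close()
	return NewIPLDFetcher(db).Fetch(cids)
}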
|
||||
|
||||
// FetchHeader fetches header
|
||||
func (f *IPLDFetcher) FetchHeader(tx *sqlx.Tx, c models.HeaderModel) (models.IPLDModel, error) {
|
||||
log.Debug("fetching header ipld")
|
||||
blockNumber, err := strconv.ParseUint(c.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return models.IPLDModel{}, err
|
||||
}
|
||||
|
||||
headerBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, c.MhKey, blockNumber)
|
||||
if err != nil {
|
||||
return models.IPLDModel{}, err
|
||||
}
|
||||
return models.IPLDModel{
|
||||
BlockNumber: c.BlockNumber,
|
||||
Data: headerBytes,
|
||||
Key: c.CID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// FetchUncles fetches uncles
|
||||
func (f *IPLDFetcher) FetchUncles(tx *sqlx.Tx, cids []models.UncleModel) ([]models.IPLDModel, error) {
|
||||
log.Debug("fetching uncle iplds")
|
||||
uncleIPLDs := make([]models.IPLDModel, len(cids))
|
||||
for i, c := range cids {
|
||||
blockNumber, err := strconv.ParseUint(c.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
uncleBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, c.MhKey, blockNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
uncleIPLDs[i] = models.IPLDModel{
|
||||
BlockNumber: c.BlockNumber,
|
||||
Data: uncleBytes,
|
||||
Key: c.CID,
|
||||
}
|
||||
}
|
||||
return uncleIPLDs, nil
|
||||
}
|
||||
|
||||
// FetchTrxs fetches transactions
|
||||
func (f *IPLDFetcher) FetchTrxs(tx *sqlx.Tx, cids []models.TxModel) ([]models.IPLDModel, error) {
|
||||
log.Debug("fetching transaction iplds")
|
||||
trxIPLDs := make([]models.IPLDModel, len(cids))
|
||||
for i, c := range cids {
|
||||
blockNumber, err := strconv.ParseUint(c.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, c.MhKey, blockNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
trxIPLDs[i] = models.IPLDModel{
|
||||
BlockNumber: c.BlockNumber,
|
||||
Data: txBytes,
|
||||
Key: c.CID,
|
||||
}
|
||||
}
|
||||
return trxIPLDs, nil
|
||||
}
|
||||
|
||||
// FetchRcts fetches receipts
|
||||
func (f *IPLDFetcher) FetchRcts(tx *sqlx.Tx, cids []models.ReceiptModel) ([]models.IPLDModel, error) {
|
||||
log.Debug("fetching receipt iplds")
|
||||
rctIPLDs := make([]models.IPLDModel, len(cids))
|
||||
for i, c := range cids {
|
||||
blockNumber, err := strconv.ParseUint(c.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rctBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, c.LeafMhKey, blockNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//nodeVal, err := DecodeLeafNode(rctBytes)
|
||||
rctIPLDs[i] = models.IPLDModel{
|
||||
BlockNumber: c.BlockNumber,
|
||||
Data: rctBytes,
|
||||
Key: c.LeafCID,
|
||||
}
|
||||
}
|
||||
return rctIPLDs, nil
|
||||
}
|
||||
|
||||
// FetchState fetches state nodes
|
||||
func (f *IPLDFetcher) FetchState(tx *sqlx.Tx, cids []models.StateNodeModel) ([]StateNode, error) {
|
||||
log.Debug("fetching state iplds")
|
||||
stateNodes := make([]StateNode, 0, len(cids))
|
||||
for _, stateNode := range cids {
|
||||
if stateNode.CID == "" {
|
||||
continue
|
||||
}
|
||||
blockNumber, err := strconv.ParseUint(stateNode.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stateBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, stateNode.MhKey, blockNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stateNodes = append(stateNodes, StateNode{
|
||||
IPLD: models.IPLDModel{
|
||||
BlockNumber: stateNode.BlockNumber,
|
||||
Data: stateBytes,
|
||||
Key: stateNode.CID,
|
||||
},
|
||||
StateLeafKey: common.HexToHash(stateNode.StateKey),
|
||||
Type: ResolveToNodeType(stateNode.NodeType),
|
||||
Path: stateNode.Path,
|
||||
})
|
||||
}
|
||||
return stateNodes, nil
|
||||
}
|
||||
|
||||
// FetchStorage fetches storage nodes
|
||||
func (f *IPLDFetcher) FetchStorage(tx *sqlx.Tx, cids []models.StorageNodeWithStateKeyModel) ([]StorageNode, error) {
|
||||
log.Debug("fetching storage iplds")
|
||||
storageNodes := make([]StorageNode, 0, len(cids))
|
||||
for _, storageNode := range cids {
|
||||
if storageNode.CID == "" || storageNode.StateKey == "" {
|
||||
continue
|
||||
}
|
||||
blockNumber, err := strconv.ParseUint(storageNode.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
storageBytes, err := shared.FetchIPLDByMhKeyAndBlockNumber(tx, storageNode.MhKey, blockNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
storageNodes = append(storageNodes, StorageNode{
|
||||
IPLD: models.IPLDModel{
|
||||
BlockNumber: storageNode.BlockNumber,
|
||||
Data: storageBytes,
|
||||
Key: storageNode.CID,
|
||||
},
|
||||
StateLeafKey: common.HexToHash(storageNode.StateKey),
|
||||
StorageLeafKey: common.HexToHash(storageNode.StorageKey),
|
||||
Type: ResolveToNodeType(storageNode.NodeType),
|
||||
Path: storageNode.Path,
|
||||
})
|
||||
}
|
||||
return storageNodes, nil
|
||||
}
|
||||
75 pkg/eth/ipld_fetcher_test.go Normal file
@ -0,0 +1,75 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth_test
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
|
||||
"github.com/jmoiron/sqlx"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth/test_helpers"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/shared"
|
||||
)
|
||||
|
||||
var _ = Describe("IPLDFetcher", func() {
|
||||
var (
|
||||
db *sqlx.DB
|
||||
pubAndIndexer interfaces.StateDiffIndexer
|
||||
fetcher *eth.IPLDFetcher
|
||||
)
|
||||
Describe("Fetch", func() {
|
||||
BeforeEach(func() {
|
||||
var (
|
||||
err error
|
||||
tx interfaces.Batch
|
||||
)
|
||||
db = shared.SetupDB()
|
||||
pubAndIndexer = shared.SetupTestStateDiffIndexer(ctx, params.TestChainConfig, test_helpers.Genesis.Hash())
|
||||
|
||||
tx, err = pubAndIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
|
||||
for _, node := range test_helpers.MockStateNodes {
|
||||
err = pubAndIndexer.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
fetcher = eth.NewIPLDFetcher(db)
|
||||
|
||||
})
|
||||
AfterEach(func() {
|
||||
shared.TearDownDB(db)
|
||||
})
|
||||
|
||||
It("Fetches and returns IPLDs for the CIDs provided in the CIDWrapper", func() {
|
||||
iplds, err := fetcher.Fetch(*test_helpers.MockCIDWrapper)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(iplds).ToNot(BeNil())
|
||||
Expect(iplds.TotalDifficulty).To(Equal(test_helpers.MockConvertedPayload.TotalDifficulty))
|
||||
Expect(iplds.BlockNumber).To(Equal(test_helpers.MockConvertedPayload.Block.Number()))
|
||||
Expect(iplds.Header).To(Equal(test_helpers.MockIPLDs.Header))
|
||||
Expect(len(iplds.Uncles)).To(Equal(0))
|
||||
Expect(iplds.Transactions).To(Equal(test_helpers.MockIPLDs.Transactions))
|
||||
Expect(iplds.Receipts).To(Equal(test_helpers.MockIPLDs.Receipts))
|
||||
Expect(iplds.StateNodes).To(Equal(test_helpers.MockIPLDs.StateNodes))
|
||||
Expect(iplds.StorageNodes).To(Equal(test_helpers.MockIPLDs.StorageNodes))
|
||||
})
|
||||
})
|
||||
})
|
||||
659 pkg/eth/ipld_retriever.go Normal file
@ -0,0 +1,659 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/statediff/trie_helpers"
|
||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/shared"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/lib/pq"
|
||||
)
|
||||
|
||||
const (
|
||||
// node type removed value.
|
||||
// https://github.com/vulcanize/go-ethereum/blob/271f4d01e7e2767ffd8e0cd469bf545be96f2a84/statediff/indexer/helpers.go#L34
|
||||
removedNode = 3
|
||||
|
||||
RetrieveHeadersByHashesPgStr = `SELECT cid, data
|
||||
FROM eth.header_cids
|
||||
INNER JOIN public.blocks ON (
|
||||
header_cids.mh_key = blocks.key
|
||||
AND header_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE block_hash = ANY($1::VARCHAR(66)[])`
|
||||
RetrieveHeadersByBlockNumberPgStr = `SELECT cid, data
|
||||
FROM eth.header_cids
|
||||
INNER JOIN public.blocks ON (
|
||||
header_cids.mh_key = blocks.key
|
||||
AND header_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE header_cids.block_number = $1`
|
||||
RetrieveHeaderByHashPgStr = `SELECT cid, data
|
||||
FROM eth.header_cids
|
||||
INNER JOIN public.blocks ON (
|
||||
header_cids.mh_key = blocks.key
|
||||
AND header_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE block_hash = $1`
|
||||
RetrieveUnclesByHashesPgStr = `SELECT cid, data
|
||||
FROM eth.uncle_cids
|
||||
INNER JOIN public.blocks ON (
|
||||
uncle_cids.mh_key = blocks.key
|
||||
AND uncle_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE block_hash = ANY($1::VARCHAR(66)[])`
|
||||
RetrieveUnclesByBlockHashPgStr = `SELECT uncle_cids.cid, data
|
||||
FROM eth.uncle_cids
|
||||
INNER JOIN eth.header_cids ON (
|
||||
uncle_cids.header_id = header_cids.block_hash
|
||||
AND uncle_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
uncle_cids.mh_key = blocks.key
|
||||
AND uncle_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE header_cids.block_hash = $1`
|
||||
RetrieveUnclesByBlockNumberPgStr = `SELECT uncle_cids.cid, data
|
||||
FROM eth.uncle_cids
|
||||
INNER JOIN eth.header_cids ON (
|
||||
uncle_cids.header_id = header_cids.block_hash
|
||||
AND uncle_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
uncle_cids.mh_key = blocks.key
|
||||
AND uncle_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE header_cids.block_number = $1`
|
||||
RetrieveUncleByHashPgStr = `SELECT cid, data
|
||||
FROM eth.uncle_cids
|
||||
INNER JOIN public.blocks ON (
|
||||
uncle_cids.mh_key = blocks.key
|
||||
AND uncle_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE block_hash = $1`
|
||||
RetrieveTransactionsByHashesPgStr = `SELECT DISTINCT ON (tx_hash) cid, data
|
||||
FROM eth.transaction_cids
|
||||
INNER JOIN public.blocks ON (
|
||||
transaction_cids.mh_key = blocks.key
|
||||
AND transaction_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE tx_hash = ANY($1::VARCHAR(66)[])`
|
||||
RetrieveTransactionsByBlockHashPgStr = `SELECT transaction_cids.cid, data
|
||||
FROM eth.transaction_cids
|
||||
INNER JOIN eth.header_cids ON (
|
||||
transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
transaction_cids.mh_key = blocks.key
|
||||
AND transaction_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE block_hash = $1
|
||||
ORDER BY eth.transaction_cids.index ASC`
|
||||
RetrieveTransactionsByBlockNumberPgStr = `SELECT transaction_cids.cid, data
|
||||
FROM eth.transaction_cids
|
||||
INNER JOIN eth.header_cids ON (
|
||||
transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
transaction_cids.mh_key = blocks.key
|
||||
AND transaction_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE header_cids.block_number = $1
|
||||
AND block_hash = (SELECT canonical_header_hash(header_cids.block_number))
|
||||
ORDER BY eth.transaction_cids.index ASC`
|
||||
RetrieveTransactionByHashPgStr = `SELECT DISTINCT ON (tx_hash) cid, data
|
||||
FROM eth.transaction_cids
|
||||
INNER JOIN public.blocks ON (
|
||||
transaction_cids.mh_key = blocks.key
|
||||
AND transaction_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE tx_hash = $1`
|
||||
RetrieveReceiptsByTxHashesPgStr = `SELECT receipt_cids.leaf_cid, data
|
||||
FROM eth.receipt_cids
|
||||
INNER JOIN eth.transaction_cids ON (
|
||||
receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND receipt_cids.header_id = transaction_cids.header_id
|
||||
AND receipt_cids.block_number = transaction_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
receipt_cids.leaf_mh_key = blocks.key
|
||||
AND receipt_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE tx_hash = ANY($1::VARCHAR(66)[])
|
||||
AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))`
|
||||
RetrieveReceiptsByBlockHashPgStr = `SELECT receipt_cids.leaf_cid, data, eth.transaction_cids.tx_hash
|
||||
FROM eth.receipt_cids
|
||||
INNER JOIN eth.transaction_cids ON (
|
||||
receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND receipt_cids.header_id = transaction_cids.header_id
|
||||
AND receipt_cids.block_number = transaction_cids.block_number
|
||||
)
|
||||
INNER JOIN eth.header_cids ON (
|
||||
transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
receipt_cids.leaf_mh_key = blocks.key
|
||||
AND receipt_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE block_hash = $1
|
||||
ORDER BY eth.transaction_cids.index ASC`
|
||||
RetrieveReceiptsByBlockNumberPgStr = `SELECT receipt_cids.leaf_cid, data
|
||||
FROM eth.receipt_cids
|
||||
INNER JOIN eth.transaction_cids ON (
|
||||
receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND receipt_cids.header_id = transaction_cids.header_id
|
||||
AND receipt_cids.block_number = transaction_cids.block_number
|
||||
)
|
||||
INNER JOIN eth.header_cids ON (
|
||||
transaction_cids.header_id = header_cids.block_hash
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
receipt_cids.leaf_mh_key = blocks.key
|
||||
AND receipt_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE header_cids.block_number = $1
|
||||
AND block_hash = (SELECT canonical_header_hash(header_cids.block_number))
|
||||
ORDER BY eth.transaction_cids.index ASC`
|
||||
RetrieveReceiptByTxHashPgStr = `SELECT receipt_cids.leaf_cid, data
|
||||
FROM eth.receipt_cids
|
||||
INNER JOIN eth.transaction_cids ON (
|
||||
receipt_cids.tx_id = transaction_cids.tx_hash
|
||||
AND receipt_cids.header_id = transaction_cids.header_id
|
||||
AND receipt_cids.block_number = transaction_cids.block_number
|
||||
)
|
||||
INNER JOIN public.blocks ON (
|
||||
receipt_cids.leaf_mh_key = blocks.key
|
||||
AND receipt_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE tx_hash = $1
|
||||
AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))`
|
||||
RetrieveAccountByLeafKeyAndBlockHashPgStr = `SELECT state_cids.cid, state_cids.mh_key, state_cids.block_number, state_cids.node_type
|
||||
FROM eth.state_cids
|
||||
INNER JOIN eth.header_cids ON (
|
||||
state_cids.header_id = header_cids.block_hash
|
||||
AND state_cids.block_number = header_cids.block_number
|
||||
)
|
||||
WHERE state_leaf_key = $1
|
||||
AND header_cids.block_number <= (SELECT block_number
|
||||
FROM eth.header_cids
|
||||
WHERE block_hash = $2)
|
||||
AND header_cids.block_hash = (SELECT canonical_header_hash(header_cids.block_number))
|
||||
ORDER BY header_cids.block_number DESC
|
||||
LIMIT 1`
|
||||
RetrieveAccountByLeafKeyAndBlockNumberPgStr = `SELECT state_cids.cid, state_cids.mh_key, state_cids.node_type
|
||||
FROM eth.state_cids
|
||||
INNER JOIN eth.header_cids ON (
|
||||
state_cids.header_id = header_cids.block_hash
|
||||
AND state_cids.block_number = header_cids.block_number
|
||||
)
|
||||
WHERE state_leaf_key = $1
|
||||
AND header_cids.block_number <= $2
|
||||
ORDER BY header_cids.block_number DESC
|
||||
LIMIT 1`
|
||||
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr = `SELECT storage_cids.cid, storage_cids.mh_key, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
|
||||
FROM eth.storage_cids
|
||||
INNER JOIN eth.state_cids ON (
|
||||
storage_cids.header_id = state_cids.header_id
|
||||
AND storage_cids.state_path = state_cids.state_path
|
||||
AND storage_cids.block_number = state_cids.block_number
|
||||
)
|
||||
INNER JOIN eth.header_cids ON (
|
||||
state_cids.header_id = header_cids.block_hash
|
||||
AND state_cids.block_number = header_cids.block_number
|
||||
)
|
||||
WHERE state_leaf_key = $1
|
||||
AND storage_leaf_key = $2
|
||||
AND header_cids.block_number <= $3
|
||||
ORDER BY header_cids.block_number DESC
|
||||
LIMIT 1`
|
||||
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `SELECT storage_cids.cid, storage_cids.mh_key, storage_cids.block_number, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
|
||||
FROM eth.storage_cids
|
||||
INNER JOIN eth.state_cids ON (
|
||||
storage_cids.header_id = state_cids.header_id
|
||||
AND storage_cids.state_path = state_cids.state_path
|
||||
AND storage_cids.block_number = state_cids.block_number
|
||||
)
|
||||
INNER JOIN eth.header_cids ON (
|
||||
state_cids.header_id = header_cids.block_hash
|
||||
AND state_cids.block_number = header_cids.block_number
|
||||
)
|
||||
WHERE state_leaf_key = $1
|
||||
AND storage_leaf_key = $2
|
||||
AND header_cids.block_number <= (SELECT block_number
|
||||
FROM eth.header_cids
|
||||
WHERE block_hash = $3)
|
||||
AND header_cids.block_hash = (SELECT canonical_header_hash(header_cids.block_number))
|
||||
ORDER BY header_cids.block_number DESC
|
||||
LIMIT 1`
|
||||
)
|
||||
|
||||
var EmptyNodeValue = make([]byte, common.HashLength)
|
||||
|
||||
type rctIpldResult struct {
|
||||
LeafCID string `db:"leaf_cid"`
|
||||
Data []byte `db:"data"`
|
||||
TxHash string `db:"tx_hash"`
|
||||
}
|
||||
|
||||
type ipldResult struct {
|
||||
CID string `db:"cid"`
|
||||
Data []byte `db:"data"`
|
||||
TxHash string `db:"tx_hash"`
|
||||
}
|
||||
|
||||
type IPLDRetriever struct {
|
||||
db *sqlx.DB
|
||||
}
|
||||
|
||||
func NewIPLDRetriever(db *sqlx.DB) *IPLDRetriever {
|
||||
return &IPLDRetriever{
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
// RetrieveHeadersByHashes returns the cids and rlp bytes for the headers corresponding to the provided block hashes
|
||||
func (r *IPLDRetriever) RetrieveHeadersByHashes(hashes []common.Hash) ([]string, [][]byte, error) {
|
||||
headerResults := make([]ipldResult, 0)
|
||||
hashStrs := make([]string, len(hashes))
|
||||
for i, hash := range hashes {
|
||||
hashStrs[i] = hash.Hex()
|
||||
}
|
||||
if err := r.db.Select(&headerResults, RetrieveHeadersByHashesPgStr, pq.Array(hashStrs)); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(headerResults))
|
||||
headers := make([][]byte, len(headerResults))
|
||||
for i, res := range headerResults {
|
||||
cids[i] = res.CID
|
||||
headers[i] = res.Data
|
||||
}
|
||||
return cids, headers, nil
|
||||
}
|
||||
|
||||
// RetrieveHeadersByBlockNumber returns the cids and rlp bytes for the headers corresponding to the provided block number
|
||||
// This can return more than one result since there can be more than one header (non-canonical headers)
|
||||
func (r *IPLDRetriever) RetrieveHeadersByBlockNumber(number uint64) ([]string, [][]byte, error) {
|
||||
headerResults := make([]ipldResult, 0)
|
||||
if err := r.db.Select(&headerResults, RetrieveHeadersByBlockNumberPgStr, number); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(headerResults))
|
||||
headers := make([][]byte, len(headerResults))
|
||||
for i, res := range headerResults {
|
||||
cids[i] = res.CID
|
||||
headers[i] = res.Data
|
||||
}
|
||||
return cids, headers, nil
|
||||
}
|
||||
|
||||
// RetrieveHeaderByHash returns the cid and rlp bytes for the header corresponding to the provided block hash
|
||||
func (r *IPLDRetriever) RetrieveHeaderByHash(hash common.Hash) (string, []byte, error) {
|
||||
headerResult := new(ipldResult)
|
||||
return headerResult.CID, headerResult.Data, r.db.Get(headerResult, RetrieveHeaderByHashPgStr, hash.Hex())
|
||||
}
|
||||
|
||||
// RetrieveUnclesByHashes returns the cids and rlp bytes for the uncles corresponding to the provided uncle hashes
|
||||
func (r *IPLDRetriever) RetrieveUnclesByHashes(hashes []common.Hash) ([]string, [][]byte, error) {
|
||||
uncleResults := make([]ipldResult, 0)
|
||||
hashStrs := make([]string, len(hashes))
|
||||
for i, hash := range hashes {
|
||||
hashStrs[i] = hash.Hex()
|
||||
}
|
||||
if err := r.db.Select(&uncleResults, RetrieveUnclesByHashesPgStr, pq.Array(hashStrs)); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(uncleResults))
|
||||
uncles := make([][]byte, len(uncleResults))
|
||||
for i, res := range uncleResults {
|
||||
cids[i] = res.CID
|
||||
uncles[i] = res.Data
|
||||
}
|
||||
return cids, uncles, nil
|
||||
}
|
||||
|
||||
// RetrieveUnclesByBlockHash returns the cids and rlp bytes for the uncles corresponding to the provided block hash (of the non-ommer root block)
|
||||
func (r *IPLDRetriever) RetrieveUnclesByBlockHash(hash common.Hash) ([]string, [][]byte, error) {
|
||||
uncleResults := make([]ipldResult, 0)
|
||||
if err := r.db.Select(&uncleResults, RetrieveUnclesByBlockHashPgStr, hash.Hex()); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(uncleResults))
|
||||
uncles := make([][]byte, len(uncleResults))
|
||||
for i, res := range uncleResults {
|
||||
cids[i] = res.CID
|
||||
uncles[i] = res.Data
|
||||
}
|
||||
return cids, uncles, nil
|
||||
}
|
||||
|
||||
// RetrieveUnclesByBlockNumber returns the cids and rlp bytes for the uncles corresponding to the provided block number (of the non-ommer root block)
|
||||
func (r *IPLDRetriever) RetrieveUnclesByBlockNumber(number uint64) ([]string, [][]byte, error) {
|
||||
uncleResults := make([]ipldResult, 0)
|
||||
if err := r.db.Select(&uncleResults, RetrieveUnclesByBlockNumberPgStr, number); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(uncleResults))
|
||||
uncles := make([][]byte, len(uncleResults))
|
||||
for i, res := range uncleResults {
|
||||
cids[i] = res.CID
|
||||
uncles[i] = res.Data
|
||||
}
|
||||
return cids, uncles, nil
|
||||
}
|
||||
|
||||
// RetrieveUncleByHash returns the cid and rlp bytes for the uncle corresponding to the provided uncle hash
|
||||
func (r *IPLDRetriever) RetrieveUncleByHash(hash common.Hash) (string, []byte, error) {
|
||||
uncleResult := new(ipldResult)
|
||||
return uncleResult.CID, uncleResult.Data, r.db.Get(uncleResult, RetrieveUncleByHashPgStr, hash.Hex())
|
||||
}
|
||||
|
||||
// RetrieveTransactionsByHashes returns the cids and rlp bytes for the transactions corresponding to the provided tx hashes
|
||||
func (r *IPLDRetriever) RetrieveTransactionsByHashes(hashes []common.Hash) ([]string, [][]byte, error) {
|
||||
txResults := make([]ipldResult, 0)
|
||||
hashStrs := make([]string, len(hashes))
|
||||
for i, hash := range hashes {
|
||||
hashStrs[i] = hash.Hex()
|
||||
}
|
||||
if err := r.db.Select(&txResults, RetrieveTransactionsByHashesPgStr, pq.Array(hashStrs)); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(txResults))
|
||||
txs := make([][]byte, len(txResults))
|
||||
for i, res := range txResults {
|
||||
cids[i] = res.CID
|
||||
txs[i] = res.Data
|
||||
}
|
||||
return cids, txs, nil
|
||||
}
|
||||
|
||||
// RetrieveTransactionsByBlockHash returns the cids and rlp bytes for the transactions corresponding to the provided block hash
|
||||
func (r *IPLDRetriever) RetrieveTransactionsByBlockHash(hash common.Hash) ([]string, [][]byte, error) {
|
||||
txResults := make([]ipldResult, 0)
|
||||
if err := r.db.Select(&txResults, RetrieveTransactionsByBlockHashPgStr, hash.Hex()); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(txResults))
|
||||
txs := make([][]byte, len(txResults))
|
||||
for i, res := range txResults {
|
||||
cids[i] = res.CID
|
||||
txs[i] = res.Data
|
||||
}
|
||||
return cids, txs, nil
|
||||
}
|
||||
|
||||
// RetrieveTransactionsByBlockNumber returns the cids and rlp bytes for the transactions corresponding to the provided block number
|
||||
func (r *IPLDRetriever) RetrieveTransactionsByBlockNumber(number uint64) ([]string, [][]byte, error) {
|
||||
txResults := make([]ipldResult, 0)
|
||||
if err := r.db.Select(&txResults, RetrieveTransactionsByBlockNumberPgStr, number); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(txResults))
|
||||
txs := make([][]byte, len(txResults))
|
||||
for i, res := range txResults {
|
||||
cids[i] = res.CID
|
||||
txs[i] = res.Data
|
||||
}
|
||||
return cids, txs, nil
|
||||
}
|
||||
|
||||
// RetrieveTransactionByTxHash returns the cid and rlp bytes for the transaction corresponding to the provided tx hash
|
||||
func (r *IPLDRetriever) RetrieveTransactionByTxHash(hash common.Hash) (string, []byte, error) {
|
||||
txResult := new(ipldResult)
|
||||
return txResult.CID, txResult.Data, r.db.Get(txResult, RetrieveTransactionByHashPgStr, hash.Hex())
|
||||
}
|
||||
|
||||
// DecodeLeafNode decodes the leaf node data
|
||||
func DecodeLeafNode(node []byte) ([]byte, error) {
|
||||
var nodeElements []interface{}
|
||||
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ty, err := trie_helpers.CheckKeyType(nodeElements)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ty != sdtypes.Leaf {
|
||||
return nil, fmt.Errorf("expected leaf node but found %s", ty)
|
||||
}
|
||||
return nodeElements[1].([]byte), nil
|
||||
}
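A sketch of turning the decoded leaf value back into a receipt object, assuming go-ethereum's core/types package is imported; the raw node bytes are hypothetical input fetched from public.blocks:

// exampleDecodeReceipt is an illustrative sketch built on DecodeLeafNode
func exampleDecodeReceipt(rawLeafNode []byte) (*types.Receipt, error) {
	rctRLP, err := DecodeLeafNode(rawLeafNode)
	if err != nil {
		return nil, err
	}
	rct := new(types.Receipt)
	// UnmarshalBinary handles both legacy RLP receipts and typed (EIP-2718) payloads
	if err := rct.UnmarshalBinary(rctRLP); err != nil {
		return nil, err
	}
	return rct, nil
}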
|
||||
|
||||
// RetrieveReceiptsByTxHashes returns the cids and rlp bytes for the receipts corresponding to the provided tx hashes
|
||||
func (r *IPLDRetriever) RetrieveReceiptsByTxHashes(hashes []common.Hash) ([]string, [][]byte, error) {
|
||||
rctResults := make([]rctIpldResult, 0)
|
||||
hashStrs := make([]string, len(hashes))
|
||||
for i, hash := range hashes {
|
||||
hashStrs[i] = hash.Hex()
|
||||
}
|
||||
if err := r.db.Select(&rctResults, RetrieveReceiptsByTxHashesPgStr, pq.Array(hashStrs)); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(rctResults))
|
||||
rcts := make([][]byte, len(rctResults))
|
||||
for i, res := range rctResults {
|
||||
cids[i] = res.LeafCID
|
||||
nodeVal, err := DecodeLeafNode(res.Data)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
rcts[i] = nodeVal
|
||||
}
|
||||
return cids, rcts, nil
|
||||
}
|
||||
|
||||
// RetrieveReceiptsByBlockHash returns the cids and rlp bytes for the receipts corresponding to the provided block hash.
|
||||
// cid returned corresponds to the leaf node data which contains the receipt.
|
||||
func (r *IPLDRetriever) RetrieveReceiptsByBlockHash(hash common.Hash) ([]string, [][]byte, []common.Hash, error) {
|
||||
rctResults := make([]rctIpldResult, 0)
|
||||
if err := r.db.Select(&rctResults, RetrieveReceiptsByBlockHashPgStr, hash.Hex()); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(rctResults))
|
||||
rcts := make([][]byte, len(rctResults))
|
||||
txs := make([]common.Hash, len(rctResults))
|
||||
|
||||
for i, res := range rctResults {
|
||||
cids[i] = res.LeafCID
|
||||
nodeVal, err := DecodeLeafNode(res.Data)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
rcts[i] = nodeVal
|
||||
txs[i] = common.HexToHash(res.TxHash)
|
||||
}
|
||||
|
||||
return cids, rcts, txs, nil
|
||||
}
|
||||
|
||||
// RetrieveReceiptsByBlockNumber returns the cids and rlp bytes for the receipts corresponding to the provided block hash.
|
||||
// cid returned corresponds to the leaf node data which contains the receipt.
|
||||
func (r *IPLDRetriever) RetrieveReceiptsByBlockNumber(number uint64) ([]string, [][]byte, error) {
|
||||
rctResults := make([]rctIpldResult, 0)
|
||||
if err := r.db.Select(&rctResults, RetrieveReceiptsByBlockNumberPgStr, number); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(rctResults))
|
||||
rcts := make([][]byte, len(rctResults))
|
||||
for i, res := range rctResults {
|
||||
cids[i] = res.LeafCID
|
||||
nodeVal, err := DecodeLeafNode(res.Data)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
rcts[i] = nodeVal
|
||||
}
|
||||
return cids, rcts, nil
|
||||
}
|
||||
|
||||
// RetrieveReceiptByHash returns the cid and rlp bytes for the receipt corresponding to the provided tx hash.
|
||||
// cid returned corresponds to the leaf node data which contains the receipt.
|
||||
func (r *IPLDRetriever) RetrieveReceiptByHash(hash common.Hash) (string, []byte, error) {
|
||||
rctResult := new(rctIpldResult)
|
||||
if err := r.db.Get(rctResult, RetrieveReceiptByTxHashPgStr, hash.Hex()); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
nodeVal, err := DecodeLeafNode(rctResult.Data)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
return rctResult.LeafCID, nodeVal, nil
|
||||
}
|
||||
|
||||
type nodeInfo struct {
|
||||
CID string `db:"cid"`
|
||||
MhKey string `db:"mh_key"`
|
||||
BlockNumber string `db:"block_number"`
|
||||
Data []byte `db:"data"`
|
||||
NodeType int `db:"node_type"`
|
||||
StateLeafRemoved bool `db:"state_leaf_removed"`
|
||||
}
|
||||
|
||||
// RetrieveAccountByAddressAndBlockHash returns the cid and rlp bytes for the account corresponding to the provided address and block hash
|
||||
// TODO: ensure this handles deleted accounts appropriately
|
||||
func (r *IPLDRetriever) RetrieveAccountByAddressAndBlockHash(address common.Address, hash common.Hash) (string, []byte, error) {
|
||||
accountResult := new(nodeInfo)
|
||||
leafKey := crypto.Keccak256Hash(address.Bytes())
|
||||
if err := r.db.Get(accountResult, RetrieveAccountByLeafKeyAndBlockHashPgStr, leafKey.Hex(), hash.Hex()); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
if accountResult.NodeType == removedNode {
|
||||
return "", EmptyNodeValue, nil
|
||||
}
|
||||
|
||||
blockNumber, err := strconv.ParseUint(accountResult.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
accountResult.Data, err = shared.FetchIPLD(r.db, accountResult.MhKey, blockNumber)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
var i []interface{}
|
||||
if err := rlp.DecodeBytes(accountResult.Data, &i); err != nil {
|
||||
return "", nil, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
|
||||
}
|
||||
if len(i) != 2 {
|
||||
return "", nil, fmt.Errorf("eth IPLDRetriever expected state leaf node rlp to decode into two elements")
|
||||
}
|
||||
return accountResult.CID, i[1].([]byte), nil
|
||||
}
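A sketch of consuming the account RLP returned above, assuming go-ethereum's types.StateAccount layout (nonce, balance, storage root, code hash); the retriever, address, and block hash are supplied by the caller:

// exampleDecodeAccount is an illustrative sketch; types.StateAccount is assumed importable
func exampleDecodeAccount(r *IPLDRetriever, addr common.Address, blockHash common.Hash) (*types.StateAccount, error) {
	_, accountRLP, err := r.RetrieveAccountByAddressAndBlockHash(addr, blockHash)
	if err != nil {
		return nil, err
	}
	account := new(types.StateAccount)
	if err := rlp.DecodeBytes(accountRLP, account); err != nil {
		return nil, err
	}
	return account, nil
}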
|
||||
|
||||
// RetrieveAccountByAddressAndBlockNumber returns the cid and rlp bytes for the account corresponding to the provided address and block number
|
||||
// This can return a non-canonical account
|
||||
func (r *IPLDRetriever) RetrieveAccountByAddressAndBlockNumber(address common.Address, number uint64) (string, []byte, error) {
|
||||
accountResult := new(nodeInfo)
|
||||
leafKey := crypto.Keccak256Hash(address.Bytes())
|
||||
if err := r.db.Get(accountResult, RetrieveAccountByLeafKeyAndBlockNumberPgStr, leafKey.Hex(), number); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
if accountResult.NodeType == removedNode {
|
||||
return "", EmptyNodeValue, nil
|
||||
}
|
||||
|
||||
var err error
|
||||
accountResult.Data, err = shared.FetchIPLD(r.db, accountResult.MhKey, number)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
var i []interface{}
|
||||
if err := rlp.DecodeBytes(accountResult.Data, &i); err != nil {
|
||||
return "", nil, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
|
||||
}
|
||||
if len(i) != 2 {
|
||||
return "", nil, fmt.Errorf("eth IPLDRetriever expected state leaf node rlp to decode into two elements")
|
||||
}
|
||||
return accountResult.CID, i[1].([]byte), nil
|
||||
}
|
||||
|
||||
// RetrieveStorageAtByAddressAndStorageSlotAndBlockHash returns the cid and rlp bytes for the storage value corresponding to the provided address, storage slot, and block hash
|
||||
func (r *IPLDRetriever) RetrieveStorageAtByAddressAndStorageSlotAndBlockHash(address common.Address, key, hash common.Hash) (string, []byte, []byte, error) {
|
||||
storageResult := new(nodeInfo)
|
||||
stateLeafKey := crypto.Keccak256Hash(address.Bytes())
|
||||
storageHash := crypto.Keccak256Hash(key.Bytes())
|
||||
if err := r.db.Get(storageResult, RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr, stateLeafKey.Hex(), storageHash.Hex(), hash.Hex()); err != nil {
|
||||
return "", nil, nil, err
|
||||
}
|
||||
if storageResult.StateLeafRemoved || storageResult.NodeType == removedNode {
|
||||
return "", EmptyNodeValue, EmptyNodeValue, nil
|
||||
}
|
||||
|
||||
blockNumber, err := strconv.ParseUint(storageResult.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
}
|
||||
storageResult.Data, err = shared.FetchIPLD(r.db, storageResult.MhKey, blockNumber)
|
||||
if err != nil {
|
||||
return "", nil, nil, err
|
||||
}
|
||||
|
||||
var i []interface{}
|
||||
if err := rlp.DecodeBytes(storageResult.Data, &i); err != nil {
|
||||
err = fmt.Errorf("error decoding storage leaf node rlp: %s", err.Error())
|
||||
return "", nil, nil, err
|
||||
}
|
||||
if len(i) != 2 {
|
||||
return "", nil, nil, fmt.Errorf("eth IPLDRetriever expected storage leaf node rlp to decode into two elements")
|
||||
}
|
||||
return storageResult.CID, storageResult.Data, i[1].([]byte), nil
|
||||
}
|
||||
|
||||
// RetrieveStorageAtByAddressAndStorageKeyAndBlockNumber returns the cid and rlp bytes for the storage value corresponding to the provided address, storage key, and block number
|
||||
// This can return a non-canonical value
|
||||
func (r *IPLDRetriever) RetrieveStorageAtByAddressAndStorageKeyAndBlockNumber(address common.Address, storageLeafKey common.Hash, number uint64) (string, []byte, error) {
|
||||
storageResult := new(nodeInfo)
|
||||
stateLeafKey := crypto.Keccak256Hash(address.Bytes())
|
||||
if err := r.db.Get(storageResult, RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr, stateLeafKey.Hex(), storageLeafKey.Hex(), number); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
if storageResult.StateLeafRemoved || storageResult.NodeType == removedNode {
|
||||
return "", EmptyNodeValue, nil
|
||||
}
|
||||
|
||||
var err error
|
||||
storageResult.Data, err = shared.FetchIPLD(r.db, storageResult.MhKey, number)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
var i []interface{}
|
||||
if err := rlp.DecodeBytes(storageResult.Data, &i); err != nil {
|
||||
return "", nil, fmt.Errorf("error decoding storage leaf node rlp: %s", err.Error())
|
||||
}
|
||||
if len(i) != 2 {
|
||||
return "", nil, fmt.Errorf("eth IPLDRetriever expected storage leaf node rlp to decode into two elements")
|
||||
}
|
||||
return storageResult.CID, i[1].([]byte), nil
|
||||
}
|
||||
@ -1,103 +0,0 @@
|
||||
package eth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-statedb/trie_by_cid/trie"
|
||||
"github.com/cerc-io/ipld-eth-statedb/trie_by_cid/triedb"
|
||||
)
|
||||
|
||||
// NodeType for explicitly setting type of node
|
||||
type NodeType string
|
||||
|
||||
const (
|
||||
Unknown NodeType = "Unknown"
|
||||
Branch NodeType = "Branch"
|
||||
Extension NodeType = "Extension"
|
||||
Leaf NodeType = "Leaf"
|
||||
Removed NodeType = "Removed" // used to represent paths which have been emptied
|
||||
)
|
||||
|
||||
func (n NodeType) Int() int {
|
||||
switch n {
|
||||
case Branch:
|
||||
return 0
|
||||
case Extension:
|
||||
return 1
|
||||
case Leaf:
|
||||
return 2
|
||||
case Removed:
|
||||
return 3
|
||||
default:
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
// CheckKeyType checks what type of key we have
|
||||
func CheckKeyType(elements []interface{}) (NodeType, error) {
|
||||
if len(elements) > 2 {
|
||||
return Branch, nil
|
||||
}
|
||||
if len(elements) < 2 {
|
||||
return Unknown, fmt.Errorf("node cannot be less than two elements in length")
|
||||
}
|
||||
switch elements[0].([]byte)[0] / 16 {
|
||||
case '\x00':
|
||||
return Extension, nil
|
||||
case '\x01':
|
||||
return Extension, nil
|
||||
case '\x02':
|
||||
return Leaf, nil
|
||||
case '\x03':
|
||||
return Leaf, nil
|
||||
default:
|
||||
return Unknown, fmt.Errorf("unknown hex prefix")
|
||||
}
|
||||
}
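The first-byte/16 switch above relies on the trie's hex-prefix encoding, where the leading nibble of the partial path carries the node kind and path parity; a compact illustrative helper:

// hexPrefixIsLeaf mirrors the encoding CheckKeyType inspects:
//   0 - extension node, even-length partial path
//   1 - extension node, odd-length partial path
//   2 - leaf node, even-length partial path
//   3 - leaf node, odd-length partial path
func hexPrefixIsLeaf(encodedPath []byte) bool {
	nibble := encodedPath[0] / 16
	return nibble == 2 || nibble == 3
}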
|
||||
|
||||
// StateNode holds the data for a single state diff node
|
||||
type StateNode struct {
|
||||
NodeType NodeType `json:"nodeType" gencodec:"required"`
|
||||
Path []byte `json:"path" gencodec:"required"`
|
||||
NodeValue []byte `json:"value" gencodec:"required"`
|
||||
StorageNodes []StorageNode `json:"storage"`
|
||||
LeafKey []byte `json:"leafKey"`
|
||||
}
|
||||
|
||||
// StorageNode holds the data for a single storage diff node
|
||||
type StorageNode struct {
|
||||
NodeType NodeType `json:"nodeType" gencodec:"required"`
|
||||
Path []byte `json:"path" gencodec:"required"`
|
||||
NodeValue []byte `json:"value" gencodec:"required"`
|
||||
LeafKey []byte `json:"leafKey"`
|
||||
}
|
||||
|
||||
func ResolveNode(path []byte, node []byte, trieDB *triedb.Database) (StateNode, []interface{}, error) {
|
||||
var nodeElements []interface{}
|
||||
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
|
||||
return StateNode{}, nil, err
|
||||
}
|
||||
ty, err := CheckKeyType(nodeElements)
|
||||
if err != nil {
|
||||
return StateNode{}, nil, err
|
||||
}
|
||||
|
||||
nodePath := make([]byte, len(path))
|
||||
copy(nodePath, path)
|
||||
return StateNode{
|
||||
NodeType: ty,
|
||||
Path: nodePath,
|
||||
NodeValue: node,
|
||||
}, nodeElements, nil
|
||||
}
|
||||
|
||||
// ResolveNodeIt return the state diff node pointed by the iterator.
|
||||
func ResolveNodeIt(it trie.NodeIterator, trieDB *triedb.Database) (StateNode, []interface{}, error) {
|
||||
node, err := it.NodeBlob(), it.Error()
|
||||
if err != nil {
|
||||
return StateNode{}, nil, err
|
||||
}
|
||||
return ResolveNode(it.Path(), node, trieDB)
|
||||
}
|
||||
@ -1,573 +0,0 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/models"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/lib/pq"
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
|
||||
)
|
||||
|
||||
// Retriever is used for fetching CIDs and IPLD data from the database
type Retriever struct {
	db     *sqlx.DB
	gormDB *gorm.DB
}

type IPLDModelRecord struct {
	models.IPLDModel
}

// TableName overrides the table name used by IPLD
func (IPLDModelRecord) TableName() string {
	return "ipld.blocks"
}

type HeaderCIDRecord struct {
	CID             string `gorm:"column:cid"`
	BlockHash       string `gorm:"primaryKey"`
	BlockNumber     string `gorm:"primaryKey"`
	ParentHash      string
	Timestamp       uint64
	StateRoot       string
	TotalDifficulty string `gorm:"column:td"`
	TxRoot          string
	RctRoot         string `gorm:"column:receipt_root"`
	UnclesHash      string
	Bloom           []byte

	// gorm does not check whether a foreign key exists in the database,
	// so related records must be eager loaded using Preload.
	TransactionCIDs []TransactionCIDRecord `gorm:"foreignKey:HeaderID,BlockNumber;references:BlockHash,BlockNumber"`
	IPLD            IPLDModelRecord        `gorm:"foreignKey:CID,BlockNumber;references:Key,BlockNumber"`
}

// TableName overrides the table name used by HeaderCIDRecord
func (HeaderCIDRecord) TableName() string {
	return "eth.header_cids"
}

type TransactionCIDRecord struct {
	CID         string `gorm:"column:cid"`
	TxHash      string `gorm:"primaryKey"`
	BlockNumber string `gorm:"primaryKey"`
	HeaderID    string `gorm:"column:header_id"`
	Index       int64
	Src         string
	Dst         string
	IPLD        IPLDModelRecord `gorm:"foreignKey:CID,BlockNumber;references:Key,BlockNumber"`
}

type StateAccountRecord struct {
	Nonce    uint64 `db:"nonce"`
	Balance  string `db:"balance"`
	Root     string `db:"storage_root"`
	CodeHash []byte `db:"code_hash"`
	Removed  bool   `db:"removed"`
}

// TableName overrides the table name used by TransactionCIDRecord
func (TransactionCIDRecord) TableName() string {
	return "eth.transaction_cids"
}

// NewRetriever returns a pointer to a new Retriever which supports the Retriever interface
func NewRetriever(db *sqlx.DB) *Retriever {
	gormDB, err := gorm.Open(postgres.New(postgres.Config{
		Conn: db,
	}), &gorm.Config{})
	if err != nil {
		log.Error(err)
		return nil
	}

	return &Retriever{
		db:     db,
		gormDB: gormDB,
	}
}

// RetrieveFirstBlockNumber is used to retrieve the first block number in the db
func (r *Retriever) RetrieveFirstBlockNumber() (int64, error) {
	var blockNumber int64
	err := r.db.Get(&blockNumber, "SELECT block_number FROM ipld.blocks ORDER BY block_number ASC LIMIT 1")
	return blockNumber, err
}

// RetrieveLastBlockNumber is used to retrieve the latest block number in the db
func (r *Retriever) RetrieveLastBlockNumber() (int64, error) {
	var blockNumber int64
	err := r.db.Get(&blockNumber, "SELECT block_number FROM ipld.blocks ORDER BY block_number DESC LIMIT 1")
	return blockNumber, err
}

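// NOTE: illustrative sketch added for clarity; it is not part of the original file.
// Assuming an open *sqlx.DB handle, it shows how the two helpers above can be
// combined to discover the range of block numbers that have indexed data.
func exampleBlockRange(db *sqlx.DB) (first, last int64, err error) {
	r := NewRetriever(db)
	if r == nil {
		return 0, 0, fmt.Errorf("failed to construct retriever")
	}
	if first, err = r.RetrieveFirstBlockNumber(); err != nil {
		return 0, 0, err
	}
	if last, err = r.RetrieveLastBlockNumber(); err != nil {
		return 0, 0, err
	}
	return first, last, nil
}
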
func topicFilterCondition(id *int, topics [][]string, args []interface{}, pgStr string, first bool) (string, []interface{}) {
	for i, topicSet := range topics {
		if len(topicSet) == 0 {
			continue
		}

		if !first {
			pgStr += " AND"
		} else {
			first = false
		}
		pgStr += fmt.Sprintf(` eth.log_cids.topic%d = ANY ($%d)`, i, *id)
		args = append(args, pq.Array(topicSet))
		*id++
	}
	return pgStr, args
}

func logFilterCondition(id *int, pgStr string, args []interface{}, rctFilter ReceiptFilter) (string, []interface{}) {
	if len(rctFilter.LogAddresses) > 0 {
		pgStr += fmt.Sprintf(` AND eth.log_cids.address = ANY ($%d)`, *id)
		args = append(args, pq.Array(rctFilter.LogAddresses))
		*id++
	}

	// Filter on topics if there are any
	if hasTopics(rctFilter.Topics) {
		pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, false)
	}

	return pgStr, args
}

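// NOTE: illustrative sketch added for clarity; it is not part of the original file.
// It shows how the two helpers above extend a base query with numbered Postgres
// placeholders. The base query string and the ReceiptFilter values are hypothetical,
// and the field types are assumed from their use above.
func exampleLogFilter() (string, []interface{}) {
	id := 1
	args := make([]interface{}, 0)
	pgStr := `SELECT * FROM eth.log_cids WHERE eth.log_cids.block_number = 1`
	filter := ReceiptFilter{
		LogAddresses: []string{"0x0000000000000000000000000000000000000001"},
		Topics:       [][]string{{"0x0000000000000000000000000000000000000000000000000000000000000001"}},
	}
	// Appends " AND eth.log_cids.address = ANY ($1) AND eth.log_cids.topic0 = ANY ($2)"
	// to pgStr, with the matching values appended to args.
	return logFilterCondition(&id, pgStr, args, filter)
}
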
func receiptFilterConditions(id *int, pgStr string, args []interface{}, rctFilter ReceiptFilter, txHashes []string) (string, []interface{}) {
|
||||
rctCond := " AND (receipt_cids.tx_id = ANY ( "
|
||||
logQuery := "SELECT rct_id FROM eth.log_cids WHERE"
|
||||
if len(rctFilter.LogAddresses) > 0 {
|
||||
// Filter on log contract addresses if there are any
|
||||
pgStr += fmt.Sprintf(`%s %s eth.log_cids.address = ANY ($%d)`, rctCond, logQuery, *id)
|
||||
args = append(args, pq.Array(rctFilter.LogAddresses))
|
||||
*id++
|
||||
|
||||
// Filter on topics if there are any
|
||||
if hasTopics(rctFilter.Topics) {
|
||||
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, false)
|
||||
}
|
||||
|
||||
pgStr += ")"
|
||||
|
||||
// Filter on txHashes if there are any, and we are matching txs
|
||||
if rctFilter.MatchTxs && len(txHashes) > 0 {
|
||||
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d)`, *id)
|
||||
args = append(args, pq.Array(txHashes))
|
||||
}
|
||||
pgStr += ")"
|
||||
} else { // If there are no contract addresses to filter on
|
||||
// Filter on topics if there are any
|
||||
if hasTopics(rctFilter.Topics) {
|
||||
pgStr += rctCond + logQuery
|
||||
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, true)
|
||||
pgStr += ")"
|
||||
// Filter on txHashes if there are any, and we are matching txs
|
||||
if rctFilter.MatchTxs && len(txHashes) > 0 {
|
||||
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d)`, *id)
|
||||
args = append(args, pq.Array(txHashes))
|
||||
}
|
||||
pgStr += ")"
|
||||
} else if rctFilter.MatchTxs && len(txHashes) > 0 {
|
||||
// If there are no contract addresses or topics to filter on,
|
||||
// Filter on txHashes if there are any, and we are matching txs
|
||||
pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d)`, *id)
|
||||
args = append(args, pq.Array(txHashes))
|
||||
}
|
||||
}
|
||||
|
||||
return pgStr, args
|
||||
}
|
||||
|
||||
// RetrieveFilteredGQLLogs retrieves and returns all the log CIDs for the provided block hash that conform
// to the provided filter parameters.
|
||||
func (r *Retriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockHash *common.Hash, blockNumber *big.Int) ([]LogResult, error) {
|
||||
log.Debug("retrieving log cids for receipt ids with block hash", blockHash.String())
|
||||
args := make([]interface{}, 0, 4)
|
||||
id := 1
|
||||
pgStr := RetrieveFilteredGQLLogs
|
||||
args = append(args, blockHash.String())
|
||||
id++
|
||||
|
||||
if blockNumber != nil {
|
||||
pgStr += ` AND receipt_cids.block_number = $2`
|
||||
id++
|
||||
args = append(args, blockNumber.Int64())
|
||||
}
|
||||
|
||||
pgStr, args = logFilterCondition(&id, pgStr, args, rctFilter)
|
||||
pgStr += ` ORDER BY log_cids.block_number, log_cids.index`
|
||||
|
||||
logs := make([]LogResult, 0)
|
||||
err := tx.Select(&logs, pgStr, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return logs, nil
|
||||
}
|
||||
|
||||
// RetrieveFilteredLogsForBlock retrieves and returns all the log CIDs for the block that conform to the provided filter parameters.
|
||||
func (r *Retriever) RetrieveFilteredLogsForBlock(db *sqlx.DB, rctFilter ReceiptFilter, blockHash *common.Hash) ([]LogResult, error) {
|
||||
return r.retrieveFilteredLogs(db, rctFilter, -1, -1, blockHash)
|
||||
}
|
||||
|
||||
// RetrieveFilteredLogsForBlockRange retrieves and returns all the log CIDs for the blocks in the range that conform
|
||||
// to the provided filter parameters.
|
||||
func (r *Retriever) RetrieveFilteredLogsForBlockRange(db *sqlx.DB, rctFilter ReceiptFilter, startBlockNumber int64, stopBlockNumber int64) ([]LogResult, error) {
|
||||
return r.retrieveFilteredLogs(db, rctFilter, startBlockNumber, stopBlockNumber, nil)
|
||||
}
|
||||
|
||||
// retrieveFilteredLogs retrieves all the log CIDs either for a single block (by hash) or range of blocks (by number) which
|
||||
// conform to the provided filter parameters.
|
||||
func (r *Retriever) retrieveFilteredLogs(db *sqlx.DB, rctFilter ReceiptFilter, startBlockNumber int64, stopBlockNumber int64, blockHash *common.Hash) ([]LogResult, error) {
|
||||
log.Debug("retrieving log cids for receipt ids")
|
||||
args := make([]interface{}, 0, 4)
|
||||
var pgStr string
|
||||
id := 1
|
||||
if blockHash != nil {
|
||||
pgStr = RetrieveFilteredLogsSingle
|
||||
args = append(args, blockHash.String())
|
||||
id++
|
||||
} else {
|
||||
pgStr = RetrieveFilteredLogsRange
|
||||
args = append(args, startBlockNumber)
|
||||
id++
|
||||
args = append(args, stopBlockNumber)
|
||||
id++
|
||||
}
|
||||
|
||||
pgStr, args = logFilterCondition(&id, pgStr, args, rctFilter)
|
||||
pgStr += ` ORDER BY eth.log_cids.block_number, eth.log_cids.index`
|
||||
|
||||
logs := make([]LogResult, 0)
|
||||
err := db.Select(&logs, pgStr, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// decode logs and extract the original contract data
|
||||
for i, log := range logs {
|
||||
var buf []interface{}
|
||||
r := bytes.NewReader(log.LogLeafData)
|
||||
if err := rlp.Decode(r, &buf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logs[i].Data = buf[2].([]byte)
|
||||
}
|
||||
|
||||
return logs, nil
|
||||
}
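
// NOTE: illustrative sketch added for clarity; it is not part of the original file.
// The decoding loop above recovers the original contract data from an RLP-encoded
// log IPLD: a consensus-encoded log is the three-element list [address, topics, data],
// so index 2 holds the data payload. The `leaf` argument here is a hypothetical raw blob.
func exampleDecodeLogData(leaf []byte) ([]byte, error) {
	var fields []interface{}
	if err := rlp.Decode(bytes.NewReader(leaf), &fields); err != nil {
		return nil, err
	}
	if len(fields) < 3 {
		return nil, fmt.Errorf("unexpected log encoding with %d fields", len(fields))
	}
	data, ok := fields[2].([]byte)
	if !ok {
		return nil, fmt.Errorf("log data field is not a byte slice")
	}
	return data, nil
}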
|
||||
|
||||
func hasTopics(topics [][]string) bool {
	for _, topicSet := range topics {
		if len(topicSet) > 0 {
			return true
		}
	}
	return false
}

// RetrieveBlockNumberByHash returns the block number for the given block hash
func (r *Retriever) RetrieveBlockNumberByHash(tx *sqlx.Tx, blockHash common.Hash) (uint64, error) {
	log.Debug("retrieving block number for block hash ", blockHash.String())
	pgStr := `SELECT CAST(block_number as TEXT) FROM eth.header_cids WHERE block_hash = $1`
	var blockNumberStr string
	if err := tx.Get(&blockNumberStr, pgStr, blockHash.String()); err != nil {
		return 0, err
	}
	return strconv.ParseUint(blockNumberStr, 10, 64)
}

// RetrieveHeaderAndTxCIDsByBlockNumber retrieves header CIDs and their associated tx CIDs by block number
|
||||
func (r *Retriever) RetrieveHeaderAndTxCIDsByBlockNumber(blockNumber int64) ([]HeaderCIDRecord, error) {
|
||||
log.Debug("retrieving header cids and tx cids for block number ", blockNumber)
|
||||
|
||||
var headerCIDs []HeaderCIDRecord
|
||||
|
||||
// https://github.com/go-gorm/gorm/issues/4083#issuecomment-778883283
|
||||
// Will use join for TransactionCIDs once preload for 1:N is supported.
|
||||
err := r.gormDB.Preload("TransactionCIDs", func(tx *gorm.DB) *gorm.DB {
|
||||
return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id", "block_number")
|
||||
}).Joins("IPLD").Find(&headerCIDs, "header_cids.block_number = ?", blockNumber).Error
|
||||
|
||||
if err != nil {
|
||||
log.Error("header cid retrieval error")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return headerCIDs, nil
|
||||
}
|
||||
|
||||
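
// NOTE: illustrative sketch added for clarity; it is not part of the original file.
// It shows how the TransactionCIDs eager-loaded by gorm's Preload in the method
// above can be walked to collect transaction CIDs for a given height.
func exampleListTxCIDs(r *Retriever, height int64) ([]string, error) {
	headers, err := r.RetrieveHeaderAndTxCIDsByBlockNumber(height)
	if err != nil {
		return nil, err
	}
	var cids []string
	for _, h := range headers {
		for _, t := range h.TransactionCIDs {
			cids = append(cids, t.CID)
		}
	}
	return cids, nil
}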
// RetrieveHeaderAndTxCIDsByBlockHash retrieves header CID and their associated tx CIDs by block hash (and optionally block number)
|
||||
func (r *Retriever) RetrieveHeaderAndTxCIDsByBlockHash(blockHash common.Hash, blockNumber *big.Int) (HeaderCIDRecord, error) {
|
||||
log.Debug("retrieving header cid and tx cids for block hash ", blockHash.String())
|
||||
|
||||
var headerCIDs []HeaderCIDRecord
|
||||
|
||||
conditions := map[string]interface{}{"block_hash": blockHash.String()}
|
||||
if blockNumber != nil {
|
||||
conditions["header_cids.block_number"] = blockNumber.Int64()
|
||||
}
|
||||
|
||||
// https://github.com/go-gorm/gorm/issues/4083#issuecomment-778883283
|
||||
// Will use join for TransactionCIDs once preload for 1:N is supported.
|
||||
err := r.gormDB.Preload("TransactionCIDs", func(tx *gorm.DB) *gorm.DB {
|
||||
return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id", "block_number")
|
||||
}).Joins("IPLD").Find(&headerCIDs, conditions).Error
|
||||
|
||||
if err != nil {
|
||||
log.Error("header cid retrieval error")
|
||||
return HeaderCIDRecord{}, err
|
||||
}
|
||||
|
||||
if len(headerCIDs) == 0 {
|
||||
return HeaderCIDRecord{}, errHeaderHashNotFound
|
||||
} else if len(headerCIDs) > 1 {
|
||||
return HeaderCIDRecord{}, errMultipleHeadersForHash
|
||||
}
|
||||
|
||||
return headerCIDs[0], nil
|
||||
}
|
||||
|
||||
// RetrieveTxCIDByHash returns the tx for the given tx hash (and optionally block number)
|
||||
func (r *Retriever) RetrieveTxCIDByHash(txHash string, blockNumber *big.Int) (TransactionCIDRecord, error) {
|
||||
log.Debug("retrieving tx cid for tx hash ", txHash)
|
||||
|
||||
var txCIDs []TransactionCIDRecord
|
||||
|
||||
var err error
|
||||
if blockNumber != nil {
|
||||
err = r.gormDB.Joins("IPLD").Find(&txCIDs, "tx_hash = ? AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number)) AND transaction_cids.block_number = ?", txHash, blockNumber.Int64()).Error
|
||||
} else {
|
||||
err = r.gormDB.Joins("IPLD").Find(&txCIDs, "tx_hash = ? AND transaction_cids.header_id = (SELECT canonical_header_hash(transaction_cids.block_number))", txHash).Error
|
||||
}
|
||||
if err != nil {
|
||||
log.Error("tx retrieval error")
|
||||
return TransactionCIDRecord{}, err
|
||||
}
|
||||
|
||||
if len(txCIDs) == 0 {
|
||||
return TransactionCIDRecord{}, errTxHashNotFound
|
||||
} else if len(txCIDs) > 1 {
|
||||
// a transaction can be part of only one canonical block
|
||||
return TransactionCIDRecord{}, errTxHashInMultipleBlocks
|
||||
}
|
||||
|
||||
return txCIDs[0], nil
|
||||
}
|
||||
|
||||
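
// NOTE: illustrative sketch added for clarity; it is not part of the original file.
// Looking up a transaction CID record by hash; passing a nil block number lets the
// query above resolve the containing block via canonical_header_hash.
func exampleTxLookup(r *Retriever, txHash string) (TransactionCIDRecord, error) {
	return r.RetrieveTxCIDByHash(txHash, nil)
}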
var EmptyNodeValue = make([]byte, common.HashLength)
|
||||
|
||||
// RetrieveHeaderByHash returns the cid and rlp bytes for the header corresponding to the provided block hash
|
||||
func (r *Retriever) RetrieveHeaderByHash(hash common.Hash) (string, []byte, error) {
|
||||
headerResult := new(ipldResult)
|
||||
return headerResult.CID, headerResult.Data, r.db.Get(headerResult, RetrieveHeaderByHashPgStr, hash.Hex())
|
||||
}
|
||||
|
||||
// RetrieveHeaderByHash2 returns the cid and rlp bytes for the header corresponding to the provided block hash
|
||||
// using a sqlx.Tx
|
||||
func (r *Retriever) RetrieveHeaderByHash2(tx *sqlx.Tx, hash common.Hash) (string, []byte, error) {
|
||||
headerResult := new(ipldResult)
|
||||
return headerResult.CID, headerResult.Data, tx.Get(headerResult, RetrieveHeaderByHashPgStr, hash.Hex())
|
||||
}
|
||||
|
||||
// RetrieveUncles returns the cid and rlp bytes for the uncle list corresponding to the provided block hash and number (of the non-ommer root block)
|
||||
func (r *Retriever) RetrieveUncles(tx *sqlx.Tx, hash common.Hash, number uint64) (string, []byte, error) {
|
||||
uncleResult := new(ipldResult)
|
||||
if err := tx.Get(uncleResult, RetrieveUnclesPgStr, hash.Hex(), number); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
return uncleResult.CID, uncleResult.Data, nil
|
||||
}
|
||||
|
||||
// RetrieveUnclesByBlockHash returns the cid and rlp bytes for the uncle list corresponding to the provided block hash (of the non-ommer root block)
|
||||
func (r *Retriever) RetrieveUnclesByBlockHash(tx *sqlx.Tx, hash common.Hash) (string, []byte, error) {
|
||||
uncleResult := new(ipldResult)
|
||||
if err := tx.Get(uncleResult, RetrieveUnclesByBlockHashPgStr, hash.Hex()); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
return uncleResult.CID, uncleResult.Data, nil
|
||||
}
|
||||
|
||||
// RetrieveTransactions returns the cids and rlp bytes for the transactions corresponding to the provided block hash, number
|
||||
func (r *Retriever) RetrieveTransactions(tx *sqlx.Tx, hash common.Hash, number uint64) ([]string, [][]byte, error) {
|
||||
txResults := make([]ipldResult, 0)
|
||||
if err := tx.Select(&txResults, RetrieveTransactionsPgStr, hash.Hex(), number); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(txResults))
|
||||
txs := make([][]byte, len(txResults))
|
||||
for i, res := range txResults {
|
||||
cids[i] = res.CID
|
||||
txs[i] = res.Data
|
||||
}
|
||||
return cids, txs, nil
|
||||
}
|
||||
|
||||
// RetrieveTransactionsByBlockHash returns the cids and rlp bytes for the transactions corresponding to the provided block hash
|
||||
func (r *Retriever) RetrieveTransactionsByBlockHash(tx *sqlx.Tx, hash common.Hash) ([]string, [][]byte, error) {
|
||||
txResults := make([]ipldResult, 0)
|
||||
if err := tx.Select(&txResults, RetrieveTransactionsByBlockHashPgStr, hash.Hex()); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(txResults))
|
||||
txs := make([][]byte, len(txResults))
|
||||
for i, res := range txResults {
|
||||
cids[i] = res.CID
|
||||
txs[i] = res.Data
|
||||
}
|
||||
return cids, txs, nil
|
||||
}
|
||||
|
||||
// DecodeLeafNode decodes the leaf node data
|
||||
func DecodeLeafNode(node []byte) ([]byte, error) {
|
||||
var nodeElements []interface{}
|
||||
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ok, err := IsLeaf(nodeElements)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected leaf node but found %v", nodeElements)
|
||||
}
|
||||
return nodeElements[1].([]byte), nil
|
||||
}
|
||||
|
||||
// RetrieveReceipts returns the cids and rlp bytes for the receipts corresponding to the provided block hash, number.
|
||||
// cid returned corresponds to the leaf node data which contains the receipt.
|
||||
func (r *Retriever) RetrieveReceipts(tx *sqlx.Tx, hash common.Hash, number uint64) ([]string, [][]byte, []common.Hash, error) {
|
||||
rctResults := make([]ipldResult, 0)
|
||||
if err := tx.Select(&rctResults, RetrieveReceiptsPgStr, hash.Hex(), number); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(rctResults))
|
||||
rcts := make([][]byte, len(rctResults))
|
||||
txs := make([]common.Hash, len(rctResults))
|
||||
|
||||
for i, res := range rctResults {
|
||||
cids[i] = res.CID
|
||||
rcts[i] = res.Data
|
||||
txs[i] = common.HexToHash(res.TxHash)
|
||||
}
|
||||
|
||||
return cids, rcts, txs, nil
|
||||
}
|
||||
|
||||
// RetrieveReceiptsByBlockHash returns the cids and rlp bytes for the receipts corresponding to the provided block hash.
|
||||
// cid returned corresponds to the leaf node data which contains the receipt.
|
||||
func (r *Retriever) RetrieveReceiptsByBlockHash(tx *sqlx.Tx, hash common.Hash) ([]string, [][]byte, []common.Hash, error) {
|
||||
rctResults := make([]ipldResult, 0)
|
||||
if err := tx.Select(&rctResults, RetrieveReceiptsByBlockHashPgStr, hash.Hex()); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(rctResults))
|
||||
rcts := make([][]byte, len(rctResults))
|
||||
txs := make([]common.Hash, len(rctResults))
|
||||
|
||||
for i, res := range rctResults {
|
||||
cids[i] = res.CID
|
||||
nodeVal, err := DecodeLeafNode(res.Data)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
rcts[i] = nodeVal
|
||||
txs[i] = common.HexToHash(res.TxHash)
|
||||
}
|
||||
|
||||
return cids, rcts, txs, nil
|
||||
}
|
||||
|
||||
// RetrieveWithdrawals returns the CIDs and RLP bytes for the withdrawals corresponding to the
|
||||
// provided block hash, number. Returned CIDs correspond to the leaf node data which contains the
|
||||
// withdrawal object.
|
||||
func (r *Retriever) RetrieveWithdrawals(tx *sqlx.Tx, hash common.Hash, number uint64) ([]string, [][]byte, error) {
|
||||
results := make([]ipldResult, 0)
|
||||
if err := tx.Select(&results, RetrieveWithdrawalsPgStr, hash.Hex(), number); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cids := make([]string, len(results))
|
||||
withdrawals := make([][]byte, len(results))
|
||||
|
||||
for i, res := range results {
|
||||
cids[i] = res.CID
|
||||
withdrawals[i] = res.Data
|
||||
}
|
||||
return cids, withdrawals, nil
|
||||
}
|
||||
|
||||
// RetrieveAccountByAddressAndBlockHash returns the state account record for the account corresponding to the provided address and block hash
|
||||
// TODO: ensure this handles deleted accounts appropriately
|
||||
func (r *Retriever) RetrieveAccountByAddressAndBlockHash(address common.Address, hash common.Hash) (StateAccountRecord, error) {
|
||||
var accountResult StateAccountRecord
|
||||
leafKey := crypto.Keccak256Hash(address.Bytes())
|
||||
if err := r.db.Get(&accountResult, RetrieveAccountByLeafKeyAndBlockHashPgStr, leafKey.Hex(), hash.Hex()); err != nil {
|
||||
return StateAccountRecord{}, err
|
||||
}
|
||||
|
||||
if accountResult.Removed {
|
||||
return StateAccountRecord{}, nil
|
||||
}
|
||||
return accountResult, nil
|
||||
}
|
||||
|
||||
// RetrieveStorageAtByAddressAndStorageSlotAndBlockHash returns the storage value corresponding to the provided address, storage slot, and block hash
|
||||
func (r *Retriever) RetrieveStorageAtByAddressAndStorageSlotAndBlockHash(address common.Address, key, hash common.Hash) ([]byte, error) {
|
||||
var storageResult nodeInfo
|
||||
stateLeafKey := crypto.Keccak256Hash(address.Bytes())
|
||||
storageHash := crypto.Keccak256Hash(key.Bytes())
|
||||
if err := r.db.Get(&storageResult,
|
||||
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr,
|
||||
stateLeafKey.Hex(), storageHash.Hex(), hash.Hex()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if storageResult.StateLeafRemoved || storageResult.Removed {
|
||||
return EmptyNodeValue, nil
|
||||
}
|
||||
return storageResult.Value, nil
|
||||
}
|
||||
|
||||
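
// NOTE: illustrative sketch added for clarity; it is not part of the original file.
// The storage lookups above key state and storage leaves by the keccak256 hash of
// the account address and of the 32-byte storage slot, respectively; this helper
// only demonstrates that derivation.
func exampleLeafKeys(addr common.Address, slot common.Hash) (stateLeafKey, storageLeafKey common.Hash) {
	stateLeafKey = crypto.Keccak256Hash(addr.Bytes())
	storageLeafKey = crypto.Keccak256Hash(slot.Bytes())
	return stateLeafKey, storageLeafKey
}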
// RetrieveStorageAndRLP returns the cid and rlp bytes for the storage value corresponding to the
|
||||
// provided address, storage slot, and block hash
|
||||
func (r *Retriever) RetrieveStorageAndRLP(address common.Address, key, hash common.Hash) (string, []byte, error) {
|
||||
var storageResult nodeInfo
|
||||
stateLeafKey := crypto.Keccak256Hash(address.Bytes())
|
||||
storageHash := crypto.Keccak256Hash(key.Bytes())
|
||||
if err := r.db.Get(&storageResult,
|
||||
RetrieveStorageAndRLPByAddressHashAndLeafKeyAndBlockHashPgStr,
|
||||
stateLeafKey.Hex(), storageHash.Hex(), hash.Hex()); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if storageResult.StateLeafRemoved || storageResult.Removed {
|
||||
return "", EmptyNodeValue, nil
|
||||
}
|
||||
return storageResult.CID, storageResult.Data, nil
|
||||
}
|
||||
@ -1,182 +0,0 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"github.com/jmoiron/sqlx"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth/test_helpers"
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/shared"
|
||||
)
|
||||
|
||||
var _ = Describe("Retriever", func() {
|
||||
var (
|
||||
db *sqlx.DB
|
||||
diffIndexer interfaces.StateDiffIndexer
|
||||
retriever *eth.Retriever
|
||||
ctx = context.Background()
|
||||
)
|
||||
BeforeEach(func() {
|
||||
db = shared.SetupDB()
|
||||
diffIndexer = shared.SetupTestStateDiffIndexer(ctx, params.TestChainConfig, test_helpers.Genesis.Hash())
|
||||
|
||||
retriever = eth.NewRetriever(db)
|
||||
})
|
||||
AfterEach(func() {
|
||||
shared.TearDownDB(db)
|
||||
db.Close()
|
||||
})
|
||||
|
||||
It("Retrieve", func() {
|
||||
tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer tx.RollbackOnFailure(err)
|
||||
for _, node := range test_helpers.MockStateNodes {
|
||||
err = diffIndexer.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
err = tx.Submit()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
Describe("RetrieveFirstBlockNumber", func() {
|
||||
It("Throws an error if there are no blocks in the database", func() {
|
||||
_, err := retriever.RetrieveFirstBlockNumber()
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
It("Gets the number of the first block that has data in the database", func() {
|
||||
tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer tx.RollbackOnFailure(err)
|
||||
|
||||
err = tx.Submit()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
num, err := retriever.RetrieveFirstBlockNumber()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(num).To(Equal(int64(1)))
|
||||
})
|
||||
|
||||
It("Gets the number of the first block that has data in the database", func() {
|
||||
payload := test_helpers.MockConvertedPayload
|
||||
payload.Block = newMockBlock(1010101)
|
||||
tx, err := diffIndexer.PushBlock(payload.Block, payload.Receipts, payload.Block.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer tx.RollbackOnFailure(err)
|
||||
|
||||
err = tx.Submit()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
num, err := retriever.RetrieveFirstBlockNumber()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(num).To(Equal(int64(1010101)))
|
||||
})
|
||||
|
||||
It("Gets the number of the first block that has data in the database", func() {
|
||||
payload1 := test_helpers.MockConvertedPayload
|
||||
payload1.Block = newMockBlock(1010101)
|
||||
payload2 := payload1
|
||||
payload2.Block = newMockBlock(5)
|
||||
tx, err := diffIndexer.PushBlock(payload1.Block, payload1.Receipts, payload1.Block.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer tx.RollbackOnFailure(err)
|
||||
err = tx.Submit()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
tx, err = diffIndexer.PushBlock(payload2.Block, payload2.Receipts, payload2.Block.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer tx.RollbackOnFailure(err)
|
||||
err = tx.Submit()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
num, err := retriever.RetrieveFirstBlockNumber()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(num).To(Equal(int64(5)))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("RetrieveLastBlockNumber", func() {
|
||||
It("Throws an error if there are no blocks in the database", func() {
|
||||
_, err := retriever.RetrieveLastBlockNumber()
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
It("Gets the number of the latest block that has data in the database", func() {
|
||||
tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer tx.RollbackOnFailure(err)
|
||||
err = tx.Submit()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
num, err := retriever.RetrieveLastBlockNumber()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(num).To(Equal(int64(1)))
|
||||
})
|
||||
|
||||
It("Gets the number of the latest block that has data in the database", func() {
|
||||
payload := test_helpers.MockConvertedPayload
|
||||
payload.Block = newMockBlock(1010101)
|
||||
tx, err := diffIndexer.PushBlock(payload.Block, payload.Receipts, payload.Block.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer tx.RollbackOnFailure(err)
|
||||
|
||||
err = tx.Submit()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
num, err := retriever.RetrieveLastBlockNumber()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(num).To(Equal(int64(1010101)))
|
||||
})
|
||||
|
||||
It("Gets the number of the latest block that has data in the database", func() {
|
||||
payload1 := test_helpers.MockConvertedPayload
|
||||
payload1.Block = newMockBlock(1010101)
|
||||
payload2 := payload1
|
||||
payload2.Block = newMockBlock(5)
|
||||
tx, err := diffIndexer.PushBlock(payload1.Block, payload1.Receipts, payload1.Block.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer tx.RollbackOnFailure(err)
|
||||
err = tx.Submit()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
tx, err = diffIndexer.PushBlock(payload2.Block, payload2.Receipts, payload2.Block.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer tx.RollbackOnFailure(err)
|
||||
err = tx.Submit()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
num, err := retriever.RetrieveLastBlockNumber()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(num).To(Equal(int64(1010101)))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func newMockBlock(blockNumber uint64) *types.Block {
	header := test_helpers.MockHeader
	header.Number.SetUint64(blockNumber)
	return types.NewBlock(&header, test_helpers.MockTransactions, nil, test_helpers.MockReceipts, trie.NewEmpty(nil))
}
|
||||
292
pkg/eth/sql.go
@ -1,292 +0,0 @@
|
||||
package eth
|
||||
|
||||
const (
|
||||
RetrieveHeaderByHashPgStr = `
|
||||
SELECT header_cids.cid,
|
||||
blocks.data
|
||||
FROM ipld.blocks,
|
||||
eth.header_cids
|
||||
WHERE header_cids.block_hash = $1
|
||||
AND header_cids.canonical
|
||||
AND blocks.key = header_cids.cid
|
||||
AND blocks.block_number = header_cids.block_number
|
||||
LIMIT 1
|
||||
`
|
||||
RetrieveUnclesPgStr = `SELECT uncle_cids.cid, data
|
||||
FROM eth.uncle_cids
|
||||
INNER JOIN eth.header_cids ON (
|
||||
uncle_cids.header_id = header_cids.block_hash
|
||||
AND uncle_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN ipld.blocks ON (
|
||||
uncle_cids.cid = blocks.key
|
||||
AND uncle_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE header_cids.block_hash = $1
|
||||
AND header_cids.block_number = $2
|
||||
ORDER BY uncle_cids.parent_hash
|
||||
LIMIT 1`
|
||||
RetrieveUnclesByBlockHashPgStr = `SELECT uncle_cids.cid, data
|
||||
FROM eth.uncle_cids
|
||||
INNER JOIN eth.header_cids ON (
|
||||
uncle_cids.header_id = header_cids.block_hash
|
||||
AND uncle_cids.block_number = header_cids.block_number
|
||||
)
|
||||
INNER JOIN ipld.blocks ON (
|
||||
uncle_cids.cid = blocks.key
|
||||
AND uncle_cids.block_number = blocks.block_number
|
||||
)
|
||||
WHERE header_cids.block_hash = $1
|
||||
ORDER BY uncle_cids.parent_hash
|
||||
LIMIT 1`
|
||||
RetrieveTransactionsPgStr = `
|
||||
SELECT transaction_cids.cid,
|
||||
blocks.data
|
||||
FROM eth.transaction_cids,
|
||||
eth.header_cids,
|
||||
ipld.blocks
|
||||
WHERE header_cids.block_hash = $1
|
||||
AND header_cids.block_number = $2
|
||||
AND header_cids.canonical
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
AND transaction_cids.header_id = header_cids.block_hash
|
||||
AND blocks.block_number = header_cids.block_number
|
||||
AND blocks.key = transaction_cids.cid
|
||||
ORDER BY eth.transaction_cids.index ASC
|
||||
`
|
||||
RetrieveTransactionsByBlockHashPgStr = `
|
||||
SELECT transaction_cids.cid,
|
||||
blocks.data
|
||||
FROM eth.transaction_cids,
|
||||
eth.header_cids,
|
||||
ipld.blocks
|
||||
WHERE header_cids.block_hash = $1
|
||||
AND header_cids.canonical
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
AND transaction_cids.header_id = header_cids.block_hash
|
||||
AND blocks.block_number = header_cids.block_number
|
||||
AND blocks.key = transaction_cids.cid
|
||||
ORDER BY eth.transaction_cids.index ASC
|
||||
`
|
||||
RetrieveReceiptsPgStr = `
|
||||
SELECT receipt_cids.cid,
|
||||
blocks.data,
|
||||
eth.transaction_cids.tx_hash
|
||||
FROM eth.receipt_cids,
|
||||
eth.transaction_cids,
|
||||
eth.header_cids,
|
||||
ipld.blocks
|
||||
WHERE header_cids.block_hash = $1
|
||||
AND header_cids.block_number = $2
|
||||
AND header_cids.canonical
|
||||
AND receipt_cids.block_number = header_cids.block_number
|
||||
AND receipt_cids.header_id = header_cids.block_hash
|
||||
AND receipt_cids.TX_ID = transaction_cids.TX_HASH
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
AND transaction_cids.header_id = header_cids.block_hash
|
||||
AND blocks.block_number = header_cids.block_number
|
||||
AND blocks.key = receipt_cids.cid
|
||||
ORDER BY eth.transaction_cids.index ASC
|
||||
`
|
||||
RetrieveReceiptsByBlockHashPgStr = `
|
||||
SELECT receipt_cids.cid,
|
||||
blocks.data,
|
||||
eth.transaction_cids.tx_hash
|
||||
FROM eth.receipt_cids,
|
||||
eth.transaction_cids,
|
||||
eth.header_cids,
|
||||
ipld.blocks
|
||||
WHERE header_cids.block_hash = $1
|
||||
AND header_cids.canonical
|
||||
AND receipt_cids.block_number = header_cids.block_number
|
||||
AND receipt_cids.header_id = header_cids.block_hash
|
||||
AND receipt_cids.TX_ID = transaction_cids.TX_HASH
|
||||
AND transaction_cids.block_number = header_cids.block_number
|
||||
AND transaction_cids.header_id = header_cids.block_hash
|
||||
AND blocks.block_number = header_cids.block_number
|
||||
AND blocks.key = receipt_cids.cid
|
||||
ORDER BY eth.transaction_cids.index ASC
|
||||
`
|
||||
RetrieveWithdrawalsPgStr = `
|
||||
SELECT withdrawal_cids.cid,
|
||||
blocks.data
|
||||
FROM eth.withdrawal_cids
|
||||
JOIN eth.header_cids
|
||||
ON header_cids.block_hash = $1
|
||||
AND header_cids.block_number = $2
|
||||
AND header_cids.canonical
|
||||
AND withdrawal_cids.block_number = header_cids.block_number
|
||||
AND withdrawal_cids.header_id = header_cids.block_hash
|
||||
JOIN ipld.blocks
|
||||
ON blocks.block_number = header_cids.block_number
|
||||
AND blocks.key = withdrawal_cids.cid
|
||||
ORDER BY eth.withdrawal_cids.index ASC`
|
||||
|
||||
RetrieveAccountByLeafKeyAndBlockHashPgStr = `
|
||||
SELECT state_cids.nonce,
|
||||
state_cids.balance,
|
||||
state_cids.storage_root,
|
||||
state_cids.code_hash,
|
||||
state_cids.removed
|
||||
FROM eth.state_cids,
|
||||
eth.header_cids
|
||||
WHERE state_cids.state_leaf_key = $1
|
||||
AND state_cids.block_number <=
|
||||
(SELECT block_number
|
||||
FROM eth.header_cids
|
||||
WHERE block_hash = $2
|
||||
LIMIT 1)
|
||||
AND header_cids.canonical
|
||||
AND state_cids.header_id = header_cids.block_hash
|
||||
AND state_cids.block_number = header_cids.block_number
|
||||
ORDER BY state_cids.block_number DESC
|
||||
LIMIT 1
|
||||
`
|
||||
RetrieveFilteredGQLLogs = `SELECT CAST(eth.log_cids.block_number as TEXT), eth.log_cids.header_id as block_hash,
|
||||
eth.log_cids.cid, eth.log_cids.index, eth.log_cids.rct_id, eth.log_cids.address,
|
||||
eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
|
||||
data, eth.receipt_cids.cid AS rct_cid, eth.receipt_cids.post_status, eth.receipt_cids.tx_id AS tx_hash
|
||||
FROM eth.log_cids, eth.receipt_cids, ipld.blocks
|
||||
WHERE eth.log_cids.rct_id = receipt_cids.tx_id
|
||||
AND eth.log_cids.header_id = receipt_cids.header_id
|
||||
AND eth.log_cids.block_number = receipt_cids.block_number
|
||||
AND log_cids.cid = blocks.key
|
||||
AND log_cids.block_number = blocks.block_number
|
||||
AND receipt_cids.header_id = $1`
|
||||
RetrieveFilteredLogsRange = `SELECT CAST(eth.log_cids.block_number as TEXT), eth.log_cids.cid, eth.log_cids.index, eth.log_cids.rct_id,
|
||||
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
|
||||
eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index,
|
||||
ipld.blocks.data, eth.receipt_cids.cid AS rct_cid, eth.receipt_cids.post_status, log_cids.header_id AS block_hash
|
||||
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, ipld.blocks
|
||||
WHERE eth.log_cids.block_number >= $1 AND eth.log_cids.block_number <= $2
|
||||
AND eth.log_cids.header_id IN (SELECT block_hash from eth.header_cids where eth.header_cids.block_number >= $1 AND eth.header_cids.block_number <= $2 and eth.header_cids.canonical)
|
||||
AND eth.transaction_cids.block_number = eth.log_cids.block_number
|
||||
AND eth.transaction_cids.header_id = eth.log_cids.header_id
|
||||
AND eth.receipt_cids.block_number = eth.log_cids.block_number
|
||||
AND eth.receipt_cids.header_id = eth.log_cids.header_id
|
||||
AND eth.receipt_cids.tx_id = eth.log_cids.rct_id
|
||||
AND eth.receipt_cids.tx_id = eth.transaction_cids.tx_hash
|
||||
AND ipld.blocks.block_number = eth.log_cids.block_number
|
||||
AND ipld.blocks.key = eth.log_cids.cid`
|
||||
|
||||
RetrieveFilteredLogsSingle = `SELECT CAST(eth.log_cids.block_number as TEXT), eth.log_cids.cid, eth.log_cids.index, eth.log_cids.rct_id,
|
||||
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
|
||||
eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index,
|
||||
ipld.blocks.data, eth.receipt_cids.cid AS rct_cid, eth.receipt_cids.post_status, log_cids.header_id AS block_hash
|
||||
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, ipld.blocks
|
||||
WHERE eth.log_cids.header_id = $1
|
||||
AND eth.transaction_cids.block_number = eth.log_cids.block_number
|
||||
AND eth.transaction_cids.header_id = eth.log_cids.header_id
|
||||
AND eth.receipt_cids.block_number = eth.log_cids.block_number
|
||||
AND eth.receipt_cids.header_id = eth.log_cids.header_id
|
||||
AND eth.receipt_cids.tx_id = eth.log_cids.rct_id
|
||||
AND eth.receipt_cids.tx_id = eth.transaction_cids.tx_hash
|
||||
AND ipld.blocks.block_number = eth.log_cids.block_number
|
||||
AND ipld.blocks.key = eth.log_cids.cid`
|
||||
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `
|
||||
SELECT storage_cids.cid,
|
||||
storage_cids.val,
|
||||
storage_cids.block_number,
|
||||
storage_cids.removed,
|
||||
was_state_leaf_removed_by_number(storage_cids.state_leaf_key, storage_cids.block_number) AS state_leaf_removed
|
||||
FROM eth.storage_cids, eth.header_cids
|
||||
WHERE header_cids.block_number <= (SELECT block_number from eth.header_cids where block_hash = $3 LIMIT 1)
|
||||
AND header_cids.canonical
|
||||
AND storage_cids.block_number = header_cids.block_number
|
||||
AND storage_cids.header_id = header_cids.block_hash
|
||||
AND storage_cids.storage_leaf_key = $2
|
||||
AND storage_cids.state_leaf_key = $1
|
||||
ORDER BY storage_cids.block_number DESC LIMIT 1
|
||||
`
|
||||
RetrieveStorageAndRLPByAddressHashAndLeafKeyAndBlockHashPgStr = `
|
||||
SELECT storage_cids.cid,
|
||||
storage_cids.val,
|
||||
storage_cids.block_number,
|
||||
storage_cids.removed,
|
||||
was_state_leaf_removed_by_number(storage_cids.state_leaf_key, storage_cids.block_number) AS state_leaf_removed,
|
||||
blocks.data
|
||||
FROM eth.storage_cids, eth.header_cids, ipld.blocks
|
||||
WHERE header_cids.block_number <= (SELECT block_number from eth.header_cids where block_hash = $3 LIMIT 1)
|
||||
AND header_cids.canonical
|
||||
AND storage_cids.block_number = header_cids.block_number
|
||||
AND storage_cids.header_id = header_cids.block_hash
|
||||
AND storage_cids.storage_leaf_key = $2
|
||||
AND storage_cids.state_leaf_key = $1
|
||||
AND blocks.key = storage_cids.cid
|
||||
AND blocks.block_number = storage_cids.block_number
|
||||
ORDER BY storage_cids.block_number DESC LIMIT 1
|
||||
`
|
||||
RetrieveCanonicalBlockHashByNumber = `SELECT block_hash FROM eth.header_cids WHERE block_number = $1 AND canonical`
|
||||
RetrieveCanonicalHeaderByNumber = `
|
||||
SELECT header_cids.cid,
|
||||
blocks.data
|
||||
FROM ipld.blocks,
|
||||
eth.header_cids
|
||||
WHERE header_cids.block_number = $1
|
||||
AND header_cids.canonical
|
||||
AND blocks.key = header_cids.cid
|
||||
AND blocks.block_number = header_cids.block_number
|
||||
LIMIT 1
|
||||
`
|
||||
RetrieveCanonicalHeaderAndHashByNumber = `
|
||||
SELECT blocks.data,
|
||||
header_cids.block_hash
|
||||
FROM ipld.blocks,
|
||||
eth.header_cids
|
||||
WHERE header_cids.block_number = $1
|
||||
AND header_cids.canonical
|
||||
AND blocks.key = header_cids.cid
|
||||
AND blocks.block_number = header_cids.block_number
|
||||
LIMIT 1
|
||||
`
|
||||
RetrieveTD = `SELECT CAST(td as TEXT) FROM eth.header_cids
|
||||
WHERE header_cids.block_hash = $1`
|
||||
RetrieveRPCTransaction = `
|
||||
SELECT blocks.data,
|
||||
transaction_cids.header_id,
|
||||
transaction_cids.block_number,
|
||||
transaction_cids.index
|
||||
FROM eth.transaction_cids,
|
||||
ipld.blocks,
|
||||
eth.header_cids
|
||||
WHERE transaction_cids.TX_HASH = $1
|
||||
AND header_cids.block_hash = transaction_cids.header_id
|
||||
AND header_cids.block_number = transaction_cids.block_number
|
||||
AND header_cids.canonical
|
||||
AND blocks.key = transaction_cids.cid
|
||||
AND blocks.block_number = transaction_cids.block_number
|
||||
`
|
||||
RetrieveCodeHashByLeafKeyAndBlockHash = `
|
||||
SELECT state_cids.code_hash
|
||||
FROM eth.state_cids,
|
||||
eth.header_cids
|
||||
WHERE
|
||||
state_cids.state_leaf_key = $1
|
||||
AND state_cids.block_number <=
|
||||
(SELECT block_number
|
||||
FROM eth.header_cids
|
||||
WHERE block_hash = $2
|
||||
LIMIT 1)
|
||||
AND header_cids.canonical
|
||||
AND state_cids.header_id = header_cids.block_hash
|
||||
AND state_cids.block_number = header_cids.block_number
|
||||
ORDER BY state_cids.block_number DESC
|
||||
LIMIT 1
|
||||
`
|
||||
RetrieveCodeByKey = `SELECT data FROM ipld.blocks WHERE key = $1`
|
||||
)
|
||||
|
||||
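
// NOTE: illustrative sketch added for clarity; it is not part of the original file.
// It shows how one of the canned statements above is typically executed with sqlx,
// scanning into the ipldResult row type defined below. The *sqlx.DB handle and the
// sqlx import are assumed to be available in this package.
func exampleHeaderLookup(db *sqlx.DB, blockHash string) (cid string, rlpData []byte, err error) {
	res := new(ipldResult)
	if err = db.Get(res, RetrieveHeaderByHashPgStr, blockHash); err != nil {
		return "", nil, err
	}
	return res.CID, res.Data, nil
}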
type ipldResult struct {
	CID    string `db:"cid"`
	Data   []byte `db:"data"`
	TxHash string `db:"tx_hash"`
}

type nodeInfo struct {
	CID              string `db:"cid"`
	Value            []byte `db:"val"`
	BlockNumber      string `db:"block_number"`
	Data             []byte `db:"data"`
	Removed          bool   `db:"removed"`
	StateLeafRemoved bool   `db:"state_leaf_removed"`
}

@ -1,71 +0,0 @@
|
||||
package eth_state_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/onsi/gomega/format"
|
||||
"github.com/onsi/gomega/types"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
|
||||
)
|
||||
|
||||
func CheckGetSliceResponse(sliceResponse eth.GetSliceResponse, expectedResponse eth.GetSliceResponse) {
|
||||
Expect(sliceResponse.SliceID).To(Equal(expectedResponse.SliceID))
|
||||
Expect(sliceResponse.TrieNodes).To(Equal(expectedResponse.TrieNodes))
|
||||
Expect(sliceResponse.Leaves).To(Equal(expectedResponse.Leaves))
|
||||
Expect(sliceResponse.MetaData.NodeStats).To(Equal(expectedResponse.MetaData.NodeStats))
|
||||
}
|
||||
|
||||
// EqualBigInt compares a hexutil.Big for equality with a big.Int or hexutil.Big value.
|
||||
// It is an error for both actual and expected to be nil. Use BeNil() instead.
|
||||
func EqualBigInt(expected *big.Int) types.GomegaMatcher {
|
||||
return &BigIntEqualMatcher{
|
||||
Expected: expected,
|
||||
}
|
||||
}
|
||||
|
||||
// EqualBigHex compares a hexutil.Big for equality with a big.Int or hexutil.Big value.
|
||||
// It is an error for both actual and expected to be nil. Use BeNil() instead.
|
||||
func EqualBigHex(expected *hexutil.Big) types.GomegaMatcher {
|
||||
return &BigIntEqualMatcher{
|
||||
Expected: expected.ToInt(),
|
||||
}
|
||||
}
|
||||
|
||||
type BigIntEqualMatcher struct {
|
||||
Expected *big.Int
|
||||
}
|
||||
|
||||
func (matcher *BigIntEqualMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
if actual == nil && matcher.Expected == nil {
|
||||
return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
|
||||
}
|
||||
|
||||
var asInt *big.Int
|
||||
switch casted := actual.(type) {
|
||||
case *big.Int:
|
||||
asInt = casted
|
||||
case *hexutil.Big:
|
||||
asInt = (*big.Int)(casted)
|
||||
default:
|
||||
return false, fmt.Errorf("BigIntEqualMatcher expects a hexutil.Big or big.Int. Got:\n%s", format.Object(actual, 1))
|
||||
}
|
||||
return matcher.Expected.Cmp(asInt) == 0, nil
|
||||
}
|
||||
|
||||
func (matcher *BigIntEqualMatcher) FailureMessage(actual interface{}) (message string) {
|
||||
actualString, actualOK := actual.(string)
|
||||
expectedString := matcher.Expected.String()
|
||||
if actualOK {
|
||||
return format.MessageWithDiff(actualString, "to equal", expectedString)
|
||||
}
|
||||
|
||||
return format.Message(actual, "to equal", matcher.Expected)
|
||||
}
|
||||
|
||||
func (matcher *BigIntEqualMatcher) NegatedFailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, "not to equal", matcher.Expected)
|
||||
}
|
||||
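
// NOTE: illustrative sketch added for clarity; it is not part of the original file.
// Typical use of the matcher above from within a Ginkgo spec body; the balance
// value is hypothetical.
func exampleEqualBigIntUsage() {
	balance := (*hexutil.Big)(big.NewInt(10000))
	Expect(balance).To(EqualBigInt(big.NewInt(10000)))
}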
@ -1,836 +0,0 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth_state_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
|
||||
"github.com/cerc-io/plugeth-statediff"
|
||||
"github.com/cerc-io/plugeth-statediff/adapt"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/jmoiron/sqlx"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth/test_helpers"
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/shared"
|
||||
)
|
||||
|
||||
var (
|
||||
parsedABI abi.ABI
|
||||
randomAddress = common.HexToAddress("0x9F4203bd7a11aCB94882050E6f1C3ab14BBaD3D9")
|
||||
randomHash = crypto.Keccak256Hash(randomAddress.Bytes())
|
||||
number = rpc.BlockNumber(test_helpers.BlockNumber1)
|
||||
|
||||
block1StateRoot = common.HexToHash("0xa1f614839ebdd58677df2c9d66a3e0acc9462acc49fad6006d0b6e5d2b98ed21")
|
||||
rootDataHashBlock1 = "a1f614839ebdd58677df2c9d66a3e0acc9462acc49fad6006d0b6e5d2b98ed21"
|
||||
rootDataBlock1 = "f871a0577652b625b77bdb5bf77bc43f3125cad7464d679d1575565277d3611b8053e780808080a0fe889f10e5db8f2c2bf355928152a17f6e3bb99a9241ac6d84c77e6264509c798080808080808080a011db0cda34a896dabeb6839bb06a38f49514cfa486435984eb013b7df9ee85c58080"
|
||||
|
||||
block5StateRoot = common.HexToHash("0x572ef3b6b3d5164ed9d83341073f13af4d60a3aab38989b6c03917544f186a43")
|
||||
rootDataHashBlock5 = "572ef3b6b3d5164ed9d83341073f13af4d60a3aab38989b6c03917544f186a43"
|
||||
rootDataBlock5 = "f8b1a0408dd81f6cd5c614f91ecd9faa01d5feba936e0314ba04f99c74069ba819e0f280808080a0b356351d60bc9894cf1f1d6cb68c815f0131d50f1da83c4023a09ec855cfff91a0180d554b171f6acf8295e376266df2311f68975d74c02753b85707d308f703e48080808080a0422c7cc4fa407603f0879a0ecaa809682ce98dbef30551a34bcce09fa3ac995180a02d264f591aa3fa9df3cbeea190a4fd8d5483ddfb1b85603b2a006d179f79ba358080"
|
||||
|
||||
account1DataHash = "180d554b171f6acf8295e376266df2311f68975d74c02753b85707d308f703e4"
|
||||
account1Data = "f869a03114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45b846f8440180a04bd45c41d863f1bcf5da53364387fcdd64f77924d388a4df47e64132273fb4c0a0ba79854f3dbf6505fdbb085888e25fae8fa97288c5ce8fcd39aa589290d9a659"
|
||||
account1StateLeafKey = "0x6114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45"
|
||||
account1Code = "608060405234801561001057600080fd5b50600436106100415760003560e01c806343d726d61461004657806365f3c31a1461005057806373d4a13a1461007e575b600080fd5b61004e61009c565b005b61007c6004803603602081101561006657600080fd5b810190808035906020019092919050505061017b565b005b610086610185565b6040518082815260200191505060405180910390f35b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610141576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602281526020018061018c6022913960400191505060405180910390fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b8060018190555050565b6001548156fe4f6e6c79206f776e65722063616e2063616c6c20746869732066756e6374696f6e2ea265627a7a723158205ba91466129f45285f53176d805117208c231ec6343d7896790e6fc4165b802b64736f6c63430005110032"
|
||||
account2DataHash = "2d264f591aa3fa9df3cbeea190a4fd8d5483ddfb1b85603b2a006d179f79ba35"
|
||||
account2Data = "f871a03926db69aaced518e9b9f0f434a473e7174109c943548bb8f23be41ca76d9ad2b84ef84c02881bc16d674ec82710a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
account3DataHash = "408dd81f6cd5c614f91ecd9faa01d5feba936e0314ba04f99c74069ba819e0f2"
|
||||
account3Data = "f86da030bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2ab84af848058405f5b608a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
account4DataHash = "422c7cc4fa407603f0879a0ecaa809682ce98dbef30551a34bcce09fa3ac9951"
|
||||
account4Data = "f871a03957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45b84ef84c80883782dace9d9003e8a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
account5DataHash = "b356351d60bc9894cf1f1d6cb68c815f0131d50f1da83c4023a09ec855cfff91"
|
||||
account5Data = "f871a03380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312ab84ef84c80883782dace9d900000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
|
||||
contractStorageRootBlock5 = common.HexToHash("0x4bd45c41d863f1bcf5da53364387fcdd64f77924d388a4df47e64132273fb4c0")
|
||||
storageRootDataHashBlock5 = "4bd45c41d863f1bcf5da53364387fcdd64f77924d388a4df47e64132273fb4c0"
|
||||
storageRootDataBlock5 = "f838a120290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5639594703c4b2bd70c169f5717101caee543299fc946c7"
|
||||
|
||||
contractStorageRootBlock4 = common.HexToHash("0x64ad893aa7937d05983daa8b7d221acdf1c116433f29dcd1ea69f16fa96fce68")
|
||||
storageRootDataHashBlock4 = "64ad893aa7937d05983daa8b7d221acdf1c116433f29dcd1ea69f16fa96fce68"
|
||||
storageRootDataBlock4 = "f8518080a08e8ada45207a7d2f19dd6f0ee4955cec64fa5ebef29568b5c449a4c4dd361d558080808080808080a07b58866e3801680bea90c82a80eb08889ececef107b8b504ae1d1a1e1e17b7af8080808080"
|
||||
|
||||
storageNode1DataHash = "7b58866e3801680bea90c82a80eb08889ececef107b8b504ae1d1a1e1e17b7af"
|
||||
storageNode1Data = "e2a0310e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf609"
|
||||
storageNode2DataHash = "8e8ada45207a7d2f19dd6f0ee4955cec64fa5ebef29568b5c449a4c4dd361d55"
|
||||
storageNode2Data = "f7a0390decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e5639594703c4b2bd70c169f5717101caee543299fc946c7"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// load abi
|
||||
abiBytes, err := os.ReadFile("../test_helpers/abi.json")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
parsedABI, err = abi.JSON(bytes.NewReader(abiBytes))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
const chainLength = 5
|
||||
|
||||
var (
|
||||
blocks []*types.Block
|
||||
receipts []types.Receipts
|
||||
chain *core.BlockChain
|
||||
db *sqlx.DB
|
||||
api *eth.PublicEthAPI
|
||||
backend *eth.Backend
|
||||
chainConfig = &*params.TestChainConfig
|
||||
mockTD = big.NewInt(1337)
|
||||
expectedCanonicalHeader map[string]interface{}
|
||||
ctx = context.Background()
|
||||
)
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
chainConfig.LondonBlock = big.NewInt(100)
|
||||
|
||||
// db and type initializations
|
||||
var err error
|
||||
db = shared.SetupDB()
|
||||
|
||||
backend, err = eth.NewEthBackend(db, ð.Config{
|
||||
ChainConfig: chainConfig,
|
||||
VMConfig: vm.Config{},
|
||||
RPCGasCap: big.NewInt(10000000000), // Max gas capacity for a rpc call.
|
||||
GroupCacheConfig: &shared.GroupCacheConfig{
|
||||
StateDB: shared.GroupConfig{
|
||||
Name: "eth_state_test",
|
||||
CacheSizeInMB: 8,
|
||||
CacheExpiryInMins: 60,
|
||||
LogStatsIntervalInSecs: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
api, _ = eth.NewPublicEthAPI(backend, nil, eth.APIConfig{StateDiffTimeout: shared.DefaultStateDiffTimeout})
|
||||
|
||||
// make the test blockchain (and state)
|
||||
blocks, receipts, chain = test_helpers.MakeChain(chainLength, test_helpers.Genesis, test_helpers.TestChainGen, chainConfig)
|
||||
|
||||
transformer := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
|
||||
params := statediff.Params{}
|
||||
canonicalHeader := blocks[1].Header()
|
||||
expectedCanonicalHeader = map[string]interface{}{
|
||||
"number": (*hexutil.Big)(canonicalHeader.Number),
|
||||
"hash": canonicalHeader.Hash(),
|
||||
"parentHash": canonicalHeader.ParentHash,
|
||||
"nonce": canonicalHeader.Nonce,
|
||||
"mixHash": canonicalHeader.MixDigest,
|
||||
"sha3Uncles": canonicalHeader.UncleHash,
|
||||
"logsBloom": canonicalHeader.Bloom,
|
||||
"stateRoot": canonicalHeader.Root,
|
||||
"miner": canonicalHeader.Coinbase,
|
||||
"difficulty": (*hexutil.Big)(canonicalHeader.Difficulty),
|
||||
"extraData": hexutil.Bytes([]byte{}),
|
||||
"gasLimit": hexutil.Uint64(canonicalHeader.GasLimit),
|
||||
"gasUsed": hexutil.Uint64(canonicalHeader.GasUsed),
|
||||
"timestamp": hexutil.Uint64(canonicalHeader.Time),
|
||||
"transactionsRoot": canonicalHeader.TxHash,
|
||||
"receiptsRoot": canonicalHeader.ReceiptHash,
|
||||
"totalDifficulty": (*hexutil.Big)(mockTD),
|
||||
}
|
||||
|
||||
// Insert some non-canonical data into the database so that we test our ability to discern canonicity
|
||||
// NOTE: Non-canonical blocks must come first, because the statediffer will assume the most recent block it is
|
||||
// provided at a certain height is canonical. This is true inside geth, but not necessarily inside this test.
|
||||
indexAndPublisher := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
|
||||
|
||||
tx, err := indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer tx.RollbackOnFailure(err)
|
||||
|
||||
err = tx.Submit()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// The non-canonical header has a child
|
||||
tx, err = indexAndPublisher.PushBlock(test_helpers.MockChild, test_helpers.MockReceipts, test_helpers.MockChild.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer tx.RollbackOnFailure(err)
|
||||
|
||||
err = tx.Submit()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// iterate over the blocks, generating statediff payloads, and transforming the data into Postgres
|
||||
builder := statediff.NewBuilder(adapt.GethStateView(chain.StateCache()))
for i, block := range blocks {
var args statediff.Args
var rcts types.Receipts
if i == 0 {
args = statediff.Args{
OldStateRoot: common.Hash{},
NewStateRoot: block.Root(),
BlockNumber: block.Number(),
BlockHash: block.Hash(),
}
} else {
args = statediff.Args{
OldStateRoot: blocks[i-1].Root(),
NewStateRoot: block.Root(),
BlockNumber: block.Number(),
BlockHash: block.Hash(),
}
rcts = receipts[i-1]
}
diff, err := builder.BuildStateDiffObject(args, params)
Expect(err).ToNot(HaveOccurred())
tx, err := transformer.PushBlock(block, rcts, mockTD)
Expect(err).ToNot(HaveOccurred())
defer tx.RollbackOnFailure(err)

for _, node := range diff.Nodes {
err = transformer.PushStateNode(tx, node, block.Hash().String())
Expect(err).ToNot(HaveOccurred())
}

for _, ipld := range diff.IPLDs {
err = transformer.PushIPLD(tx, ipld)
Expect(err).ToNot(HaveOccurred())
}

err = tx.Submit()
Expect(err).ToNot(HaveOccurred())
}

})

var _ = AfterSuite(func() {
shared.TearDownDB(db)
chain.Stop()
})

var _ = Describe("eth state reading tests", func() {

Describe("eth_call", func() {
It("Applies call args (tx data) on top of state, returning the result (e.g. a Getter method call)", func() {
data, err := parsedABI.Pack("data")
Expect(err).ToNot(HaveOccurred())
bdata := hexutil.Bytes(data)
callArgs := eth.CallArgs{
To: &test_helpers.ContractAddr,
Data: &bdata,
}

// Before contract deployment, returns nil
res, err := api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(0), nil)
Expect(err).ToNot(HaveOccurred())
Expect(res).To(BeNil())

res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(1), nil)
Expect(err).ToNot(HaveOccurred())
Expect(res).To(BeNil())

// After deployment
res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(2), nil)
Expect(err).ToNot(HaveOccurred())
expectedRes := hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
Expect(res).To(Equal(expectedRes))

res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(3), nil)
Expect(err).ToNot(HaveOccurred())
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003"))
Expect(res).To(Equal(expectedRes))

res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(4), nil)
Expect(err).ToNot(HaveOccurred())
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000009"))
Expect(res).To(Equal(expectedRes))

res, err = api.Call(context.Background(), callArgs, rpc.BlockNumberOrHashWithNumber(5), nil)
Expect(err).ToNot(HaveOccurred())
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"))
Expect(res).To(Equal(expectedRes))
})
})

Describe("eth_getBalance", func() {
|
||||
var (
|
||||
expectedContractBalance = (*hexutil.Big)(common.Big0)
|
||||
expectedBankBalanceBlock0 = (*hexutil.Big)(test_helpers.TestBankFunds)
|
||||
|
||||
expectedAcct1BalanceBlock1 = (*hexutil.Big)(big.NewInt(10000))
|
||||
expectedBankBalanceBlock1 = (*hexutil.Big)(new(big.Int).Sub(test_helpers.TestBankFunds, big.NewInt(10000)))
|
||||
|
||||
expectedAcct2BalanceBlock2 = (*hexutil.Big)(big.NewInt(1000))
|
||||
expectedBankBalanceBlock2 = (*hexutil.Big)(new(big.Int).Sub(expectedBankBalanceBlock1.ToInt(), big.NewInt(1000)))
|
||||
|
||||
expectedAcct2BalanceBlock3 = (*hexutil.Big)(new(big.Int).Add(expectedAcct2BalanceBlock2.ToInt(), test_helpers.MiningReward))
|
||||
|
||||
expectedAcct2BalanceBlock4 = (*hexutil.Big)(new(big.Int).Add(expectedAcct2BalanceBlock3.ToInt(), test_helpers.MiningReward))
|
||||
|
||||
expectedAcct1BalanceBlock5 = (*hexutil.Big)(new(big.Int).Add(expectedAcct1BalanceBlock1.ToInt(), test_helpers.MiningReward))
|
||||
)
|
||||
|
||||
It("Retrieves account balance by block number", func() {
|
||||
bal, err := api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(0))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedBankBalanceBlock0))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct1BalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigInt((common.Big0)))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigInt((common.Big0)))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(1))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedBankBalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(2))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct1BalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(2))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct2BalanceBlock2))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(2))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedContractBalance))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(2))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedBankBalanceBlock2))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct1BalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct2BalanceBlock3))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedContractBalance))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedBankBalanceBlock2))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(4))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct1BalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(4))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct2BalanceBlock4))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(4))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedContractBalance))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(4))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedBankBalanceBlock2))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(5))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct1BalanceBlock5))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(5))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct2BalanceBlock4))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(5))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedContractBalance))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(5))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedBankBalanceBlock2))
|
||||
})
|
||||
It("Retrieves account balance by block hash", func() {
|
||||
bal, err := api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedBankBalanceBlock0))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct1BalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigInt((common.Big0)))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(EqualBigInt((common.Big0)))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedBankBalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct1BalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(bal).To(EqualBigHex(expectedAcct2BalanceBlock2))
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedContractBalance))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedBankBalanceBlock2))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct1BalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct2BalanceBlock3))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedContractBalance))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedBankBalanceBlock2))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct1BalanceBlock1))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct2BalanceBlock4))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedContractBalance))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedBankBalanceBlock2))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct1BalanceBlock5))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedAcct2BalanceBlock4))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedContractBalance))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigHex(expectedBankBalanceBlock2))
|
||||
})
|
||||
It("Returns 0 if account balance not found by block number", func() {
|
||||
bal, err := api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(0))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigInt(common.Big0))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(0))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigInt(common.Big0))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(0))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigInt(common.Big0))
|
||||
})
|
||||
It("Returns 0 if account balance not found by block hash", func() {
|
||||
bal, err := api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigInt(common.Big0))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigInt(common.Big0))
|
||||
|
||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(bal).To(EqualBigInt(common.Big0))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("eth_getCode", func() {
|
||||
It("Retrieves the code for the provided contract address at the block with the provided number", func() {
|
||||
code, err := api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
|
||||
|
||||
code, err = api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(5))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
|
||||
})
|
||||
It("Retrieves the code for the provided contract address at the block with the provided hash", func() {
|
||||
code, err := api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
|
||||
|
||||
code, err = api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
|
||||
})
|
||||
It("Returns `nil` for an account it cannot find the code for", func() {
|
||||
code, err := api.GetCode(ctx, randomAddress, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(code).To(BeEmpty())
|
||||
})
|
||||
It("Returns `nil` for a contract that doesn't exist at this height", func() {
|
||||
code, err := api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(0))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(code).To(BeEmpty())
|
||||
})
|
||||
})
|
||||
|
||||
Describe("eth_getStorageAt", func() {
|
||||
It("Returns empty slice if it tries to access a contract which does not exist", func() {
|
||||
storage, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
|
||||
|
||||
storage, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(1))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
|
||||
})
|
||||
It("Returns empty slice if it tries to access a contract slot which does not exist", func() {
|
||||
storage, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, randomHash.Hex(), rpc.BlockNumberOrHashWithNumber(2))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
|
||||
})
|
||||
It("Retrieves the storage value at the provided contract address and storage leaf key at the block with the provided hash or number", func() {
|
||||
// After deployment
|
||||
val, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(2))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
expectedRes := hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
Expect(val).To(Equal(expectedRes))
|
||||
|
||||
val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003"))
|
||||
Expect(val).To(Equal(expectedRes))
|
||||
|
||||
val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(4))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000009"))
|
||||
Expect(val).To(Equal(expectedRes))
|
||||
|
||||
val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(5))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(val).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
|
||||
})
|
||||
It("Throws an error for a non-existing block hash", func() {
|
||||
_, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithHash(randomHash, true))
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(MatchError("header for hash not found"))
|
||||
})
|
||||
It("Throws an error for a non-existing block number", func() {
|
||||
_, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(chainLength+1))
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(MatchError("header not found"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("eth_getHeaderByNumber", func() {
|
||||
It("Finds the canonical header based on the header's weight relative to others at the provided height", func() {
|
||||
header, err := api.GetHeaderByNumber(ctx, number)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(header).To(Equal(expectedCanonicalHeader))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("eth_getSlice", func() {
|
||||
It("Retrieves the state slice for root path", func() {
|
||||
path := "0x"
|
||||
depth := 3
|
||||
sliceResponse, err := api.GetSlice(ctx, path, depth, block5StateRoot, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedResponse := eth.GetSliceResponse{
|
||||
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, block5StateRoot.String()),
|
||||
MetaData: eth.GetSliceResponseMetadata{
|
||||
NodeStats: map[string]string{
|
||||
"00-stem-and-head-nodes": "1",
|
||||
"01-max-depth": "1",
|
||||
"02-total-trie-nodes": "6",
|
||||
"03-leaves": "5",
|
||||
"04-smart-contracts": "1",
|
||||
},
|
||||
},
|
||||
TrieNodes: eth.GetSliceResponseTrieNodes{
|
||||
Stem: map[string]string{},
|
||||
Head: map[string]string{
|
||||
rootDataHashBlock5: rootDataBlock5,
|
||||
},
|
||||
Slice: map[string]string{
|
||||
account1DataHash: account1Data,
|
||||
account2DataHash: account2Data,
|
||||
account3DataHash: account3Data,
|
||||
account4DataHash: account4Data,
|
||||
account5DataHash: account5Data,
|
||||
},
|
||||
},
|
||||
Leaves: map[string]eth.GetSliceResponseAccount{
|
||||
account1StateLeafKey: {
|
||||
StorageRoot: contractStorageRootBlock5.Hex(),
|
||||
EVMCode: account1Code,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
CheckGetSliceResponse(*sliceResponse, expectedResponse)
|
||||
})
|
||||
It("Retrieves the state slice for root path with 0 depth", func() {
|
||||
path := "0x"
|
||||
depth := 0
|
||||
sliceResponse, err := api.GetSlice(ctx, path, depth, block5StateRoot, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedResponse := eth.GetSliceResponse{
|
||||
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, block5StateRoot.String()),
|
||||
MetaData: eth.GetSliceResponseMetadata{
|
||||
NodeStats: map[string]string{
|
||||
"00-stem-and-head-nodes": "1",
|
||||
"01-max-depth": "0",
|
||||
"02-total-trie-nodes": "1",
|
||||
"03-leaves": "0",
|
||||
"04-smart-contracts": "0",
|
||||
},
|
||||
},
|
||||
TrieNodes: eth.GetSliceResponseTrieNodes{
|
||||
Stem: map[string]string{},
|
||||
Head: map[string]string{
|
||||
rootDataHashBlock5: rootDataBlock5,
|
||||
},
|
||||
Slice: map[string]string{},
|
||||
},
|
||||
Leaves: map[string]eth.GetSliceResponseAccount{},
|
||||
}
|
||||
|
||||
CheckGetSliceResponse(*sliceResponse, expectedResponse)
|
||||
})
|
||||
It("Retrieves the state slice for a path to an account", func() {
|
||||
path := "0x06"
|
||||
depth := 2
|
||||
sliceResponse, err := api.GetSlice(ctx, path, depth, block5StateRoot, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedResponse := eth.GetSliceResponse{
|
||||
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, block5StateRoot.String()),
|
||||
MetaData: eth.GetSliceResponseMetadata{
|
||||
NodeStats: map[string]string{
|
||||
"00-stem-and-head-nodes": "2",
|
||||
"01-max-depth": "0",
|
||||
"02-total-trie-nodes": "2",
|
||||
"03-leaves": "1",
|
||||
"04-smart-contracts": "1",
|
||||
},
|
||||
},
|
||||
TrieNodes: eth.GetSliceResponseTrieNodes{
|
||||
Stem: map[string]string{
|
||||
rootDataHashBlock5: rootDataBlock5,
|
||||
},
|
||||
Head: map[string]string{
|
||||
account1DataHash: account1Data,
|
||||
},
|
||||
Slice: map[string]string{},
|
||||
},
|
||||
Leaves: map[string]eth.GetSliceResponseAccount{
|
||||
account1StateLeafKey: {
|
||||
StorageRoot: contractStorageRootBlock5.Hex(),
|
||||
EVMCode: account1Code,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
CheckGetSliceResponse(*sliceResponse, expectedResponse)
|
||||
})
|
||||
It("Retrieves the state slice for a path to a non-existing account", func() {
|
||||
path := "0x06"
|
||||
depth := 2
|
||||
sliceResponse, err := api.GetSlice(ctx, path, depth, block1StateRoot, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedResponse := eth.GetSliceResponse{
|
||||
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, block1StateRoot.String()),
|
||||
MetaData: eth.GetSliceResponseMetadata{
|
||||
NodeStats: map[string]string{
|
||||
"00-stem-and-head-nodes": "1",
|
||||
"01-max-depth": "0",
|
||||
"02-total-trie-nodes": "1",
|
||||
"03-leaves": "0",
|
||||
"04-smart-contracts": "0",
|
||||
},
|
||||
},
|
||||
TrieNodes: eth.GetSliceResponseTrieNodes{
|
||||
Stem: map[string]string{
|
||||
rootDataHashBlock1: rootDataBlock1,
|
||||
},
|
||||
Head: map[string]string{},
|
||||
Slice: map[string]string{},
|
||||
},
|
||||
Leaves: map[string]eth.GetSliceResponseAccount{},
|
||||
}
|
||||
|
||||
CheckGetSliceResponse(*sliceResponse, expectedResponse)
|
||||
})
|
||||
|
||||
It("Retrieves the storage slice for root path", func() {
|
||||
path := "0x"
|
||||
depth := 2
|
||||
sliceResponse, err := api.GetSlice(ctx, path, depth, contractStorageRootBlock4, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedResponse := eth.GetSliceResponse{
|
||||
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, contractStorageRootBlock4.String()),
|
||||
MetaData: eth.GetSliceResponseMetadata{
|
||||
NodeStats: map[string]string{
|
||||
"00-stem-and-head-nodes": "1",
|
||||
"01-max-depth": "1",
|
||||
"02-total-trie-nodes": "3",
|
||||
"03-leaves": "2",
|
||||
"04-smart-contracts": "0",
|
||||
},
|
||||
},
|
||||
TrieNodes: eth.GetSliceResponseTrieNodes{
|
||||
Stem: map[string]string{},
|
||||
Head: map[string]string{
|
||||
storageRootDataHashBlock4: storageRootDataBlock4,
|
||||
},
|
||||
Slice: map[string]string{
|
||||
storageNode1DataHash: storageNode1Data,
|
||||
storageNode2DataHash: storageNode2Data,
|
||||
},
|
||||
},
|
||||
Leaves: map[string]eth.GetSliceResponseAccount{},
|
||||
}
|
||||
|
||||
CheckGetSliceResponse(*sliceResponse, expectedResponse)
|
||||
})
|
||||
It("Retrieves the storage slice for root path with 0 depth", func() {
|
||||
path := "0x"
|
||||
depth := 0
|
||||
sliceResponse, err := api.GetSlice(ctx, path, depth, contractStorageRootBlock4, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedResponse := eth.GetSliceResponse{
|
||||
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, contractStorageRootBlock4.String()),
|
||||
MetaData: eth.GetSliceResponseMetadata{
|
||||
NodeStats: map[string]string{
|
||||
"00-stem-and-head-nodes": "1",
|
||||
"01-max-depth": "0",
|
||||
"02-total-trie-nodes": "1",
|
||||
"03-leaves": "0",
|
||||
"04-smart-contracts": "0",
|
||||
},
|
||||
},
|
||||
TrieNodes: eth.GetSliceResponseTrieNodes{
|
||||
Stem: map[string]string{},
|
||||
Head: map[string]string{
|
||||
storageRootDataHashBlock4: storageRootDataBlock4,
|
||||
},
|
||||
Slice: map[string]string{},
|
||||
},
|
||||
Leaves: map[string]eth.GetSliceResponseAccount{},
|
||||
}
|
||||
|
||||
CheckGetSliceResponse(*sliceResponse, expectedResponse)
|
||||
})
|
||||
It("Retrieves the storage slice for root path with deleted nodes", func() {
|
||||
path := "0x"
|
||||
depth := 2
|
||||
sliceResponse, err := api.GetSlice(ctx, path, depth, contractStorageRootBlock5, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedResponse := eth.GetSliceResponse{
|
||||
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, contractStorageRootBlock5.String()),
|
||||
MetaData: eth.GetSliceResponseMetadata{
|
||||
NodeStats: map[string]string{
|
||||
"00-stem-and-head-nodes": "1",
|
||||
"01-max-depth": "0",
|
||||
"02-total-trie-nodes": "1",
|
||||
"03-leaves": "1",
|
||||
"04-smart-contracts": "0",
|
||||
},
|
||||
},
|
||||
TrieNodes: eth.GetSliceResponseTrieNodes{
|
||||
Stem: map[string]string{},
|
||||
Head: map[string]string{
|
||||
storageRootDataHashBlock5: storageRootDataBlock5,
|
||||
},
|
||||
Slice: map[string]string{},
|
||||
},
|
||||
Leaves: map[string]eth.GetSliceResponseAccount{},
|
||||
}
|
||||
|
||||
CheckGetSliceResponse(*sliceResponse, expectedResponse)
|
||||
})
|
||||
It("Retrieves the storage slice for a path to a storage node", func() {
|
||||
path := "0x0b"
|
||||
depth := 2
|
||||
sliceResponse, err := api.GetSlice(ctx, path, depth, contractStorageRootBlock4, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedResponse := eth.GetSliceResponse{
|
||||
SliceID: fmt.Sprintf("%s-%d-%s", path, depth, contractStorageRootBlock4.String()),
|
||||
MetaData: eth.GetSliceResponseMetadata{
|
||||
NodeStats: map[string]string{
|
||||
"00-stem-and-head-nodes": "2",
|
||||
"01-max-depth": "0",
|
||||
"02-total-trie-nodes": "2",
|
||||
"03-leaves": "1",
|
||||
"04-smart-contracts": "0",
|
||||
},
|
||||
},
|
||||
TrieNodes: eth.GetSliceResponseTrieNodes{
|
||||
Stem: map[string]string{
|
||||
storageRootDataHashBlock4: storageRootDataBlock4,
|
||||
},
|
||||
Head: map[string]string{
|
||||
storageNode1DataHash: storageNode1Data,
|
||||
},
|
||||
Slice: map[string]string{},
|
||||
},
|
||||
Leaves: map[string]eth.GetSliceResponseAccount{},
|
||||
}
|
||||
|
||||
CheckGetSliceResponse(*sliceResponse, expectedResponse)
|
||||
})
|
||||
})
|
||||
})
|
||||
@ -16,6 +16,38 @@

package eth

import (
"math/big"

"github.com/spf13/viper"
)

// SubscriptionSettings config is used by a subscriber to specify what eth data to stream from the watcher
type SubscriptionSettings struct {
BackFill bool
BackFillOnly bool
Start *big.Int
End *big.Int // set to 0 or a negative value to have no ending block
HeaderFilter HeaderFilter
TxFilter TxFilter
ReceiptFilter ReceiptFilter
StateFilter StateFilter
StorageFilter StorageFilter
}

// HeaderFilter contains filter settings for headers
type HeaderFilter struct {
Off bool
Uncles bool
}

// TxFilter contains filter settings for txs
type TxFilter struct {
Off bool
Src []string
Dst []string
}

// ReceiptFilter contains filter settings for receipts
type ReceiptFilter struct {
Off bool
@ -24,3 +56,70 @@ type ReceiptFilter struct {
LogAddresses []string // receipt contains logs from the provided addresses
Topics [][]string
}

// StateFilter contains filter settings for state
type StateFilter struct {
Off bool
Addresses []string // each is converted to a state key by taking its keccak256 hash
IntermediateNodes bool
}

// StorageFilter contains filter settings for storage
type StorageFilter struct {
Off bool
Addresses []string
StorageKeys []string // need to be the hashed keys themselves, not the slot positions
IntermediateNodes bool
}

// NewEthSubscriptionConfig is used to initialize a SubscriptionSettings struct with env variables
func NewEthSubscriptionConfig() (*SubscriptionSettings, error) {
sc := new(SubscriptionSettings)
// Below default to false, which means we do not backfill by default
sc.BackFill = viper.GetBool("watcher.ethSubscription.historicalData")
sc.BackFillOnly = viper.GetBool("watcher.ethSubscription.historicalDataOnly")
// Below default to 0
// 0 start means we start at the beginning and 0 end means we continue indefinitely
sc.Start = big.NewInt(viper.GetInt64("watcher.ethSubscription.startingBlock"))
sc.End = big.NewInt(viper.GetInt64("watcher.ethSubscription.endingBlock"))
// Below default to false, which means we get all headers and no uncles by default
sc.HeaderFilter = HeaderFilter{
Off: viper.GetBool("watcher.ethSubscription.headerFilter.off"),
Uncles: viper.GetBool("watcher.ethSubscription.headerFilter.uncles"),
}
// Below defaults to false and two slices of length 0,
// which means we get all transactions by default
sc.TxFilter = TxFilter{
Off: viper.GetBool("watcher.ethSubscription.txFilter.off"),
Src: viper.GetStringSlice("watcher.ethSubscription.txFilter.src"),
Dst: viper.GetStringSlice("watcher.ethSubscription.txFilter.dst"),
}
// By default all of the topic slices will be empty => match on any/all topics
topics := make([][]string, 4)
topics[0] = viper.GetStringSlice("watcher.ethSubscription.receiptFilter.topic0s")
topics[1] = viper.GetStringSlice("watcher.ethSubscription.receiptFilter.topic1s")
topics[2] = viper.GetStringSlice("watcher.ethSubscription.receiptFilter.topic2s")
topics[3] = viper.GetStringSlice("watcher.ethSubscription.receiptFilter.topic3s")
sc.ReceiptFilter = ReceiptFilter{
Off: viper.GetBool("watcher.ethSubscription.receiptFilter.off"),
MatchTxs: viper.GetBool("watcher.ethSubscription.receiptFilter.matchTxs"),
LogAddresses: viper.GetStringSlice("watcher.ethSubscription.receiptFilter.contracts"),
Topics: topics,
}
// Below defaults to two false values and a slice of length 0,
// which means we get all state leaves by default, but no intermediate nodes
sc.StateFilter = StateFilter{
Off: viper.GetBool("watcher.ethSubscription.stateFilter.off"),
IntermediateNodes: viper.GetBool("watcher.ethSubscription.stateFilter.intermediateNodes"),
Addresses: viper.GetStringSlice("watcher.ethSubscription.stateFilter.addresses"),
}
// Below defaults to two false values and two slices of length 0,
// which means we get all storage leaves by default, but no intermediate nodes
sc.StorageFilter = StorageFilter{
Off: viper.GetBool("watcher.ethSubscription.storageFilter.off"),
IntermediateNodes: viper.GetBool("watcher.ethSubscription.storageFilter.intermediateNodes"),
Addresses: viper.GetStringSlice("watcher.ethSubscription.storageFilter.addresses"),
StorageKeys: viper.GetStringSlice("watcher.ethSubscription.storageFilter.storageKeys"),
}
return sc, nil
}

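For orientation, here is a minimal sketch of how a subscriber might exercise this config path. The viper keys are the ones read by NewEthSubscriptionConfig above; setting them programmatically (rather than loading the watcher's config file) and the surrounding error handling are purely illustrative.

// Illustrative only: populate a few of the viper keys consumed by NewEthSubscriptionConfig,
// then build the subscription settings. In practice these keys come from the watcher's
// config file or environment.
viper.Set("watcher.ethSubscription.historicalData", false)
viper.Set("watcher.ethSubscription.startingBlock", int64(0))
viper.Set("watcher.ethSubscription.endingBlock", int64(0)) // 0 => no ending block
viper.Set("watcher.ethSubscription.receiptFilter.contracts", []string{"0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592"})

settings, err := NewEthSubscriptionConfig()
if err != nil {
	// handle the configuration error
}
_ = settings // hand off to the subscription API
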
@ -14,16 +14,28 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package eth_api_test
package eth

import (
"testing"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
)

func TestETHSuite(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "ipld-eth-server/pkg/eth/api_test")
// TxModelsContainsCID is used to check if a list of TxModels contains a specific CID string
func TxModelsContainsCID(txs []models.TxModel, cid string) bool {
for _, tx := range txs {
if tx.CID == cid {
return true
}
}
return false
}

// ReceiptModelsContainsCID is used to check if a list of ReceiptModel contains a specific CID string
func ReceiptModelsContainsCID(rcts []models.ReceiptModel, cid string) bool {
for _, rct := range rcts {
if rct.LeafCID == cid {
return true
}
}
return false
}
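A quick usage sketch for these helpers. The CID values and the CID wrapper are the mock fixtures defined later in this diff (package-qualified as needed when used from a test package); the Gomega assertion style follows the tests above.

// Illustrative: assert that the indexed transaction and receipt CIDs include the expected mocks.
Expect(TxModelsContainsCID(test_helpers.MockCIDWrapper.Transactions, test_helpers.Trx1CID.String())).To(BeTrue())
Expect(ReceiptModelsContainsCID(test_helpers.MockCIDWrapper.Receipts, test_helpers.Rct1CID.String())).To(BeTrue())
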
@ -19,7 +19,6 @@ package test_helpers
import (
"math/big"

"github.com/cerc-io/plugeth-statediff/test_helpers"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
@ -36,8 +35,7 @@ var (
TestBankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
TestBankAddress = crypto.PubkeyToAddress(TestBankKey.PublicKey) //0x71562b71999873DB5b286dF957af199Ec94617F7
TestBankFunds = big.NewInt(100000000)

Genesis = test_helpers.GenesisBlockForTesting(Testdb, TestBankAddress, TestBankFunds, big.NewInt(params.InitialBaseFee), params.MaxGasLimit)
Genesis = core.GenesisBlockForTesting(Testdb, TestBankAddress, TestBankFunds)

Account1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
Account2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
@ -62,12 +60,11 @@ data function sig: 73d4a13a

// MakeChain creates a chain of n blocks starting at and including parent.
// The returned block slice is ordered parent->head, with parent included as the first element.
func MakeChain(n int, parent *types.Block, chainGen func(int, *core.BlockGen), config *params.ChainConfig) ([]*types.Block, []types.Receipts, *core.BlockChain) {
func MakeChain(n int, parent *types.Block, chainGen func(int, *core.BlockGen)) ([]*types.Block, []types.Receipts, *core.BlockChain) {
config := params.TestChainConfig
config.LondonBlock = big.NewInt(100)
blocks, receipts := core.GenerateChain(config, parent, ethash.NewFaker(), Testdb, n, chainGen)
chain, err := core.NewBlockChain(Testdb, nil, nil, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
if err != nil {
panic(err)
}
chain, _ := core.NewBlockChain(Testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
return append([]*types.Block{parent}, blocks...), receipts, chain
}

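A short usage sketch, mirroring the suite setup earlier in this diff (v5 signature; chainLength and chainConfig are assumed to be supplied by the caller):

// Illustrative: build a small test chain on top of the test genesis. blocks[0] is the
// parent (genesis); receipts[i-1] pairs with blocks[i], since the parent block has no receipts.
blocks, receipts, chain := MakeChain(chainLength, Genesis, TestChainGen, chainConfig)
defer chain.Stop()
_ = blocks
_ = receipts
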
@ -17,35 +17,37 @@
package test_helpers

import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"math/big"

"github.com/cerc-io/plugeth-statediff/indexer/ipld"
"github.com/cerc-io/plugeth-statediff/indexer/models"
sdtypes "github.com/cerc-io/plugeth-statediff/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
testhelpers "github.com/ethereum/go-ethereum/statediff/test_helpers"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ethereum/go-ethereum/trie"
"github.com/holiman/uint256"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-block-format"
"github.com/multiformats/go-multihash"
log "github.com/sirupsen/logrus"

"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth"
)

// Test variables
|
||||
var (
|
||||
// block data
|
||||
BlockNumber1 = int64(1)
|
||||
BlockTime1 = uint64(0)
|
||||
MockHeader = types.Header{
|
||||
BlockNumber = big.NewInt(1)
|
||||
MockHeader = types.Header{
|
||||
Time: 0,
|
||||
Number: big.NewInt(BlockNumber1),
|
||||
Number: new(big.Int).Set(BlockNumber),
|
||||
Root: common.HexToHash("0x0"),
|
||||
TxHash: common.HexToHash("0x0"),
|
||||
ReceiptHash: common.HexToHash("0x0"),
|
||||
@ -53,36 +55,32 @@ var (
|
||||
Extra: []byte{},
|
||||
}
|
||||
MockTransactions, MockReceipts, SenderAddr = createLegacyTransactionsAndReceipts()
|
||||
MockWithdrawals = types.Withdrawals{
|
||||
{Index: 0, Validator: 1, Address: Address, Amount: 1000000000},
|
||||
{Index: 1, Validator: 5, Address: AnotherAddress, Amount: 2000000000},
|
||||
}
|
||||
MockUncles = []*types.Header{
|
||||
MockUncles = []*types.Header{
|
||||
{
|
||||
Time: 1,
|
||||
Number: big.NewInt(BlockNumber1 + 1),
|
||||
Number: new(big.Int).Add(BlockNumber, big.NewInt(1)),
|
||||
Root: common.HexToHash("0x1"),
|
||||
TxHash: common.HexToHash("0x1"),
|
||||
ReceiptHash: common.HexToHash("0x1"),
|
||||
Difficulty: big.NewInt(500001),
|
||||
Extra: []byte{},
|
||||
ParentHash: Genesis.Hash(),
|
||||
},
|
||||
{
|
||||
Time: 2,
|
||||
Number: big.NewInt(BlockNumber1 + 2),
|
||||
Number: new(big.Int).Add(BlockNumber, big.NewInt(2)),
|
||||
Root: common.HexToHash("0x2"),
|
||||
TxHash: common.HexToHash("0x2"),
|
||||
ReceiptHash: common.HexToHash("0x2"),
|
||||
Difficulty: big.NewInt(500002),
|
||||
Extra: []byte{},
|
||||
ParentHash: Genesis.Hash(),
|
||||
},
|
||||
}
|
||||
MockBlock = createNewBlock(&MockHeader, MockTransactions, MockUncles, MockReceipts, nil, trie.NewEmpty(nil))
|
||||
MockChildHeader = types.Header{
|
||||
ReceiptsRlp, _ = rlp.EncodeToBytes(MockReceipts)
|
||||
MockBlock = createNewBlock(&MockHeader, MockTransactions, MockUncles, MockReceipts, new(trie.Trie))
|
||||
MockHeaderRlp, _ = rlp.EncodeToBytes(MockBlock.Header())
|
||||
MockChildHeader = types.Header{
|
||||
Time: 0,
|
||||
Number: big.NewInt(BlockNumber1 + 1),
|
||||
Number: new(big.Int).Add(BlockNumber, common.Big1),
|
||||
Root: common.HexToHash("0x0"),
|
||||
TxHash: common.HexToHash("0x0"),
|
||||
ReceiptHash: common.HexToHash("0x0"),
|
||||
@ -90,27 +88,30 @@ var (
|
||||
Extra: []byte{},
|
||||
ParentHash: MockBlock.Header().Hash(),
|
||||
}
|
||||
MockChild = types.NewBlock(&MockChildHeader, MockTransactions, MockUncles, MockReceipts, trie.NewEmpty(nil))
|
||||
Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
|
||||
AnotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
|
||||
AnotherAddress1 = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476594")
|
||||
AnotherAddress2 = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476596")
|
||||
ContractAddress = crypto.CreateAddress(SenderAddr, MockTransactions[2].Nonce())
|
||||
mockTopic11 = common.HexToHash("0x04")
|
||||
mockTopic12 = common.HexToHash("0x06")
|
||||
mockTopic21 = common.HexToHash("0x05")
|
||||
mockTopic22 = common.HexToHash("0x07")
|
||||
mockTopic31 = common.HexToHash("0x08")
|
||||
mockTopic41 = common.HexToHash("0x09")
|
||||
mockTopic42 = common.HexToHash("0x0a")
|
||||
mockTopic43 = common.HexToHash("0x0b")
|
||||
mockTopic51 = common.HexToHash("0x0c")
|
||||
mockTopic61 = common.HexToHash("0x0d")
|
||||
MockLog1 = &types.Log{
|
||||
MockChild = types.NewBlock(&MockChildHeader, MockTransactions, MockUncles, MockReceipts, new(trie.Trie))
|
||||
MockChildRlp, _ = rlp.EncodeToBytes(MockChild.Header())
|
||||
Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
|
||||
AnotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
|
||||
AnotherAddress1 = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476594")
|
||||
AnotherAddress2 = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476596")
|
||||
ContractAddress = crypto.CreateAddress(SenderAddr, MockTransactions[2].Nonce())
|
||||
ContractHash = crypto.Keccak256Hash(ContractAddress.Bytes()).String()
|
||||
MockContractByteCode = []byte{0, 1, 2, 3, 4, 5}
|
||||
mockTopic11 = common.HexToHash("0x04")
|
||||
mockTopic12 = common.HexToHash("0x06")
|
||||
mockTopic21 = common.HexToHash("0x05")
|
||||
mockTopic22 = common.HexToHash("0x07")
|
||||
mockTopic31 = common.HexToHash("0x08")
|
||||
mockTopic41 = common.HexToHash("0x09")
|
||||
mockTopic42 = common.HexToHash("0x0a")
|
||||
mockTopic43 = common.HexToHash("0x0b")
|
||||
mockTopic51 = common.HexToHash("0x0c")
|
||||
mockTopic61 = common.HexToHash("0x0d")
|
||||
MockLog1 = &types.Log{
|
||||
Address: Address,
|
||||
Topics: []common.Hash{mockTopic11, mockTopic12},
|
||||
Data: []byte{},
|
||||
BlockNumber: uint64(BlockNumber1),
|
||||
BlockNumber: BlockNumber.Uint64(),
|
||||
TxIndex: 0,
|
||||
Index: 0,
|
||||
}
|
||||
@ -118,7 +119,7 @@ var (
|
||||
Address: AnotherAddress,
|
||||
Topics: []common.Hash{mockTopic21, mockTopic22},
|
||||
Data: []byte{},
|
||||
BlockNumber: uint64(BlockNumber1),
|
||||
BlockNumber: BlockNumber.Uint64(),
|
||||
TxIndex: 1,
|
||||
Index: 1,
|
||||
}
|
||||
@ -126,7 +127,7 @@ var (
|
||||
Address: AnotherAddress1,
|
||||
Topics: []common.Hash{mockTopic31},
|
||||
Data: []byte{},
|
||||
BlockNumber: uint64(BlockNumber1),
|
||||
BlockNumber: BlockNumber.Uint64(),
|
||||
TxIndex: 2,
|
||||
Index: 2,
|
||||
}
|
||||
@ -135,7 +136,7 @@ var (
|
||||
Address: AnotherAddress1,
|
||||
Topics: []common.Hash{mockTopic41, mockTopic42, mockTopic43},
|
||||
Data: []byte{},
|
||||
BlockNumber: uint64(BlockNumber1),
|
||||
BlockNumber: BlockNumber.Uint64(),
|
||||
TxIndex: 2,
|
||||
Index: 3,
|
||||
}
|
||||
@ -143,7 +144,7 @@ var (
|
||||
Address: AnotherAddress1,
|
||||
Topics: []common.Hash{mockTopic51},
|
||||
Data: []byte{},
|
||||
BlockNumber: uint64(BlockNumber1),
|
||||
BlockNumber: BlockNumber.Uint64(),
|
||||
TxIndex: 2,
|
||||
Index: 4,
|
||||
}
|
||||
@ -151,60 +152,184 @@ var (
|
||||
Address: AnotherAddress2,
|
||||
Topics: []common.Hash{mockTopic61},
|
||||
Data: []byte{},
|
||||
BlockNumber: uint64(BlockNumber1),
|
||||
BlockNumber: BlockNumber.Uint64(),
|
||||
TxIndex: 3,
|
||||
Index: 5,
|
||||
}
|
||||
|
||||
rctCIDs, _ = getReceiptCIDs(MockReceipts)
|
||||
Rct1CID = rctCIDs[0]
|
||||
Rct4CID = rctCIDs[3]
|
||||
MockTrxMeta = []models.TxModel{
|
||||
Tx1 = GetTxnRlp(0, MockTransactions)
|
||||
Tx2 = GetTxnRlp(1, MockTransactions)
|
||||
Tx3 = GetTxnRlp(2, MockTransactions)
|
||||
Tx4 = GetTxnRlp(3, MockTransactions)
|
||||
|
||||
rctCIDs, rctIPLDData, _ = eth.GetRctLeafNodeData(MockReceipts)
|
||||
HeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, MockHeaderRlp, multihash.KECCAK_256)
|
||||
HeaderMhKey = shared.MultihashKeyFromCID(HeaderCID)
|
||||
Trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, Tx1, multihash.KECCAK_256)
|
||||
Trx1MhKey = shared.MultihashKeyFromCID(Trx1CID)
|
||||
Trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, Tx2, multihash.KECCAK_256)
|
||||
Trx2MhKey = shared.MultihashKeyFromCID(Trx2CID)
|
||||
Trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, Tx3, multihash.KECCAK_256)
|
||||
Trx3MhKey = shared.MultihashKeyFromCID(Trx3CID)
|
||||
Trx4CID, _ = ipld.RawdataToCid(ipld.MEthTx, Tx4, multihash.KECCAK_256)
|
||||
Trx4MhKey = shared.MultihashKeyFromCID(Trx4CID)
|
||||
Rct1CID = rctCIDs[0]
|
||||
Rct1MhKey = shared.MultihashKeyFromCID(Rct1CID)
|
||||
Rct2CID = rctCIDs[1]
|
||||
Rct2MhKey = shared.MultihashKeyFromCID(Rct2CID)
|
||||
Rct3CID = rctCIDs[2]
|
||||
Rct3MhKey = shared.MultihashKeyFromCID(Rct3CID)
|
||||
Rct4CID = rctCIDs[3]
|
||||
Rct4MhKey = shared.MultihashKeyFromCID(Rct4CID)
|
||||
State1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, ContractLeafNode, multihash.KECCAK_256)
|
||||
State1MhKey = shared.MultihashKeyFromCID(State1CID)
|
||||
State2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, AccountLeafNode, multihash.KECCAK_256)
|
||||
State2MhKey = shared.MultihashKeyFromCID(State2CID)
|
||||
StorageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, StorageLeafNode, multihash.KECCAK_256)
|
||||
StorageMhKey = shared.MultihashKeyFromCID(StorageCID)
|
||||
Rct1IPLD = rctIPLDData[0]
|
||||
Rct2IPLD = rctIPLDData[1]
|
||||
Rct3IPLD = rctIPLDData[2]
|
||||
Rct4IPLD = rctIPLDData[3]
|
||||
MockTrxMeta = []models.TxModel{
|
||||
{
|
||||
CID: "", // This is empty until we go to publish to ipfs
|
||||
MhKey: "",
|
||||
Src: SenderAddr.Hex(),
|
||||
Dst: Address.String(),
|
||||
Index: 0,
|
||||
TxHash: MockTransactions[0].Hash().String(),
|
||||
Data: []byte{},
|
||||
},
|
||||
{
|
||||
CID: "",
|
||||
MhKey: "",
|
||||
Src: SenderAddr.Hex(),
|
||||
Dst: AnotherAddress.String(),
|
||||
Index: 1,
|
||||
TxHash: MockTransactions[1].Hash().String(),
|
||||
Data: []byte{},
|
||||
},
|
||||
{
|
||||
CID: "",
|
||||
MhKey: "",
|
||||
Src: SenderAddr.Hex(),
|
||||
Dst: "",
|
||||
Index: 2,
|
||||
TxHash: MockTransactions[2].Hash().String(),
|
||||
Data: MockContractByteCode,
|
||||
},
|
||||
{
|
||||
CID: "",
|
||||
MhKey: "",
|
||||
Src: SenderAddr.Hex(),
|
||||
Dst: "",
|
||||
Index: 3,
|
||||
TxHash: MockTransactions[3].Hash().String(),
|
||||
Data: []byte{},
|
||||
},
|
||||
}
|
||||
MockTrxMetaPostPublsh = []models.TxModel{
|
||||
{
|
||||
BlockNumber: "1",
|
||||
CID: Trx1CID.String(), // This is empty until we go to publish to ipfs
|
||||
MhKey: Trx1MhKey,
|
||||
Src: SenderAddr.Hex(),
|
||||
Dst: Address.String(),
|
||||
Index: 0,
|
||||
TxHash: MockTransactions[0].Hash().String(),
|
||||
Data: []byte{},
|
||||
},
|
||||
{
|
||||
BlockNumber: "1",
|
||||
CID: Trx2CID.String(),
|
||||
MhKey: Trx2MhKey,
|
||||
Src: SenderAddr.Hex(),
|
||||
Dst: AnotherAddress.String(),
|
||||
Index: 1,
|
||||
TxHash: MockTransactions[1].Hash().String(),
|
||||
Data: []byte{},
|
||||
},
|
||||
{
|
||||
BlockNumber: "1",
|
||||
CID: Trx3CID.String(),
|
||||
MhKey: Trx3MhKey,
|
||||
Src: SenderAddr.Hex(),
|
||||
Dst: "",
|
||||
Index: 2,
|
||||
TxHash: MockTransactions[2].Hash().String(),
|
||||
Data: MockContractByteCode,
|
||||
},
|
||||
{
|
||||
BlockNumber: "1",
|
||||
CID: Trx4CID.String(),
|
||||
MhKey: Trx4MhKey,
|
||||
Src: SenderAddr.Hex(),
|
||||
Dst: AnotherAddress1.String(),
|
||||
Index: 3,
|
||||
TxHash: MockTransactions[3].Hash().String(),
|
||||
Data: []byte{},
|
||||
},
|
||||
}
|
||||
MockRctMeta = []models.ReceiptModel{
|
||||
{
|
||||
CID: "",
|
||||
Contract: "",
|
||||
LeafCID: "",
|
||||
LeafMhKey: "",
|
||||
Contract: "",
|
||||
ContractHash: "",
|
||||
},
|
||||
{
|
||||
CID: "",
|
||||
Contract: "",
|
||||
LeafCID: "",
|
||||
LeafMhKey: "",
|
||||
Contract: "",
|
||||
ContractHash: "",
|
||||
},
|
||||
{
|
||||
CID: "",
|
||||
Contract: ContractAddress.String(),
|
||||
LeafCID: "",
|
||||
LeafMhKey: "",
|
||||
Contract: ContractAddress.String(),
|
||||
ContractHash: ContractHash,
|
||||
},
|
||||
{
|
||||
CID: "",
|
||||
Contract: "",
|
||||
LeafCID: "",
|
||||
LeafMhKey: "",
|
||||
Contract: "",
|
||||
ContractHash: "",
|
||||
},
|
||||
}
|
||||
|
||||
MockRctMetaPostPublish = []models.ReceiptModel{
|
||||
{
|
||||
BlockNumber: "1",
|
||||
HeaderID: MockBlock.Hash().String(),
|
||||
LeafCID: Rct1CID.String(),
|
||||
LeafMhKey: Rct1MhKey,
|
||||
Contract: "",
|
||||
ContractHash: "",
|
||||
},
|
||||
{
|
||||
BlockNumber: "1",
|
||||
HeaderID: MockBlock.Hash().String(),
|
||||
LeafCID: Rct2CID.String(),
|
||||
LeafMhKey: Rct2MhKey,
|
||||
Contract: "",
|
||||
ContractHash: "",
|
||||
},
|
||||
{
|
||||
BlockNumber: "1",
|
||||
HeaderID: MockBlock.Hash().String(),
|
||||
LeafCID: Rct3CID.String(),
|
||||
LeafMhKey: Rct3MhKey,
|
||||
Contract: ContractAddress.String(),
|
||||
ContractHash: ContractHash,
|
||||
},
|
||||
{
|
||||
BlockNumber: "1",
|
||||
HeaderID: MockBlock.Hash().String(),
|
||||
LeafCID: Rct4CID.String(),
|
||||
LeafMhKey: Rct4MhKey,
|
||||
Contract: "",
|
||||
ContractHash: "",
|
||||
},
|
||||
}
|
||||
|
||||
@ -218,70 +343,89 @@ var (
|
||||
StorageValue,
|
||||
})
|
||||
|
||||
ContractRoot = "0x821e2556a290c86405f8160a2d662042a431ba456b9db265c79bb837c04be5f0"
|
||||
contractPath = common.Bytes2Hex([]byte{'\x06'})
|
||||
ContractLeafKey = crypto.Keccak256(ContractAddress[:])
|
||||
ContractAccount = types.StateAccount{
|
||||
Nonce: uint64(1),
|
||||
Balance: uint256.NewInt(0),
|
||||
CodeHash: CodeHash.Bytes(),
|
||||
nonce1 = uint64(1)
|
||||
ContractRoot = "0x821e2556a290c86405f8160a2d662042a431ba456b9db265c79bb837c04be5f0"
|
||||
ContractCodeHash = crypto.Keccak256Hash(MockContractByteCode)
|
||||
contractPath = common.Bytes2Hex([]byte{'\x06'})
|
||||
ContractLeafKey = testhelpers.AddressToLeafKey(ContractAddress)
|
||||
ContractAccount, _ = rlp.EncodeToBytes(&types.StateAccount{
|
||||
Nonce: nonce1,
|
||||
Balance: big.NewInt(0),
|
||||
CodeHash: ContractCodeHash.Bytes(),
|
||||
Root: common.HexToHash(ContractRoot),
|
||||
}
|
||||
ContractAccountRLP, _ = rlp.EncodeToBytes(&ContractAccount)
|
||||
ContractPartialPath = common.Hex2Bytes("3114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45")
|
||||
ContractLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
|
||||
})
|
||||
ContractPartialPath = common.Hex2Bytes("3114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45")
|
||||
ContractLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
|
||||
ContractPartialPath,
|
||||
ContractAccountRLP,
|
||||
ContractAccount,
|
||||
})
|
||||
|
||||
nonce0 = uint64(0)
|
||||
AccountBalance = uint256.NewInt(1000)
|
||||
AccountBalance = big.NewInt(1000)
|
||||
AccountRoot = "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
|
||||
AccountCodeHash = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
|
||||
AccountAddresss = common.HexToAddress("0x0D3ab14BBaD3D99F4203bd7a11aCB94882050E7e")
|
||||
AccountLeafKey = crypto.Keccak256(AccountAddresss[:])
|
||||
Account = types.StateAccount{
|
||||
AccountLeafKey = testhelpers.Account2LeafKey
|
||||
Account, _ = rlp.EncodeToBytes(&types.StateAccount{
|
||||
Nonce: nonce0,
|
||||
Balance: AccountBalance,
|
||||
CodeHash: AccountCodeHash.Bytes(),
|
||||
Root: common.HexToHash(AccountRoot),
|
||||
}
|
||||
AccountRLP, _ = rlp.EncodeToBytes(&Account)
|
||||
})
|
||||
AccountPartialPath = common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45")
|
||||
AccountLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
|
||||
AccountPartialPath,
|
||||
AccountRLP,
|
||||
Account,
|
||||
})
|
||||
|
||||
MockStateNodes = []sdtypes.StateLeafNode{
|
||||
MockStateNodes = []sdtypes.StateNode{
|
||||
{
|
||||
AccountWrapper: sdtypes.AccountWrapper{
|
||||
Account: &ContractAccount,
|
||||
LeafKey: ContractLeafKey,
|
||||
CID: ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ContractLeafNode)).String(),
|
||||
},
|
||||
StorageDiff: []sdtypes.StorageLeafNode{
|
||||
LeafKey: ContractLeafKey,
|
||||
Path: []byte{'\x06'},
|
||||
NodeValue: ContractLeafNode,
|
||||
NodeType: sdtypes.Leaf,
|
||||
StorageNodes: []sdtypes.StorageNode{
|
||||
{
|
||||
LeafKey: StorageLeafKey,
|
||||
Value: StorageValue,
|
||||
CID: ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(StorageLeafNode)).String(),
|
||||
Path: []byte{},
|
||||
NodeType: sdtypes.Leaf,
|
||||
LeafKey: StorageLeafKey,
|
||||
NodeValue: StorageLeafNode,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
AccountWrapper: sdtypes.AccountWrapper{
|
||||
Account: &Account,
|
||||
LeafKey: AccountLeafKey,
|
||||
CID: ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(AccountLeafNode)).String(),
|
||||
},
|
||||
LeafKey: AccountLeafKey,
|
||||
Path: []byte{'\x0c'},
|
||||
NodeValue: AccountLeafNode,
|
||||
NodeType: sdtypes.Leaf,
|
||||
StorageNodes: []sdtypes.StorageNode{},
|
||||
},
|
||||
}
|
||||
MockStorageNodes = map[string][]sdtypes.StorageLeafNode{
|
||||
MockStateMetaPostPublish = []models.StateNodeModel{
|
||||
{
|
||||
BlockNumber: "1",
|
||||
CID: State1CID.String(),
|
||||
MhKey: State1MhKey,
|
||||
Path: []byte{'\x06'},
|
||||
NodeType: 2,
|
||||
StateKey: common.BytesToHash(ContractLeafKey).Hex(),
|
||||
},
|
||||
{
|
||||
BlockNumber: "1",
|
||||
CID: State2CID.String(),
|
||||
MhKey: State2MhKey,
|
||||
Path: []byte{'\x0c'},
|
||||
NodeType: 2,
|
||||
StateKey: common.BytesToHash(AccountLeafKey).Hex(),
|
||||
},
|
||||
}
|
||||
MockStorageNodes = map[string][]sdtypes.StorageNode{
|
||||
contractPath: {
|
||||
{
|
||||
LeafKey: StorageLeafKey,
|
||||
Value: StorageValue,
|
||||
CID: ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(StorageLeafNode)).String(),
|
||||
LeafKey: StorageLeafKey,
|
||||
NodeValue: StorageLeafNode,
|
||||
NodeType: sdtypes.Leaf,
|
||||
Path: []byte{},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -295,46 +439,166 @@ var (
|
||||
StorageNodes: MockStorageNodes,
|
||||
StateNodes: MockStateNodes,
|
||||
}
|
||||
MockConvertedPayloadForChild = eth.ConvertedPayload{
|
||||
TotalDifficulty: MockChild.Difficulty(),
|
||||
Block: MockChild,
|
||||
Receipts: MockReceipts,
|
||||
TxMetaData: MockTrxMeta,
|
||||
ReceiptMetaData: MockRctMeta,
|
||||
StorageNodes: MockStorageNodes,
|
||||
StateNodes: MockStateNodes,
|
||||
}
|
||||
|
||||
LondonBlockNum = (BlockNumber1 + 2)
|
||||
LondonBlockTime = BlockTime1 + 1
|
||||
Reward = shared.CalcEthBlockReward(MockBlock.Header(), MockBlock.Uncles(), MockBlock.Transactions(), MockReceipts)
|
||||
MockCIDWrapper = ð.CIDWrapper{
|
||||
BlockNumber: new(big.Int).Set(BlockNumber),
|
||||
Header: models.HeaderModel{
|
||||
BlockNumber: "1",
|
||||
BlockHash: MockBlock.Hash().String(),
|
||||
ParentHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
CID: HeaderCID.String(),
|
||||
MhKey: HeaderMhKey,
|
||||
TotalDifficulty: MockBlock.Difficulty().String(),
|
||||
Reward: Reward.String(),
|
||||
StateRoot: MockBlock.Root().String(),
|
||||
RctRoot: MockBlock.ReceiptHash().String(),
|
||||
TxRoot: MockBlock.TxHash().String(),
|
||||
UncleRoot: MockBlock.UncleHash().String(),
|
||||
Bloom: MockBlock.Bloom().Bytes(),
|
||||
Timestamp: MockBlock.Time(),
|
||||
TimesValidated: 1,
|
||||
Coinbase: "0x0000000000000000000000000000000000000000",
|
||||
},
|
||||
Transactions: MockTrxMetaPostPublsh,
|
||||
Receipts: MockRctMetaPostPublish,
|
||||
Uncles: []models.UncleModel{},
|
||||
StateNodes: MockStateMetaPostPublish,
|
||||
StorageNodes: []models.StorageNodeWithStateKeyModel{
|
||||
{
|
||||
BlockNumber: "1",
|
||||
Path: []byte{},
|
||||
CID: StorageCID.String(),
|
||||
MhKey: StorageMhKey,
|
||||
NodeType: 2,
|
||||
StateKey: common.BytesToHash(ContractLeafKey).Hex(),
|
||||
StorageKey: common.BytesToHash(StorageLeafKey).Hex(),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
HeaderIPLD, _ = blocks.NewBlockWithCid(MockHeaderRlp, HeaderCID)
|
||||
Trx1IPLD, _ = blocks.NewBlockWithCid(Tx1, Trx1CID)
|
||||
Trx2IPLD, _ = blocks.NewBlockWithCid(Tx2, Trx2CID)
|
||||
Trx3IPLD, _ = blocks.NewBlockWithCid(Tx3, Trx3CID)
|
||||
Trx4IPLD, _ = blocks.NewBlockWithCid(Tx4, Trx4CID)
|
||||
State1IPLD, _ = blocks.NewBlockWithCid(ContractLeafNode, State1CID)
|
||||
State2IPLD, _ = blocks.NewBlockWithCid(AccountLeafNode, State2CID)
|
||||
StorageIPLD, _ = blocks.NewBlockWithCid(StorageLeafNode, StorageCID)
|
||||
|
||||
MockIPLDs = eth.IPLDs{
|
||||
BlockNumber: new(big.Int).Set(BlockNumber),
|
||||
Header: models.IPLDModel{
|
||||
BlockNumber: BlockNumber.String(),
|
||||
Data: HeaderIPLD.RawData(),
|
||||
Key: HeaderIPLD.Cid().String(),
|
||||
},
|
||||
Transactions: []models.IPLDModel{
|
||||
{
|
||||
BlockNumber: BlockNumber.String(),
|
||||
Data: Trx1IPLD.RawData(),
|
||||
Key: Trx1IPLD.Cid().String(),
|
||||
},
|
||||
{
|
||||
BlockNumber: BlockNumber.String(),
|
||||
Data: Trx2IPLD.RawData(),
|
||||
Key: Trx2IPLD.Cid().String(),
|
||||
},
|
||||
{
|
||||
BlockNumber: BlockNumber.String(),
|
||||
Data: Trx3IPLD.RawData(),
|
||||
Key: Trx3IPLD.Cid().String(),
|
||||
},
|
||||
{
|
||||
BlockNumber: BlockNumber.String(),
|
||||
Data: Trx4IPLD.RawData(),
|
||||
Key: Trx4IPLD.Cid().String(),
|
||||
},
|
||||
},
|
||||
Receipts: []models.IPLDModel{
|
||||
{
|
||||
BlockNumber: BlockNumber.String(),
|
||||
Data: Rct1IPLD,
|
||||
Key: Rct1CID.String(),
|
||||
},
|
||||
{
|
||||
BlockNumber: BlockNumber.String(),
|
||||
Data: Rct2IPLD,
|
||||
Key: Rct2CID.String(),
|
||||
},
|
||||
{
|
||||
BlockNumber: BlockNumber.String(),
|
||||
Data: Rct3IPLD,
|
||||
Key: Rct3CID.String(),
|
||||
},
|
||||
{
|
||||
BlockNumber: BlockNumber.String(),
|
||||
Data: Rct4IPLD,
|
||||
Key: Rct4CID.String(),
|
||||
},
|
||||
},
|
||||
StateNodes: []eth.StateNode{
|
||||
{
|
||||
StateLeafKey: common.BytesToHash(ContractLeafKey),
|
||||
Type: sdtypes.Leaf,
|
||||
IPLD: models.IPLDModel{
|
||||
BlockNumber: BlockNumber.String(),
|
||||
Data: State1IPLD.RawData(),
|
||||
Key: State1IPLD.Cid().String(),
|
||||
},
|
||||
Path: []byte{'\x06'},
|
||||
},
|
||||
{
|
||||
StateLeafKey: common.BytesToHash(AccountLeafKey),
|
||||
Type: sdtypes.Leaf,
|
||||
IPLD: models.IPLDModel{
|
||||
BlockNumber: BlockNumber.String(),
|
||||
Data: State2IPLD.RawData(),
|
||||
Key: State2IPLD.Cid().String(),
|
||||
},
|
||||
Path: []byte{'\x0c'},
|
||||
},
|
||||
},
|
||||
StorageNodes: []eth.StorageNode{
|
||||
{
|
||||
StateLeafKey: common.BytesToHash(ContractLeafKey),
|
||||
StorageLeafKey: common.BytesToHash(StorageLeafKey),
|
||||
Type: sdtypes.Leaf,
|
||||
IPLD: models.IPLDModel{
|
||||
BlockNumber: BlockNumber.String(),
|
||||
Data: StorageIPLD.RawData(),
|
||||
Key: StorageIPLD.Cid().String(),
|
||||
},
|
||||
Path: []byte{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
LondonBlockNum = new(big.Int).Add(BlockNumber, big.NewInt(2))
|
||||
MockLondonHeader = types.Header{
|
||||
Time: LondonBlockTime,
|
||||
Number: big.NewInt(LondonBlockNum),
|
||||
Time: 0,
|
||||
Number: LondonBlockNum,
|
||||
Root: common.HexToHash("0x00"),
|
||||
Difficulty: big.NewInt(5000000),
|
||||
Extra: []byte{},
|
||||
BaseFee: big.NewInt(params.InitialBaseFee),
|
||||
}
|
||||
|
||||
MockLondonTransactions, MockLondonReceipts, _ = createDynamicTransactionsAndReceipts(big.NewInt(LondonBlockNum), LondonBlockTime)
|
||||
MockLondonUncles = []*types.Header{
|
||||
{
|
||||
Time: 1,
|
||||
Number: big.NewInt(BlockNumber1 + 1),
|
||||
ParentHash: common.HexToHash("0x2"),
|
||||
Root: common.HexToHash("0x1"),
|
||||
TxHash: common.HexToHash("0x1"),
|
||||
ReceiptHash: common.HexToHash("0x1"),
|
||||
Difficulty: big.NewInt(500001),
|
||||
Extra: []byte{},
|
||||
},
|
||||
{
|
||||
Time: 2,
|
||||
Number: big.NewInt(BlockNumber1 + 1),
|
||||
ParentHash: common.HexToHash("0x1"),
|
||||
Root: common.HexToHash("0x2"),
|
||||
TxHash: common.HexToHash("0x2"),
|
||||
ReceiptHash: common.HexToHash("0x2"),
|
||||
Difficulty: big.NewInt(500002),
|
||||
Extra: []byte{},
|
||||
},
|
||||
}
|
||||
MockLondonBlock = createNewBlock(&MockLondonHeader, MockLondonTransactions, MockLondonUncles, MockLondonReceipts, MockWithdrawals, trie.NewEmpty(nil))
|
||||
MockLondonTransactions, MockLondonReceipts, _ = createDynamicTransactionsAndReceipts(LondonBlockNum)
|
||||
MockLondonBlock = createNewBlock(&MockLondonHeader, MockLondonTransactions, nil, MockLondonReceipts, new(trie.Trie))
|
||||
)
|
||||
|
||||
func createNewBlock(header *types.Header, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt, withdrawals types.Withdrawals, hasher types.TrieHasher) *types.Block {
|
||||
block := types.NewBlockWithWithdrawals(header, txs, uncles, receipts, withdrawals, hasher)
|
||||
func createNewBlock(header *types.Header, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt, hasher types.TrieHasher) *types.Block {
|
||||
block := types.NewBlock(header, txs, uncles, receipts, hasher)
|
||||
bHash := block.Hash()
|
||||
for _, r := range receipts {
|
||||
for _, l := range r.Logs {
|
||||
@ -345,9 +609,9 @@ func createNewBlock(header *types.Header, txs []*types.Transaction, uncles []*ty
|
||||
}
|
||||
|
||||
// createDynamicTransactionsAndReceipts is a helper function to generate signed mock transactions and mock receipts with mock logs
|
||||
func createDynamicTransactionsAndReceipts(blockNumber *big.Int, blockTime uint64) (types.Transactions, types.Receipts, common.Address) {
|
||||
func createDynamicTransactionsAndReceipts(blockNumber *big.Int) (types.Transactions, types.Receipts, common.Address) {
|
||||
// make transactions
|
||||
config := *params.TestChainConfig
|
||||
config := params.TestChainConfig
|
||||
config.LondonBlock = blockNumber
|
||||
trx1 := types.NewTx(&types.DynamicFeeTx{
|
||||
ChainID: config.ChainID,
|
||||
@ -360,7 +624,7 @@ func createDynamicTransactionsAndReceipts(blockNumber *big.Int, blockTime uint64
|
||||
Data: []byte{},
|
||||
})
|
||||
|
||||
transactionSigner := types.MakeSigner(&config, blockNumber, blockTime)
|
||||
transactionSigner := types.MakeSigner(config, blockNumber)
|
||||
mockCurve := elliptic.P256()
|
||||
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
|
||||
if err != nil {
|
||||
@ -396,9 +660,9 @@ func createLegacyTransactionsAndReceipts() (types.Transactions, types.Receipts,
|
||||
// make transactions
|
||||
trx1 := types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), []byte{})
|
||||
trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), []byte{})
|
||||
trx3 := types.NewContractCreation(2, big.NewInt(1500), 75, big.NewInt(150), ContractCode)
|
||||
trx3 := types.NewContractCreation(2, big.NewInt(1500), 75, big.NewInt(150), MockContractByteCode)
|
||||
trx4 := types.NewTransaction(3, AnotherAddress1, big.NewInt(2000), 100, big.NewInt(200), []byte{})
|
||||
transactionSigner := types.MakeSigner(params.MainnetChainConfig, big.NewInt(BlockNumber1), 0)
|
||||
transactionSigner := types.MakeSigner(params.MainnetChainConfig, new(big.Int).Set(BlockNumber))
|
||||
mockCurve := elliptic.P256()
|
||||
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
|
||||
if err != nil {
|
||||
@ -456,14 +720,20 @@ func createLegacyTransactionsAndReceipts() (types.Transactions, types.Receipts,
|
||||
return types.Transactions{signedTrx1, signedTrx2, signedTrx3, signedTrx4}, types.Receipts{mockReceipt1, mockReceipt2, mockReceipt3, mockReceipt4}, SenderAddr
|
||||
}
|
||||
|
||||
func getReceiptCIDs(rcts []*types.Receipt) ([]cid.Cid, error) {
|
||||
cids := make([]cid.Cid, len(rcts))
|
||||
for i, rct := range rcts {
|
||||
ethRct, err := ipld.EncodeReceipt(rct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cids[i] = ethRct.Cid()
|
||||
}
|
||||
return cids, nil
|
||||
func GetTxnRlp(num int, txs types.Transactions) []byte {
    buf := new(bytes.Buffer)
    txs.EncodeIndex(num, buf)
    tx := make([]byte, buf.Len())
    copy(tx, buf.Bytes())
    buf.Reset()
    return tx
}

func GetRctRlp(num int, rcts types.Receipts) []byte {
    buf := new(bytes.Buffer)
    rcts.EncodeIndex(num, buf)
    rct := make([]byte, buf.Len())
    copy(rct, buf.Bytes())
    buf.Reset()
    return rct
}
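
A minimal usage sketch of these RLP helpers, assuming the MockTransactions and MockReceipts fixtures defined elsewhere in this test_helpers package:

// Consensus RLP of the second mock transaction and its matching receipt (index 1).
txRLP := GetTxnRlp(1, MockTransactions)
rctRLP := GetRctRlp(1, MockReceipts)
_, _ = txRLP, rctRLP // e.g. compare against the stored IPLD block data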
|
||||
|
||||
@ -1,49 +0,0 @@
|
||||
package eth
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
// TransactionArgs represents the arguments to construct a new transaction
|
||||
// or a message call.
|
||||
type TransactionArgs struct {
|
||||
From *common.Address `json:"from"`
|
||||
To *common.Address `json:"to"`
|
||||
Gas *hexutil.Uint64 `json:"gas"`
|
||||
GasPrice *hexutil.Big `json:"gasPrice"`
|
||||
MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"`
|
||||
MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"`
|
||||
Value *hexutil.Big `json:"value"`
|
||||
Nonce *hexutil.Uint64 `json:"nonce"`
|
||||
|
||||
// We accept "data" and "input" for backwards-compatibility reasons.
|
||||
// "input" is the newer name and should be preferred by clients.
|
||||
// Issue detail: https://github.com/ethereum/go-ethereum/issues/15628
|
||||
Data *hexutil.Bytes `json:"data"`
|
||||
Input *hexutil.Bytes `json:"input"`
|
||||
|
||||
// Introduced by AccessListTxType transaction.
|
||||
AccessList *types.AccessList `json:"accessList,omitempty"`
|
||||
ChainID *hexutil.Big `json:"chainId,omitempty"`
|
||||
}
|
||||
|
||||
// from retrieves the transaction sender address.
func (args *TransactionArgs) from() common.Address {
    if args.From == nil {
        return common.Address{}
    }
    return *args.From
}

// data retrieves the transaction calldata. Input field is preferred.
func (args *TransactionArgs) data() []byte {
    if args.Input != nil {
        return *args.Input
    }
    if args.Data != nil {
        return *args.Data
    }
    return nil
}
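
A quick sketch of the input/data fallback these accessors implement (unexported, so from within this package); the byte values are placeholders:

legacy := hexutil.Bytes{0x01}
input := hexutil.Bytes{0x02}
args := TransactionArgs{Data: &legacy, Input: &input}
_ = args.data() // yields input ({0x02}): the newer "input" field wins when both are set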
|
||||
180
pkg/eth/types.go
@ -18,45 +18,38 @@ package eth
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/models"
|
||||
sdtypes "github.com/cerc-io/plugeth-statediff/types"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction
|
||||
// Note: copied from go-ethereum/internal/ethapi
|
||||
type RPCTransaction struct {
|
||||
BlockHash *common.Hash `json:"blockHash"`
|
||||
BlockNumber *hexutil.Big `json:"blockNumber"`
|
||||
From common.Address `json:"from"`
|
||||
Gas hexutil.Uint64 `json:"gas"`
|
||||
GasPrice *hexutil.Big `json:"gasPrice"`
|
||||
GasFeeCap *hexutil.Big `json:"maxFeePerGas,omitempty"`
|
||||
GasTipCap *hexutil.Big `json:"maxPriorityFeePerGas,omitempty"`
|
||||
MaxFeePerBlobGas *hexutil.Big `json:"maxFeePerBlobGas,omitempty"`
|
||||
Hash common.Hash `json:"hash"`
|
||||
Input hexutil.Bytes `json:"input"`
|
||||
Nonce hexutil.Uint64 `json:"nonce"`
|
||||
To *common.Address `json:"to"`
|
||||
TransactionIndex *hexutil.Uint64 `json:"transactionIndex"`
|
||||
Value *hexutil.Big `json:"value"`
|
||||
Type hexutil.Uint64 `json:"type"`
|
||||
Accesses *types.AccessList `json:"accessList,omitempty"`
|
||||
ChainID *hexutil.Big `json:"chainId,omitempty"`
|
||||
BlobVersionedHashes []common.Hash `json:"blobVersionedHashes,omitempty"`
|
||||
V *hexutil.Big `json:"v"`
|
||||
R *hexutil.Big `json:"r"`
|
||||
S *hexutil.Big `json:"s"`
|
||||
YParity *hexutil.Uint64 `json:"yParity,omitempty"`
|
||||
BlockHash *common.Hash `json:"blockHash"`
|
||||
BlockNumber *hexutil.Big `json:"blockNumber"`
|
||||
From common.Address `json:"from"`
|
||||
Gas hexutil.Uint64 `json:"gas"`
|
||||
GasPrice *hexutil.Big `json:"gasPrice"`
|
||||
GasFeeCap *hexutil.Big `json:"maxFeePerGas,omitempty"`
|
||||
GasTipCap *hexutil.Big `json:"maxPriorityFeePerGas,omitempty"`
|
||||
Hash common.Hash `json:"hash"`
|
||||
Input hexutil.Bytes `json:"input"`
|
||||
Nonce hexutil.Uint64 `json:"nonce"`
|
||||
To *common.Address `json:"to"`
|
||||
TransactionIndex *hexutil.Uint64 `json:"transactionIndex"`
|
||||
Value *hexutil.Big `json:"value"`
|
||||
Type hexutil.Uint64 `json:"type"`
|
||||
Accesses *types.AccessList `json:"accessList,omitempty"`
|
||||
ChainID *hexutil.Big `json:"chainId,omitempty"`
|
||||
V *hexutil.Big `json:"v"`
|
||||
R *hexutil.Big `json:"r"`
|
||||
S *hexutil.Big `json:"s"`
|
||||
}
|
||||
|
||||
// RPCReceipt represents a receipt that will serialize to the RPC representation of a receipt
|
||||
@ -130,10 +123,10 @@ func (arg *CallArgs) data() []byte {
|
||||
// ToMessage converts the transaction arguments to the Message type used by the
|
||||
// core evm. This method is used in calls and traces that do not require a real
|
||||
// live transaction.
|
||||
func (arg *CallArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (*core.Message, error) {
|
||||
func (arg *CallArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (types.Message, error) {
|
||||
// Reject invalid combinations of pre- and post-1559 fee styles
|
||||
if arg.GasPrice != nil && (arg.MaxFeePerGas != nil || arg.MaxPriorityFeePerGas != nil) {
|
||||
return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
|
||||
return types.Message{}, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
|
||||
}
|
||||
// Set sender address or use zero address if none specified.
|
||||
addr := arg.from()
|
||||
@ -147,7 +140,7 @@ func (arg *CallArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (*core.Mes
|
||||
gas = uint64(*arg.Gas)
|
||||
}
|
||||
if globalGasCap != 0 && globalGasCap < gas {
|
||||
log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap)
|
||||
logrus.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap)
|
||||
gas = globalGasCap
|
||||
}
|
||||
var (
|
||||
@ -194,22 +187,51 @@ func (arg *CallArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (*core.Mes
|
||||
if arg.AccessList != nil {
|
||||
accessList = *arg.AccessList
|
||||
}
|
||||
    msg := &core.Message{
        Nonce: 0,
        GasLimit: gas,
        GasPrice: gasPrice,
        GasFeeCap: gasFeeCap,
        GasTipCap: gasTipCap,
        To: arg.To,
        Value: value,
        Data: data,
        AccessList: accessList,
        SkipAccountChecks: true,
        From: addr,
    }
    msg := types.NewMessage(addr, arg.To, 0, value, gas, gasPrice, gasFeeCap, gasTipCap, data, accessList, true)
    return msg, nil
}
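
As a hedged illustration of the fee-style validation at the top of this function (CallArgs is assumed to expose the GasPrice and MaxPriorityFeePerGas pointer fields it dereferences above):

gasPrice := (*hexutil.Big)(big.NewInt(1))
tip := (*hexutil.Big)(big.NewInt(2))
bad := CallArgs{GasPrice: gasPrice, MaxPriorityFeePerGas: tip}
if _, err := bad.ToMessage(0, nil); err != nil {
	// rejected before any gas or value handling: mixing pre- and post-1559 fee fields
}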
|
||||
|
||||
// IPLDs is used to package raw IPLD block data fetched from IPFS and returned by the server
|
||||
// Returned by IPLDFetcher and ResponseFilterer
|
||||
type IPLDs struct {
|
||||
BlockNumber *big.Int
|
||||
TotalDifficulty *big.Int
|
||||
Header models.IPLDModel
|
||||
Uncles []models.IPLDModel
|
||||
Transactions []models.IPLDModel
|
||||
Receipts []models.IPLDModel
|
||||
StateNodes []StateNode
|
||||
StorageNodes []StorageNode
|
||||
}
|
||||
|
||||
type StateNode struct {
    Type sdtypes.NodeType
    StateLeafKey common.Hash
    Path []byte
    IPLD models.IPLDModel
}

type StorageNode struct {
    Type sdtypes.NodeType
    StateLeafKey common.Hash
    StorageLeafKey common.Hash
    Path []byte
    IPLD models.IPLDModel
}
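
For orientation, a sketch of assembling one of these wrapper values, mirroring the mock data earlier in this diff; leafKey, rawStateLeafRLP, and stateCID are placeholders:

node := StateNode{
	Type:         sdtypes.Leaf,
	StateLeafKey: common.BytesToHash(leafKey),
	Path:         []byte{'\x06'},
	IPLD: models.IPLDModel{
		BlockNumber: "1",
		Data:        rawStateLeafRLP,
		Key:         stateCID.String(),
	},
}
_ = node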
|
||||
|
||||
// CIDWrapper is used to direct fetching of IPLDs from IPFS
|
||||
// Returned by CIDRetriever
|
||||
// Passed to IPLDFetcher
|
||||
type CIDWrapper struct {
|
||||
BlockNumber *big.Int
|
||||
Header models.HeaderModel
|
||||
Uncles []models.UncleModel
|
||||
Transactions []models.TxModel
|
||||
Receipts []models.ReceiptModel
|
||||
StateNodes []models.StateNodeModel
|
||||
StorageNodes []models.StorageNodeWithStateKeyModel
|
||||
}
|
||||
|
||||
// ConvertedPayload is a custom type which packages raw ETH data for publishing to IPFS and filtering to subscribers
|
||||
// Returned by PayloadConverter
|
||||
// Passed to IPLDPublisher and ResponseFilterer
|
||||
@ -219,13 +241,13 @@ type ConvertedPayload struct {
|
||||
TxMetaData []models.TxModel
|
||||
Receipts types.Receipts
|
||||
ReceiptMetaData []models.ReceiptModel
|
||||
StateNodes []sdtypes.StateLeafNode
|
||||
StorageNodes map[string][]sdtypes.StorageLeafNode
|
||||
StateNodes []sdtypes.StateNode
|
||||
StorageNodes map[string][]sdtypes.StorageNode
|
||||
}
|
||||
|
||||
// LogResult represent a log.
|
||||
type LogResult struct {
|
||||
LeafCID string `db:"cid"`
|
||||
LeafCID string `db:"leaf_cid"`
|
||||
ReceiptID string `db:"rct_id"`
|
||||
Address string `db:"address"`
|
||||
Index int64 `db:"index"`
|
||||
@ -235,70 +257,10 @@ type LogResult struct {
|
||||
Topic2 string `db:"topic2"`
|
||||
Topic3 string `db:"topic3"`
|
||||
LogLeafData []byte `db:"data"`
|
||||
RctCID string `db:"rct_cid"`
|
||||
RctCID string `db:"cid"`
|
||||
RctStatus uint64 `db:"post_status"`
|
||||
BlockNumber string `db:"block_number"`
|
||||
BlockHash string `db:"block_hash"`
|
||||
TxnIndex int64 `db:"txn_index"`
|
||||
TxHash string `db:"tx_hash"`
|
||||
}
|
||||
|
||||
// GetSliceResponse holds response for the eth_getSlice method
|
||||
type GetSliceResponse struct {
|
||||
SliceID string `json:"sliceId"`
|
||||
MetaData GetSliceResponseMetadata `json:"metadata"`
|
||||
TrieNodes GetSliceResponseTrieNodes `json:"trieNodes"`
|
||||
Leaves map[string]GetSliceResponseAccount `json:"leaves"` // key: Keccak256Hash(address) in hex (leafKey)
|
||||
}
|
||||
|
||||
func (sr *GetSliceResponse) init(path string, depth int, root common.Hash) {
|
||||
sr.SliceID = fmt.Sprintf("%s-%d-%s", path, depth, root.String())
|
||||
sr.MetaData = GetSliceResponseMetadata{
|
||||
NodeStats: make(map[string]string, 0),
|
||||
TimeStats: make(map[string]string, 0),
|
||||
}
|
||||
sr.Leaves = make(map[string]GetSliceResponseAccount)
|
||||
sr.TrieNodes = GetSliceResponseTrieNodes{
|
||||
Stem: make(map[string]string),
|
||||
Head: make(map[string]string),
|
||||
Slice: make(map[string]string),
|
||||
}
|
||||
}
|
||||
|
||||
func (sr *GetSliceResponse) populateMetaData(metaData metaDataFields) {
|
||||
sr.MetaData.NodeStats["00-stem-and-head-nodes"] = strconv.Itoa(len(sr.TrieNodes.Stem) + len(sr.TrieNodes.Head))
|
||||
sr.MetaData.NodeStats["01-max-depth"] = strconv.Itoa(metaData.maxDepth)
|
||||
sr.MetaData.NodeStats["02-total-trie-nodes"] = strconv.Itoa(len(sr.TrieNodes.Stem) + len(sr.TrieNodes.Head) + len(sr.TrieNodes.Slice))
|
||||
sr.MetaData.NodeStats["03-leaves"] = strconv.Itoa(metaData.leafCount)
|
||||
sr.MetaData.NodeStats["04-smart-contracts"] = strconv.Itoa(len(sr.Leaves))
|
||||
|
||||
sr.MetaData.TimeStats["00-trie-loading"] = strconv.FormatInt(metaData.trieLoadingTime, 10)
|
||||
sr.MetaData.TimeStats["01-fetch-stem-keys"] = strconv.FormatInt(metaData.stemNodesFetchTime, 10)
|
||||
sr.MetaData.TimeStats["02-fetch-slice-keys"] = strconv.FormatInt(metaData.sliceNodesFetchTime, 10)
|
||||
sr.MetaData.TimeStats["03-fetch-leaves-info"] = strconv.FormatInt(metaData.leavesFetchTime, 10)
|
||||
}
|
||||
|
||||
type GetSliceResponseMetadata struct {
|
||||
TimeStats map[string]string `json:"timeStats"` // stem, state, storage (one by one)
|
||||
NodeStats map[string]string `json:"nodeStats"` // total, leaves, smart contracts
|
||||
}
|
||||
|
||||
type GetSliceResponseTrieNodes struct {
|
||||
Stem map[string]string `json:"stem"` // key: Keccak256Hash(data) in hex, value: trie node data in hex
|
||||
Head map[string]string `json:"head"`
|
||||
Slice map[string]string `json:"sliceNodes"`
|
||||
}
|
||||
|
||||
type GetSliceResponseAccount struct {
|
||||
StorageRoot string `json:"storageRoot"`
|
||||
EVMCode string `json:"evmCode"`
|
||||
}
|
||||
|
||||
type metaDataFields struct {
    maxDepth int
    leafCount int
    trieLoadingTime int64
    stemNodesFetchTime int64
    sliceNodesFetchTime int64
    leavesFetchTime int64
}
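
A rough sketch of how these pieces fit together when building an eth_getSlice response (unexported helpers, so from inside pkg/eth); the node hash and RLP strings are placeholders:

var resp GetSliceResponse
resp.init("00", 2, common.HexToHash("0xabc")) // slice rooted at path "00", depth 2
resp.TrieNodes.Slice["<keccak256-of-node>"] = "<node-rlp-hex>"
resp.populateMetaData(metaDataFields{maxDepth: 2, leafCount: 1})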
|
||||
|
||||
@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
@ -43,12 +42,12 @@ type IPFSBlockResponse struct {
|
||||
}
|
||||
|
||||
type EthTransactionCIDResponse struct {
|
||||
CID string `json:"cid"`
|
||||
TxHash string `json:"txHash"`
|
||||
Index int32 `json:"index"`
|
||||
Src string `json:"src"`
|
||||
Dst string `json:"dst"`
|
||||
BlockByCid IPFSBlockResponse `json:"blockByCid"`
|
||||
CID string `json:"cid"`
|
||||
TxHash string `json:"txHash"`
|
||||
Index int32 `json:"index"`
|
||||
Src string `json:"src"`
|
||||
Dst string `json:"dst"`
|
||||
BlockByMhKey IPFSBlockResponse `json:"blockByMhKey"`
|
||||
}
|
||||
|
||||
type EthTransactionCIDByTxHash struct {
|
||||
@ -72,7 +71,7 @@ type EthHeaderCIDResponse struct {
|
||||
UncleRoot string `json:"uncleRoot"`
|
||||
Bloom string `json:"bloom"`
|
||||
EthTransactionCIDsByHeaderId EthTransactionCIDsByHeaderIdResponse `json:"ethTransactionCidsByHeaderId"`
|
||||
BlockByCid IPFSBlockResponse `json:"blockByCid"`
|
||||
BlockByMhKey IPFSBlockResponse `json:"blockByMhKey"`
|
||||
}
|
||||
|
||||
type AllEthHeaderCIDsResponse struct {
|
||||
@ -92,16 +91,10 @@ func NewClient(endpoint string) *Client {
|
||||
return &Client{client: client}
|
||||
}
|
||||
|
||||
func (c *Client) GetLogs(ctx context.Context, hash common.Hash, addresses []common.Address) ([]LogResponse, error) {
|
||||
func (c *Client) GetLogs(ctx context.Context, hash common.Hash, address *common.Address) ([]LogResponse, error) {
|
||||
params := fmt.Sprintf(`blockHash: "%s"`, hash.String())
|
||||
|
||||
if addresses != nil {
|
||||
addressStrings := make([]string, len(addresses))
|
||||
for i, address := range addresses {
|
||||
addressStrings[i] = fmt.Sprintf(`"%s"`, address.String())
|
||||
}
|
||||
|
||||
params += fmt.Sprintf(`, addresses: [%s]`, strings.Join(addressStrings, ","))
|
||||
if address != nil {
|
||||
params += fmt.Sprintf(`, contract: "%s"`, address.String())
|
||||
}
|
||||
|
||||
getLogsQuery := fmt.Sprintf(`query{
|
||||
@ -195,7 +188,7 @@ func (c *Client) AllEthHeaderCIDs(ctx context.Context, condition EthHeaderCIDCon
|
||||
receiptRoot
|
||||
uncleRoot
|
||||
bloom
|
||||
blockByCid {
|
||||
blockByMhKey {
|
||||
key
|
||||
data
|
||||
}
|
||||
@ -244,7 +237,7 @@ func (c *Client) EthTransactionCIDByTxHash(ctx context.Context, txHash string) (
|
||||
index
|
||||
src
|
||||
dst
|
||||
blockByCid {
|
||||
blockByMhKey {
|
||||
data
|
||||
}
|
||||
}
|
||||
|
||||
@ -23,19 +23,19 @@ import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/eth/filters"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
|
||||
state "github.com/cerc-io/ipld-eth-statedb/direct_by_leaf"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/shared"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -51,7 +51,7 @@ type Account struct {
|
||||
|
||||
// getState fetches the StateDB object for an account.
|
||||
func (a *Account) getState(ctx context.Context) (*state.StateDB, error) {
|
||||
state, _, err := a.backend.IPLDDirectStateDBAndHeaderByNumberOrHash(ctx, a.blockNrOrHash)
|
||||
state, _, err := a.backend.StateAndHeaderByNumberOrHash(ctx, a.blockNrOrHash)
|
||||
return state, err
|
||||
}
|
||||
|
||||
@ -64,7 +64,7 @@ func (a *Account) Balance(ctx context.Context) (hexutil.Big, error) {
|
||||
if err != nil {
|
||||
return hexutil.Big{}, err
|
||||
}
|
||||
return hexutil.Big(*state.GetBalance(a.address).ToBig()), nil
|
||||
return hexutil.Big(*state.GetBalance(a.address)), nil
|
||||
}
|
||||
|
||||
func (a *Account) TransactionCount(ctx context.Context) (hexutil.Uint64, error) {
|
||||
@ -243,7 +243,10 @@ func (t *Transaction) From(ctx context.Context, args BlockNumberArgs) (*Account,
|
||||
if err != nil || tx == nil {
|
||||
return nil, err
|
||||
}
|
||||
signer := eth.SignerForTx(tx)
|
||||
var signer types.Signer = types.HomesteadSigner{}
|
||||
if tx.Protected() {
|
||||
signer = types.NewEIP155Signer(tx.ChainId())
|
||||
}
|
||||
from, _ := types.Sender(signer, tx)
|
||||
|
||||
return &Account{
|
||||
@ -773,8 +776,8 @@ func (b *Block) Logs(ctx context.Context, args struct{ Filter BlockFilterCriteri
        hash = header.Hash()
    }
    // Construct the range filter
    filterSys := filters.NewFilterSystem(b.backend, filters.Config{})
    filter := filterSys.NewBlockFilter(hash, addresses, topics)
    filter := filters.NewBlockFilter(b.backend, hash, addresses, topics)

    // Run the filter and return all the logs
    return runFilter(ctx, b.backend, filter)
}
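
Sketched standalone, the v5 pattern for building and running a one-block log filter; backend is assumed to satisfy the go-ethereum filters.Backend interface (as b.backend does above), and blockHash, addresses, and topics are placeholders:

filterSys := filters.NewFilterSystem(backend, filters.Config{})
blockFilter := filterSys.NewBlockFilter(blockHash, addresses, topics)
logs, err := blockFilter.Logs(ctx) // or run through this package's runFilter helper, as above
_, _ = logs, err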
|
||||
@ -834,7 +837,7 @@ func (b *Block) Call(ctx context.Context, args struct {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
result, err := eth.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, 5*time.Second, b.backend.RPCGasCap())
|
||||
result, err := eth.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, 5*time.Second, b.backend.RPCGasCap().Uint64())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -976,8 +979,7 @@ func (r *Resolver) Logs(ctx context.Context, args struct{ Filter FilterCriteria
|
||||
topics = *args.Filter.Topics
|
||||
}
|
||||
// Construct the range filter
|
||||
filterSys := filters.NewFilterSystem(r.backend, filters.Config{})
|
||||
filter := filterSys.NewRangeFilter(begin, end, addresses, topics)
|
||||
filter := filters.NewRangeFilter(filters.Backend(r.backend), begin, end, addresses, topics)
|
||||
return runFilter(ctx, r.backend, filter)
|
||||
}
|
||||
|
||||
@ -1005,41 +1007,40 @@ func (r *Resolver) GetStorageAt(ctx context.Context, args struct {
|
||||
Contract common.Address
|
||||
Slot common.Hash
|
||||
}) (*StorageResult, error) {
|
||||
cid, nodeRLP, err := r.backend.Retriever.RetrieveStorageAndRLP(args.Contract, args.Slot, args.BlockHash)
|
||||
cid, ipldBlock, rlpValue, err := r.backend.IPLDRetriever.RetrieveStorageAtByAddressAndStorageSlotAndBlockHash(args.Contract, args.Slot, args.BlockHash)
|
||||
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return &StorageResult{value: []byte{}, cid: "", ipldBlock: []byte{}}, nil
|
||||
ret := StorageResult{value: []byte{}, cid: "", ipldBlock: []byte{}}
|
||||
|
||||
return &ret, nil
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
valueRLP, err := eth.DecodeLeafNode(nodeRLP)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if bytes.Equal(valueRLP, eth.EmptyNodeValue) {
|
||||
return &StorageResult{value: eth.EmptyNodeValue, cid: cid, ipldBlock: nodeRLP}, nil
|
||||
|
||||
if bytes.Equal(rlpValue, eth.EmptyNodeValue) {
|
||||
return &StorageResult{value: eth.EmptyNodeValue, cid: cid, ipldBlock: ipldBlock}, nil
|
||||
}
|
||||
|
||||
var value interface{}
|
||||
err = rlp.DecodeBytes(valueRLP, &value)
|
||||
err = rlp.DecodeBytes(rlpValue, &value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &StorageResult{value: value.([]byte), cid: cid, ipldBlock: nodeRLP}, nil
|
||||
ret := StorageResult{value: value.([]byte), cid: cid, ipldBlock: ipldBlock}
|
||||
return &ret, nil
|
||||
}
|
||||
|
||||
func (r *Resolver) GetLogs(ctx context.Context, args struct {
|
||||
BlockHash common.Hash
|
||||
BlockNumber *BigInt
|
||||
Addresses *[]common.Address
|
||||
BlockHash common.Hash
|
||||
Contract *common.Address
|
||||
}) (*[]*Log, error) {
|
||||
var filter eth.ReceiptFilter
|
||||
|
||||
if args.Addresses != nil {
|
||||
filter.LogAddresses = make([]string, len(*args.Addresses))
|
||||
for i, address := range *args.Addresses {
|
||||
filter.LogAddresses[i] = address.String()
|
||||
}
|
||||
var filter eth.ReceiptFilter
|
||||
if args.Contract != nil {
|
||||
filter.LogAddresses = []string{args.Contract.String()}
|
||||
}
|
||||
|
||||
// Begin tx
|
||||
@ -1048,7 +1049,7 @@ func (r *Resolver) GetLogs(ctx context.Context, args struct {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filteredLogs, err := r.backend.Retriever.RetrieveFilteredGQLLogs(tx, filter, &args.BlockHash, args.BlockNumber.ToInt())
|
||||
filteredLogs, err := r.backend.Retriever.RetrieveFilteredGQLLogs(tx, filter, &args.BlockHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -1153,7 +1154,7 @@ func (t EthTransactionCID) Dst(ctx context.Context) string {
|
||||
return t.dst
|
||||
}
|
||||
|
||||
func (t EthTransactionCID) BlockByCid(ctx context.Context) IPFSBlock {
|
||||
func (t EthTransactionCID) BlockByMhKey(ctx context.Context) IPFSBlock {
|
||||
return t.ipfsBlock
|
||||
}
|
||||
|
||||
@ -1242,7 +1243,7 @@ func (h EthHeaderCID) EthTransactionCidsByHeaderId(ctx context.Context) EthTrans
|
||||
return EthTransactionCIDsConnection{nodes: h.transactions}
|
||||
}
|
||||
|
||||
func (h EthHeaderCID) BlockByCid(ctx context.Context) IPFSBlock {
|
||||
func (h EthHeaderCID) BlockByMhKey(ctx context.Context) IPFSBlock {
|
||||
return h.ipfsBlock
|
||||
}
|
||||
|
||||
@ -1265,14 +1266,12 @@ func (r *Resolver) AllEthHeaderCids(ctx context.Context, args struct {
|
||||
var headerCIDs []eth.HeaderCIDRecord
|
||||
var err error
|
||||
if args.Condition.BlockHash != nil {
|
||||
headerCID, err := r.backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(common.HexToHash(*args.Condition.BlockHash), args.Condition.BlockNumber.ToInt())
|
||||
headerCID, err := r.backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(common.HexToHash(*args.Condition.BlockHash))
|
||||
if err != nil {
|
||||
if !strings.Contains(err.Error(), "not found") {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
headerCIDs = append(headerCIDs, headerCID)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
headerCIDs = append(headerCIDs, headerCID)
|
||||
} else if args.Condition.BlockNumber != nil {
|
||||
headerCIDs, err = r.backend.Retriever.RetrieveHeaderAndTxCIDsByBlockNumber(args.Condition.BlockNumber.ToInt().Int64())
|
||||
if err != nil {
|
||||
@ -1282,6 +1281,22 @@ func (r *Resolver) AllEthHeaderCids(ctx context.Context, args struct {
|
||||
return nil, fmt.Errorf("provide block number or block hash")
|
||||
}
|
||||
|
||||
// Begin tx
|
||||
tx, err := r.backend.DB.Beginx()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
shared.Rollback(tx)
|
||||
panic(p)
|
||||
} else if err != nil {
|
||||
shared.Rollback(tx)
|
||||
} else {
|
||||
err = tx.Commit()
|
||||
}
|
||||
}()
|
||||
|
||||
var resultNodes []*EthHeaderCID
|
||||
for _, headerCID := range headerCIDs {
|
||||
var blockNumber BigInt
|
||||
@ -1303,7 +1318,7 @@ func (r *Resolver) AllEthHeaderCids(ctx context.Context, args struct {
|
||||
td: td,
|
||||
txRoot: headerCID.TxRoot,
|
||||
receiptRoot: headerCID.RctRoot,
|
||||
uncleRoot: headerCID.UnclesHash,
|
||||
uncleRoot: headerCID.UncleRoot,
|
||||
bloom: Bytes(headerCID.Bloom).String(),
|
||||
ipfsBlock: IPFSBlock{
|
||||
key: headerCID.IPLD.Key,
|
||||
@ -1330,12 +1345,9 @@ func (r *Resolver) AllEthHeaderCids(ctx context.Context, args struct {
|
||||
}
|
||||
|
||||
func (r *Resolver) EthTransactionCidByTxHash(ctx context.Context, args struct {
|
||||
TxHash string
|
||||
BlockNumber *BigInt
|
||||
TxHash string
|
||||
}) (*EthTransactionCID, error) {
|
||||
// Need not check args.BlockNumber for nil as .ToInt() uses a pointer receiver and returns nil if BlockNumber is nil
|
||||
// https://stackoverflow.com/questions/42238624/calling-a-method-on-a-nil-struct-pointer-doesnt-panic-why-not
|
||||
txCID, err := r.backend.Retriever.RetrieveTxCIDByHash(args.TxHash, args.BlockNumber.ToInt())
|
||||
txCID, err := r.backend.Retriever.RetrieveTxCIDByHash(args.TxHash)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@ -17,13 +17,19 @@
|
||||
package graphql_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func TestGraphQL(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "graphql test suite")
|
||||
}
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
logrus.SetOutput(ioutil.Discard)
|
||||
})
|
||||
|
||||
@ -22,10 +22,6 @@ import (
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
statediff "github.com/cerc-io/plugeth-statediff"
|
||||
"github.com/cerc-io/plugeth-statediff/adapt"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/ipld"
|
||||
sdtypes "github.com/cerc-io/plugeth-statediff/types"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
@ -34,148 +30,151 @@ import (
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/statediff"
|
||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
||||
"github.com/jmoiron/sqlx"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth/test_helpers"
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/graphql"
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/shared"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth/test_helpers"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/graphql"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/shared"
|
||||
ethServerShared "github.com/vulcanize/ipld-eth-server/v4/pkg/shared"
|
||||
)
|
||||
|
||||
const (
|
||||
gqlEndPoint = "127.0.0.1:8083"
|
||||
)
|
||||
|
||||
var (
|
||||
randomAddr = common.HexToAddress("0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f")
|
||||
randomHash = crypto.Keccak256Hash(randomAddr.Bytes())
|
||||
blocks []*types.Block
|
||||
receipts []types.Receipts
|
||||
chain *core.BlockChain
|
||||
db *sqlx.DB
|
||||
backend *eth.Backend
|
||||
graphQLServer *graphql.Service
|
||||
chainConfig = &*params.TestChainConfig
|
||||
client = graphql.NewClient(fmt.Sprintf("http://%s/graphql", gqlEndPoint))
|
||||
mockTD = big.NewInt(1337)
|
||||
ctx = context.Background()
|
||||
nonCanonBlockHash common.Hash
|
||||
nonCanonContractAddress common.Address
|
||||
)
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
var err error
|
||||
db = shared.SetupDB()
|
||||
|
||||
backend, err = eth.NewEthBackend(db, ð.Config{
|
||||
ChainConfig: chainConfig,
|
||||
VMConfig: vm.Config{},
|
||||
RPCGasCap: big.NewInt(10000000000),
|
||||
GroupCacheConfig: &shared.GroupCacheConfig{
|
||||
StateDB: shared.GroupConfig{
|
||||
Name: "graphql_test",
|
||||
CacheSizeInMB: 8,
|
||||
CacheExpiryInMins: 60,
|
||||
LogStatsIntervalInSecs: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// make the test blockchain (and state)
|
||||
chainConfig.LondonBlock = big.NewInt(100)
|
||||
blocks, receipts, chain = test_helpers.MakeChain(5, test_helpers.Genesis, test_helpers.TestChainGen, chainConfig)
|
||||
indexer := shared.SetupTestStateDiffIndexer(context.Background(), chainConfig, test_helpers.Genesis.Hash())
|
||||
builder := statediff.NewBuilder(adapt.GethStateView(chain.StateCache()))
|
||||
|
||||
// Insert some non-canonical data into the database so that we test our ability to discern canonicity
|
||||
nonCanonBlockHash = test_helpers.MockBlock.Hash()
|
||||
nonCanonContractAddress = test_helpers.ContractAddr
|
||||
|
||||
tx, err := indexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer tx.RollbackOnFailure(err)
|
||||
|
||||
err = tx.Submit()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// The non-canonical header has a child
|
||||
tx, err = indexer.PushBlock(test_helpers.MockChild, test_helpers.MockReceipts, test_helpers.MockChild.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer tx.RollbackOnFailure(err)
|
||||
|
||||
ipld := sdtypes.IPLD{
|
||||
CID: ipld.Keccak256ToCid(ipld.RawBinary, test_helpers.CodeHash.Bytes()).String(),
|
||||
Content: test_helpers.ContractCode,
|
||||
}
|
||||
err = indexer.PushIPLD(tx, ipld)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = tx.Submit()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// iterate over the blocks, generating statediff payloads, and transforming the data into Postgres
|
||||
for i, block := range blocks {
|
||||
var args statediff.Args
|
||||
var rcts types.Receipts
|
||||
if i == 0 {
|
||||
args = statediff.Args{
|
||||
OldStateRoot: common.Hash{},
|
||||
NewStateRoot: block.Root(),
|
||||
BlockNumber: block.Number(),
|
||||
BlockHash: block.Hash(),
|
||||
}
|
||||
} else {
|
||||
args = statediff.Args{
|
||||
OldStateRoot: blocks[i-1].Root(),
|
||||
NewStateRoot: block.Root(),
|
||||
BlockNumber: block.Number(),
|
||||
BlockHash: block.Hash(),
|
||||
}
|
||||
rcts = receipts[i-1]
|
||||
}
|
||||
|
||||
tx, err = indexer.PushBlock(block, rcts, mockTD)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer tx.RollbackOnFailure(err)
|
||||
|
||||
diff, err := builder.BuildStateDiffObject(args, statediff.Params{})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
for _, node := range diff.Nodes {
|
||||
err = indexer.PushStateNode(tx, node, block.Hash().String())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, ipld := range diff.IPLDs {
|
||||
err = indexer.PushIPLD(tx, ipld)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
err = tx.Submit()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
graphQLServer, err = graphql.New(backend, gqlEndPoint, nil, []string{"*"}, rpc.HTTPTimeouts{})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = graphQLServer.Start(nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
err := graphQLServer.Stop()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
shared.TearDownDB(db)
|
||||
chain.Stop()
|
||||
})
|
||||
|
||||
var _ = Describe("GraphQL", func() {
|
||||
const (
|
||||
gqlEndPoint = "127.0.0.1:8083"
|
||||
)
|
||||
var (
|
||||
randomAddr = common.HexToAddress("0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f")
|
||||
randomHash = crypto.Keccak256Hash(randomAddr.Bytes())
|
||||
blocks []*types.Block
|
||||
receipts []types.Receipts
|
||||
chain *core.BlockChain
|
||||
db *sqlx.DB
|
||||
blockHashes []common.Hash
|
||||
backend *eth.Backend
|
||||
graphQLServer *graphql.Service
|
||||
chainConfig = params.TestChainConfig
|
||||
mockTD = big.NewInt(1337)
|
||||
client = graphql.NewClient(fmt.Sprintf("http://%s/graphql", gqlEndPoint))
|
||||
ctx = context.Background()
|
||||
blockHash common.Hash
|
||||
contractAddress common.Address
|
||||
)
|
||||
|
||||
It("test init", func() {
|
||||
var err error
|
||||
db = shared.SetupDB()
|
||||
transformer := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
|
||||
|
||||
backend, err = eth.NewEthBackend(db, ð.Config{
|
||||
ChainConfig: chainConfig,
|
||||
VMConfig: vm.Config{},
|
||||
RPCGasCap: big.NewInt(10000000000),
|
||||
GroupCacheConfig: ðServerShared.GroupCacheConfig{
|
||||
StateDB: ethServerShared.GroupConfig{
|
||||
Name: "graphql_test",
|
||||
CacheSizeInMB: 8,
|
||||
CacheExpiryInMins: 60,
|
||||
LogStatsIntervalInSecs: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// make the test blockchain (and state)
|
||||
blocks, receipts, chain = test_helpers.MakeChain(5, test_helpers.Genesis, test_helpers.TestChainGen)
|
||||
params := statediff.Params{
|
||||
IntermediateStateNodes: true,
|
||||
IntermediateStorageNodes: true,
|
||||
}
|
||||
|
||||
// iterate over the blocks, generating statediff payloads, and transforming the data into Postgres
|
||||
builder := statediff.NewBuilder(chain.StateCache())
|
||||
for i, block := range blocks {
|
||||
blockHashes = append(blockHashes, block.Hash())
|
||||
var args statediff.Args
|
||||
var rcts types.Receipts
|
||||
if i == 0 {
|
||||
args = statediff.Args{
|
||||
OldStateRoot: common.Hash{},
|
||||
NewStateRoot: block.Root(),
|
||||
BlockNumber: block.Number(),
|
||||
BlockHash: block.Hash(),
|
||||
}
|
||||
} else {
|
||||
args = statediff.Args{
|
||||
OldStateRoot: blocks[i-1].Root(),
|
||||
NewStateRoot: block.Root(),
|
||||
BlockNumber: block.Number(),
|
||||
BlockHash: block.Hash(),
|
||||
}
|
||||
rcts = receipts[i-1]
|
||||
}
|
||||
|
||||
var diff sdtypes.StateObject
|
||||
diff, err = builder.BuildStateDiffObject(args, params)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
tx, err := transformer.PushBlock(block, rcts, mockTD)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
for _, node := range diff.Nodes {
|
||||
err = transformer.PushStateNode(tx, node, block.Hash().String())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
// Insert some non-canonical data into the database so that we test our ability to discern canonicity
|
||||
indexAndPublisher := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
|
||||
|
||||
blockHash = test_helpers.MockBlock.Hash()
|
||||
contractAddress = test_helpers.ContractAddr
|
||||
|
||||
tx, err := indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// The non-canonical header has a child
|
||||
tx, err = indexAndPublisher.PushBlock(test_helpers.MockChild, test_helpers.MockReceipts, test_helpers.MockChild.Difficulty())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ccHash := sdtypes.CodeAndCodeHash{
|
||||
Hash: test_helpers.CodeHash,
|
||||
Code: test_helpers.ContractCode,
|
||||
}
|
||||
|
||||
err = indexAndPublisher.PushCodeAndCodeHash(tx, ccHash)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = tx.Submit(err)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
graphQLServer, err = graphql.New(backend, gqlEndPoint, nil, []string{"*"}, rpc.HTTPTimeouts{})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = graphQLServer.Start(nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
defer It("test teardown", func() {
|
||||
err := graphQLServer.Stop()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
shared.TearDownDB(db)
|
||||
chain.Stop()
|
||||
})
|
||||
|
||||
Describe("eth_getLogs", func() {
|
||||
It("Retrieves logs that matches the provided blockHash and contract address", func() {
|
||||
logs, err := client.GetLogs(ctx, nonCanonBlockHash, []common.Address{nonCanonContractAddress})
|
||||
logs, err := client.GetLogs(ctx, blockHash, &contractAddress)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedLogs := []graphql.LogResponse{
|
||||
@ -192,7 +191,7 @@ var _ = Describe("GraphQL", func() {
|
||||
})
|
||||
|
||||
It("Retrieves logs for the failed receipt status that matches the provided blockHash and another contract address", func() {
|
||||
logs, err := client.GetLogs(ctx, nonCanonBlockHash, []common.Address{test_helpers.AnotherAddress2})
|
||||
logs, err := client.GetLogs(ctx, blockHash, &test_helpers.AnotherAddress2)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedLogs := []graphql.LogResponse{
|
||||
@ -208,38 +207,14 @@ var _ = Describe("GraphQL", func() {
|
||||
Expect(logs).To(Equal(expectedLogs))
|
||||
})
|
||||
|
||||
It("Retrieves logs that matches the provided blockHash and multiple contract addresses", func() {
|
||||
logs, err := client.GetLogs(ctx, nonCanonBlockHash, []common.Address{nonCanonContractAddress, test_helpers.AnotherAddress2})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
expectedLogs := []graphql.LogResponse{
|
||||
{
|
||||
Topics: test_helpers.MockLog1.Topics,
|
||||
Data: hexutil.Bytes(test_helpers.MockLog1.Data),
|
||||
Transaction: graphql.TransactionResponse{Hash: test_helpers.MockTransactions[0].Hash()},
|
||||
ReceiptCID: test_helpers.Rct1CID.String(),
|
||||
Status: int32(test_helpers.MockReceipts[0].Status),
|
||||
},
|
||||
{
|
||||
Topics: test_helpers.MockLog6.Topics,
|
||||
Data: hexutil.Bytes(test_helpers.MockLog6.Data),
|
||||
Transaction: graphql.TransactionResponse{Hash: test_helpers.MockTransactions[3].Hash()},
|
||||
ReceiptCID: test_helpers.Rct4CID.String(),
|
||||
Status: int32(test_helpers.MockReceipts[3].Status),
|
||||
},
|
||||
}
|
||||
|
||||
Expect(logs).To(Equal(expectedLogs))
|
||||
})
|
||||
|
||||
It("Retrieves all the logs for the receipt that matches the provided blockHash and nil contract address", func() {
|
||||
logs, err := client.GetLogs(ctx, nonCanonBlockHash, nil)
|
||||
logs, err := client.GetLogs(ctx, blockHash, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(len(logs)).To(Equal(6))
|
||||
})
|
||||
|
||||
It("Retrieves logs with random hash", func() {
|
||||
logs, err := client.GetLogs(ctx, randomHash, []common.Address{nonCanonContractAddress})
|
||||
logs, err := client.GetLogs(ctx, randomHash, &contractAddress)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(len(logs)).To(Equal(0))
|
||||
})
|
||||
@ -247,31 +222,31 @@ var _ = Describe("GraphQL", func() {
|
||||
|
||||
Describe("eth_getStorageAt", func() {
|
||||
It("Retrieves the storage value at the provided contract address and storage leaf key at the block with the provided hash", func() {
|
||||
storageRes, err := client.GetStorageAt(ctx, blocks[2].Hash(), nonCanonContractAddress, test_helpers.IndexOne)
|
||||
storageRes, err := client.GetStorageAt(ctx, blockHashes[2], contractAddress, test_helpers.IndexOne)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(storageRes.Value).To(Equal(common.HexToHash("01")))
|
||||
|
||||
storageRes, err = client.GetStorageAt(ctx, blocks[3].Hash(), nonCanonContractAddress, test_helpers.IndexOne)
|
||||
storageRes, err = client.GetStorageAt(ctx, blockHashes[3], contractAddress, test_helpers.IndexOne)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(storageRes.Value).To(Equal(common.HexToHash("03")))
|
||||
|
||||
storageRes, err = client.GetStorageAt(ctx, blocks[4].Hash(), nonCanonContractAddress, test_helpers.IndexOne)
|
||||
storageRes, err = client.GetStorageAt(ctx, blockHashes[4], contractAddress, test_helpers.IndexOne)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(storageRes.Value).To(Equal(common.HexToHash("09")))
|
||||
})
|
||||
|
||||
It("Retrieves empty data if it tries to access a contract at a blockHash which does not exist", func() {
|
||||
storageRes, err := client.GetStorageAt(ctx, blocks[0].Hash(), nonCanonContractAddress, test_helpers.IndexOne)
|
||||
storageRes, err := client.GetStorageAt(ctx, blockHashes[0], contractAddress, test_helpers.IndexOne)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(storageRes.Value).To(Equal(common.Hash{}))
|
||||
|
||||
storageRes, err = client.GetStorageAt(ctx, blocks[1].Hash(), nonCanonContractAddress, test_helpers.IndexOne)
|
||||
storageRes, err = client.GetStorageAt(ctx, blockHashes[1], contractAddress, test_helpers.IndexOne)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(storageRes.Value).To(Equal(common.Hash{}))
|
||||
})
|
||||
|
||||
It("Retrieves empty data if it tries to access a contract slot which does not exist", func() {
|
||||
storageRes, err := client.GetStorageAt(ctx, blocks[3].Hash(), nonCanonContractAddress, randomHash.Hex())
|
||||
storageRes, err := client.GetStorageAt(ctx, blockHashes[3], contractAddress, randomHash.Hex())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(storageRes.Value).To(Equal(common.Hash{}))
|
||||
})
|
||||
@ -297,7 +272,7 @@ var _ = Describe("GraphQL", func() {
|
||||
allEthHeaderCIDsResp, err := client.AllEthHeaderCIDs(ctx, graphql.EthHeaderCIDCondition{BlockHash: &blockHash})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
headerCID, err := backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(blocks[1].Hash(), nil)
|
||||
headerCID, err := backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(blocks[1].Hash())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(allEthHeaderCIDsResp.Nodes)).To(Equal(1))
|
||||
@ -312,12 +287,12 @@ var _ = Describe("GraphQL", func() {
|
||||
ethTransactionCIDResp, err := client.EthTransactionCIDByTxHash(ctx, txHash)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
txCID, err := backend.Retriever.RetrieveTxCIDByHash(txHash, nil)
|
||||
txCID, err := backend.Retriever.RetrieveTxCIDByHash(txHash)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
compareEthTxCID(*ethTransactionCIDResp, txCID)
|
||||
|
||||
Expect(ethTransactionCIDResp.BlockByCid.Data).To(Equal(graphql.Bytes(txCID.IPLD.Data).String()))
|
||||
Expect(ethTransactionCIDResp.BlockByMhKey.Data).To(Equal(graphql.Bytes(txCID.IPLD.Data).String()))
|
||||
})
|
||||
})
|
||||
})
|
||||
@ -338,7 +313,7 @@ func compareEthHeaderCID(ethHeaderCID graphql.EthHeaderCIDResponse, headerCID et
|
||||
Expect(ethHeaderCID.Td).To(Equal(*new(graphql.BigInt).SetUint64(uint64(td))))
|
||||
Expect(ethHeaderCID.TxRoot).To(Equal(headerCID.TxRoot))
|
||||
Expect(ethHeaderCID.ReceiptRoot).To(Equal(headerCID.RctRoot))
|
||||
Expect(ethHeaderCID.UncleRoot).To(Equal(headerCID.UnclesHash))
|
||||
Expect(ethHeaderCID.UncleRoot).To(Equal(headerCID.UncleRoot))
|
||||
Expect(ethHeaderCID.Bloom).To(Equal(graphql.Bytes(headerCID.Bloom).String()))
|
||||
|
||||
for tIdx, txCID := range headerCID.TransactionCIDs {
|
||||
@ -346,8 +321,8 @@ func compareEthHeaderCID(ethHeaderCID graphql.EthHeaderCIDResponse, headerCID et
|
||||
compareEthTxCID(ethTxCID, txCID)
|
||||
}
|
||||
|
||||
Expect(ethHeaderCID.BlockByCid.Data).To(Equal(graphql.Bytes(headerCID.IPLD.Data).String()))
|
||||
Expect(ethHeaderCID.BlockByCid.Key).To(Equal(headerCID.IPLD.Key))
|
||||
Expect(ethHeaderCID.BlockByMhKey.Data).To(Equal(graphql.Bytes(headerCID.IPLD.Data).String()))
|
||||
Expect(ethHeaderCID.BlockByMhKey.Key).To(Equal(headerCID.IPLD.Key))
|
||||
}
|
||||
|
||||
func compareEthTxCID(ethTxCID graphql.EthTransactionCIDResponse, txCID eth.TransactionCIDRecord) {
|
||||
|
||||
@ -292,7 +292,7 @@ const schema string = `
|
||||
index: Int!
|
||||
src: String!
|
||||
dst: String!
|
||||
blockByCid: IPFSBlock!
|
||||
blockByMhKey: IPFSBlock!
|
||||
}
|
||||
|
||||
type EthTransactionCidsConnection {
|
||||
@ -317,7 +317,7 @@ const schema string = `
|
||||
uncleRoot: String!
|
||||
bloom: String!
|
||||
ethTransactionCidsByHeaderId: EthTransactionCidsConnection!
|
||||
blockByCid: IPFSBlock!
|
||||
blockByMhKey: IPFSBlock!
|
||||
}
|
||||
|
||||
type EthHeaderCidsConnection {
|
||||
@ -343,12 +343,12 @@ const schema string = `
    getStorageAt(blockHash: Bytes32!, contract: Address!, slot: Bytes32!): StorageResult

    # Get contract logs by block hash and contract address.
    getLogs(blockHash: Bytes32!, blockNumber: BigInt, addresses: [Address!]): [Log!]
    getLogs(blockHash: Bytes32!, contract: Address): [Log!]

    # PostGraphile alternative to get headers with transactions using block number or block hash.
    allEthHeaderCids(condition: EthHeaderCidCondition): EthHeaderCidsConnection

    # PostGraphile alternative to get transactions using transaction hash.
    ethTransactionCidByTxHash(txHash: String!, blockNumber: BigInt): EthTransactionCid
    ethTransactionCidByTxHash(txHash: String!): EthTransactionCid
}
`
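
For context, this is roughly how the v5 getLogs field is exercised via the graphql.Client shown earlier in this diff; blockHash and contractAddress are placeholders:

logs, err := client.GetLogs(ctx, blockHash, []common.Address{contractAddress})
if err != nil {
	// handle transport or query error
}
_ = logs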
|
||||
|
||||
@ -27,9 +27,9 @@ import (
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/graph-gophers/graphql-go"
|
||||
"github.com/graph-gophers/graphql-go/relay"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth"
|
||||
)
|
||||
|
||||
// Service encapsulates a GraphQL service.
|
||||
@ -76,12 +76,12 @@ func (s *Service) Start(server *p2p.Server) error {
|
||||
if err != nil {
|
||||
utils.Fatalf("Could not start RPC api: %v", err)
|
||||
}
|
||||
extapiURL := fmt.Sprintf("http://%v", addr)
|
||||
log.Infof("GraphQL endpoint opened at %s", extapiURL)
|
||||
extapiURL := fmt.Sprintf("http://%v/", addr)
|
||||
logrus.Infof("graphQL endpoint opened for url %s", extapiURL)
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewHandler returns a new `http.Handler` that will answer GraphQL queries.
|
||||
// newHandler returns a new `http.Handler` that will answer GraphQL queries.
|
||||
// It additionally exports an interactive query browser on the / endpoint.
|
||||
func NewHandler(backend *eth.Backend) (http.Handler, error) {
|
||||
q := Resolver{backend}
|
||||
@ -105,7 +105,7 @@ func (s *Service) Stop() error {
|
||||
if s.listener != nil {
|
||||
s.listener.Close()
|
||||
s.listener = nil
|
||||
log.Debugf("graphQL endpoint closed for url %s", fmt.Sprintf("http://%s", s.endpoint))
|
||||
logrus.Debugf("graphQL endpoint closed for url %s", fmt.Sprintf("http://%s", s.endpoint))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
244
pkg/log/log.go
@ -1,244 +0,0 @@
|
||||
// Copyright © 2023 Cerc
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package log
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
const (
|
||||
CtxKeyApiMethod = "api_method"
|
||||
CtxKeyApiParams = "api_params"
|
||||
CtxKeyApiReqId = "api_reqid"
|
||||
CtxKeyBlockHash = "block_hash"
|
||||
CtxKeyBlockNumber = "block_num"
|
||||
CtxKeyConn = "conn"
|
||||
CtxKeyDuration = "duration"
|
||||
CtxKeyError = "err"
|
||||
CtxKeyUniqId = "uuid"
|
||||
CtxKeyUserId = "user_id"
|
||||
)
|
||||
|
||||
// TODO: Allow registering arbitrary keys.
|
||||
var registeredKeys = []string{
|
||||
CtxKeyApiMethod,
|
||||
CtxKeyApiParams,
|
||||
CtxKeyApiReqId,
|
||||
CtxKeyBlockHash,
|
||||
CtxKeyBlockNumber,
|
||||
CtxKeyConn,
|
||||
CtxKeyDuration,
|
||||
CtxKeyError,
|
||||
CtxKeyUniqId,
|
||||
CtxKeyUserId,
|
||||
}
|
||||
|
||||
const FatalLevel = logrus.FatalLevel
|
||||
const ErrorLevel = logrus.ErrorLevel
|
||||
const InfoLevel = logrus.InfoLevel
|
||||
const DebugLevel = logrus.DebugLevel
|
||||
const TraceLevel = logrus.TraceLevel
|
||||
|
||||
type Entry = logrus.Entry
|
||||
type Level = logrus.Level
|
||||
|
||||
func WithFieldsFromContext(ctx context.Context) *Entry {
    entry := logrus.WithContext(ctx)

    for _, key := range registeredKeys {
        if value := ctx.Value(key); value != nil {
            entry = entry.WithField(key, value)
        }
    }
    return entry
}
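
A small sketch of the intended pattern; the request values below are made up for illustration, and Infox is the context-aware helper defined further down in this file:

ctx := context.WithValue(context.Background(), CtxKeyApiMethod, "eth_getLogs")
ctx = context.WithValue(ctx, CtxKeyBlockHash, blockHash.Hex())
Infox(ctx, "serving request") // emits api_method and block_hash as structured fields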
|
||||
|
||||
func Fatalx(ctx context.Context, args ...interface{}) {
|
||||
WithFieldsFromContext(ctx).Fatal(args...)
|
||||
}
|
||||
|
||||
func Errorx(ctx context.Context, args ...interface{}) {
|
||||
WithFieldsFromContext(ctx).Error(args...)
|
||||
}
|
||||
|
||||
func Warnx(ctx context.Context, args ...interface{}) {
|
||||
WithFieldsFromContext(ctx).Warn(args...)
|
||||
}
|
||||
|
||||
func Infox(ctx context.Context, args ...interface{}) {
|
||||
WithFieldsFromContext(ctx).Info(args...)
|
||||
}
|
||||
|
||||
func Debugx(ctx context.Context, args ...interface{}) {
|
||||
WithFieldsFromContext(ctx).Debug(args...)
|
||||
}
|
||||
|
||||
func Tracex(ctx context.Context, args ...interface{}) {
|
||||
WithFieldsFromContext(ctx).Trace(args...)
|
||||
}
|
||||
|
||||
func Errorxf(ctx context.Context, format string, args ...interface{}) {
|
||||
WithFieldsFromContext(ctx).Errorf(format, args...)
|
||||
}
|
||||
|
||||
func Warnxf(ctx context.Context, format string, args ...interface{}) {
|
||||
WithFieldsFromContext(ctx).Warnf(format, args...)
|
||||
}
|
||||
|
||||
func Infoxf(ctx context.Context, format string, args ...interface{}) {
|
||||
WithFieldsFromContext(ctx).Infof(format, args...)
|
||||
}
|
||||
|
||||
func Debugxf(ctx context.Context, format string, args ...interface{}) {
|
||||
WithFieldsFromContext(ctx).Debugf(format, args...)
|
||||
}
|
||||
|
||||
func Tracexf(ctx context.Context, format string, args ...interface{}) {
|
||||
WithFieldsFromContext(ctx).Tracef(format, args...)
|
||||
}
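The context-keyed helpers above pull any registered request metadata off a context and attach it to the log entry. A minimal usage sketch, not part of the diff, assuming the v5 import path and the exported names shown in this file:

package main

import (
	"context"

	"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
)

func main() {
	// Store request metadata under the registered keys, then log with the context.
	ctx := context.WithValue(context.Background(), log.CtxKeyApiMethod, "eth_getLogs")
	ctx = context.WithValue(ctx, log.CtxKeyApiReqId, "42")
	// The resulting entry carries api_method=eth_getLogs and api_reqid=42 as structured fields.
	log.Infox(ctx, "serving request")
}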
|
||||
|
||||
func Fatal(args ...interface{}) {
|
||||
logrus.Fatal(args...)
|
||||
}
|
||||
|
||||
func Error(args ...interface{}) {
|
||||
logrus.Error(args...)
|
||||
}
|
||||
|
||||
func Warn(args ...interface{}) {
|
||||
logrus.Warn(args...)
|
||||
}
|
||||
|
||||
func Info(args ...interface{}) {
|
||||
logrus.Info(args...)
|
||||
}
|
||||
|
||||
func Debug(args ...interface{}) {
|
||||
logrus.Debug(args...)
|
||||
}
|
||||
|
||||
func Trace(args ...interface{}) {
|
||||
logrus.Trace(args...)
|
||||
}
|
||||
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
logrus.Fatalf(format, args...)
|
||||
}
|
||||
|
||||
func Errorf(format string, args ...interface{}) {
|
||||
logrus.Errorf(format, args...)
|
||||
}
|
||||
|
||||
func Warnf(format string, args ...interface{}) {
|
||||
logrus.Warnf(format, args...)
|
||||
}
|
||||
|
||||
func Infof(format string, args ...interface{}) {
|
||||
logrus.Infof(format, args...)
|
||||
}
|
||||
|
||||
func Debugf(format string, args ...interface{}) {
|
||||
logrus.Debugf(format, args...)
|
||||
}
|
||||
|
||||
func Tracef(format string, args ...interface{}) {
|
||||
logrus.Tracef(format, args...)
|
||||
}
|
||||
|
||||
func SetOutput(out io.Writer) {
|
||||
logrus.SetOutput(out)
|
||||
}
|
||||
|
||||
func SetLevel(lvl Level) {
|
||||
logrus.SetLevel(lvl)
|
||||
}
|
||||
|
||||
func IsLevelEnabled(lvl Level) bool {
|
||||
return logrus.IsLevelEnabled(lvl)
|
||||
}
|
||||
|
||||
func WithError(err error) *Entry {
|
||||
return logrus.WithError(err)
|
||||
}
|
||||
|
||||
func WithField(field string, value interface{}) *Entry {
|
||||
return logrus.WithField(field, value)
|
||||
}
|
||||
|
||||
func Init() error {
|
||||
// Set the output.
|
||||
logFile := viper.GetString("log.file")
|
||||
if logFile != "" {
|
||||
file, err := os.OpenFile(logFile,
|
||||
os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0640)
|
||||
if err == nil {
|
||||
Infof("Directing output to %s", logFile)
|
||||
SetOutput(file)
|
||||
} else {
|
||||
SetOutput(os.Stdout)
|
||||
Info("Failed to logrus.to file, using default stdout")
|
||||
}
|
||||
} else {
|
||||
SetOutput(os.Stdout)
|
||||
}
|
||||
|
||||
// Set the level.
|
||||
lvl, err := logrus.ParseLevel(viper.GetString("log.level"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
SetLevel(lvl)
|
||||
|
||||
formatter := &logrus.TextFormatter{
|
||||
FullTimestamp: true,
|
||||
}
|
||||
// Show file/line number only at Trace level.
|
||||
if lvl >= TraceLevel {
|
||||
logrus.SetReportCaller(true)
|
||||
|
||||
// We need to exclude this wrapper code, logrus itself, and the runtime from the stack to show anything useful.
|
||||
// cf. https://github.com/sirupsen/logrus/pull/973
|
||||
formatter.CallerPrettyfier = func(frame *runtime.Frame) (function string, file string) {
|
||||
pcs := make([]uintptr, 50)
|
||||
_ = runtime.Callers(0, pcs)
|
||||
frames := runtime.CallersFrames(pcs)
|
||||
|
||||
// Filter log wrapper / logrus / runtime frames.
|
||||
for next, again := frames.Next(); again; next, again = frames.Next() {
|
||||
if !strings.Contains(next.File, "sirupsen/logrus") &&
|
||||
!strings.HasPrefix(next.Function, "runtime.") &&
|
||||
!strings.Contains(next.File, "ipld-eth-server/pkg/log") {
|
||||
return next.Function, fmt.Sprintf("%s:%d", next.File, next.Line)
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to the raw info.
|
||||
return frame.Function, fmt.Sprintf("%s:%d", frame.File, frame.Line)
|
||||
}
|
||||
}
|
||||
|
||||
logrus.SetFormatter(formatter)
|
||||
Info("Log level set to ", lvl.String())
|
||||
return nil
|
||||
}
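Init reads its output target and level from viper, so a caller only has to set (or env-bind) the log.file and log.level keys before calling it. A hedged bootstrap sketch, not taken from the repo:

package main

import (
	"github.com/spf13/viper"

	"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
)

func main() {
	viper.Set("log.file", "")       // empty string falls back to stdout
	viper.Set("log.level", "trace") // the trace level additionally enables caller reporting
	if err := log.Init(); err != nil {
		panic(err)
	}
	log.Debug("logging initialised")
}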
|
||||
@ -17,10 +17,10 @@
|
||||
package net_test
|
||||
|
||||
import (
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/net"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/net"
|
||||
)
|
||||
|
||||
var _ = Describe("API", func() {
|
||||
|
||||
@ -20,9 +20,9 @@ import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func TestNetSuite(t *testing.T) {
|
||||
@ -31,5 +31,5 @@ func TestNetSuite(t *testing.T) {
|
||||
}
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
log.SetOutput(ioutil.Discard)
|
||||
logrus.SetOutput(ioutil.Discard)
|
||||
})
|
||||
|
||||
@ -17,107 +17,25 @@
|
||||
package prom
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
const (
|
||||
jsonMethod = "method"
|
||||
jsonParams = "params"
|
||||
jsonReqId = "id"
|
||||
headerUserId = "X-User-Id"
|
||||
headerOriginalRemoteAddr = "X-Original-Remote-Addr"
|
||||
)
|
||||
|
||||
// Peek at the request and update the Context accordingly (e.g., API method, user ID, etc.)
|
||||
func preprocessRequest(r *http.Request) (*http.Request, error) {
|
||||
// Generate a unique ID for this request.
|
||||
uniqId, err := uuid.NewUUID()
|
||||
if nil != err {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read the body so that we can peek inside.
|
||||
body, err := io.ReadAll(r.Body)
|
||||
if nil != err {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Replace it with a re-readable copy.
|
||||
r.Body = io.NopCloser(bytes.NewBuffer(body))
|
||||
|
||||
// All API requests should be JSON.
|
||||
var result map[string]interface{}
|
||||
if len(body) > 0 {
|
||||
err = json.Unmarshal(body, &result)
|
||||
if nil != err {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Pull out the method name, request ID, user ID, and address info.
|
||||
reqId := fmt.Sprintf("%g", result[jsonReqId])
|
||||
reqMethod := fmt.Sprintf("%v", result[jsonMethod])
|
||||
reqParams := fmt.Sprintf("%v", result[jsonParams])
|
||||
// Truncate parameters unless trace logging is enabled.
|
||||
if !log.IsLevelEnabled(log.TraceLevel) {
|
||||
if len(reqParams) > 250 {
|
||||
reqParams = reqParams[:250] + "..."
|
||||
}
|
||||
}
|
||||
userId := r.Header.Get(headerUserId)
|
||||
conn := r.Header.Get(headerOriginalRemoteAddr)
|
||||
if len(conn) == 0 {
|
||||
conn = r.RemoteAddr
|
||||
}
|
||||
|
||||
// Add it all to the request context.
|
||||
ctx := r.Context()
|
||||
ctx = context.WithValue(ctx, log.CtxKeyUniqId, uniqId.String())
|
||||
ctx = context.WithValue(ctx, log.CtxKeyApiMethod, reqMethod)
|
||||
ctx = context.WithValue(ctx, log.CtxKeyApiParams, string(reqParams))
|
||||
ctx = context.WithValue(ctx, log.CtxKeyApiReqId, reqId)
|
||||
ctx = context.WithValue(ctx, log.CtxKeyUserId, userId)
|
||||
ctx = context.WithValue(ctx, log.CtxKeyConn, conn)
|
||||
|
||||
return r.WithContext(ctx), nil
|
||||
}
|
||||
|
||||
// HTTPMiddleware http connection metric reader
|
||||
func HTTPMiddleware(next http.Handler) http.Handler {
|
||||
if !metrics {
|
||||
return next
|
||||
}
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
httpCount.Inc()
|
||||
|
||||
start := time.Now()
|
||||
r, err := preprocessRequest(r)
|
||||
if nil != err {
|
||||
log.WithError(err).Error("Error preprocessing request")
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
ctx := r.Context()
|
||||
apiMethod := fmt.Sprintf("%s", ctx.Value(log.CtxKeyApiMethod))
|
||||
|
||||
if metrics {
|
||||
httpCount.WithLabelValues(apiMethod).Inc()
|
||||
}
|
||||
|
||||
log.Debugx(ctx, "START")
|
||||
next.ServeHTTP(w, r)
|
||||
duration := time.Now().Sub(start)
|
||||
log.Debugxf(context.WithValue(ctx, log.CtxKeyDuration, duration.Milliseconds()), "END")
|
||||
|
||||
if metrics {
|
||||
httpDuration.WithLabelValues(apiMethod).Observe(duration.Seconds())
|
||||
}
|
||||
httpDuration.Observe(float64(duration.Seconds()))
|
||||
})
|
||||
}
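HTTPMiddleware is meant to wrap the JSON-RPC handler stack: it re-reads the request body, tags the context via preprocessRequest, records per-method count and duration metrics, and emits START/END debug lines. A wiring sketch with a dummy handler (assumed import path; the real server wraps node.NewHTTPHandlerStack as shown later in this diff):

package main

import (
	"net/http"

	"github.com/cerc-io/ipld-eth-server/v5/pkg/prom"
)

func main() {
	prom.Init() // registers the counters/histograms the middleware updates
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`{"jsonrpc":"2.0","id":1,"result":null}`))
	})
	// Every request is preprocessed, counted, timed, and logged before being served.
	http.ListenAndServe("127.0.0.1:8081", prom.HTTPMiddleware(mux))
}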
|
||||
|
||||
|
||||
@ -33,8 +33,8 @@ const (
|
||||
var (
|
||||
metrics bool
|
||||
|
||||
httpCount *prometheus.CounterVec
|
||||
httpDuration *prometheus.HistogramVec
|
||||
httpCount prometheus.Counter
|
||||
httpDuration prometheus.Histogram
|
||||
wsCount prometheus.Gauge
|
||||
ipcCount prometheus.Gauge
|
||||
)
|
||||
@ -43,19 +43,18 @@ var (
|
||||
func Init() {
|
||||
metrics = true
|
||||
|
||||
httpCount = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
httpCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystemHTTP,
|
||||
Name: "count",
|
||||
Help: "http request count",
|
||||
}, []string{"method"})
|
||||
|
||||
httpDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
|
||||
})
|
||||
httpDuration = promauto.NewHistogram(prometheus.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystemHTTP,
|
||||
Name: "duration",
|
||||
Help: "http request duration",
|
||||
}, []string{"method"})
|
||||
})
|
||||
|
||||
wsCount = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
|
||||
@ -20,8 +20,8 @@ import (
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var errPromHTTP = errors.New("can't start http server for prometheus")
|
||||
@ -36,7 +36,7 @@ func Serve(addr string) *http.Server {
|
||||
}
|
||||
go func() {
|
||||
if err := srv.ListenAndServe(); err != nil {
|
||||
log.
|
||||
logrus.
|
||||
WithError(err).
|
||||
WithField("module", "prom").
|
||||
WithField("addr", addr).
|
||||
|
||||
@ -19,31 +19,29 @@ package rpc
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/prom"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// StartHTTPEndpoint starts the HTTP RPC endpoint, configured with cors/vhosts/modules.
|
||||
func StartHTTPEndpoint(endpoint string, apis []rpc.API, modules []string, cors []string, vhosts []string, timeouts rpc.HTTPTimeouts) (*rpc.Server, error) {
|
||||
|
||||
srv := rpc.NewServer()
|
||||
err := node.RegisterApis(apis, modules, srv)
|
||||
err := node.RegisterApis(apis, modules, srv, false)
|
||||
if err != nil {
|
||||
utils.Fatalf("Could not register HTTP API: %w", err)
|
||||
}
|
||||
handler := prom.HTTPMiddleware(node.NewHTTPHandlerStack(srv, cors, vhosts, nil))
|
||||
handler := node.NewHTTPHandlerStack(srv, cors, vhosts, nil)
|
||||
|
||||
// start http server
|
||||
_, addr, err := node.StartHTTPEndpoint(endpoint, rpc.DefaultHTTPTimeouts, handler)
|
||||
if err != nil {
|
||||
utils.Fatalf("Could not start RPC api: %v", err)
|
||||
}
|
||||
extapiURL := fmt.Sprintf("http://%s", addr)
|
||||
log.Infof("HTTP endpoint opened at %s", extapiURL)
|
||||
extapiURL := fmt.Sprintf("http://%v/", addr)
|
||||
log.Infof("HTTP endpoint opened %s", extapiURL)
|
||||
|
||||
return srv, err
|
||||
}
|
||||
|
||||
@ -22,11 +22,10 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/netutil"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/prom"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/prom"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@ -24,12 +24,11 @@ import (
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/prom"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/prom"
|
||||
)
|
||||
|
||||
// StartWSEndpoint starts a websocket endpoint.
|
||||
func StartWSEndpoint(endpoint string, apis []rpc.API, modules []string, wsOrigins []string) (net.Listener, *rpc.Server, error) {
|
||||
func StartWSEndpoint(endpoint string, apis []rpc.API, modules []string, wsOrigins []string, exposeAll bool) (net.Listener, *rpc.Server, error) {
|
||||
// All APIs registered, start the HTTP listener
|
||||
var (
|
||||
listener net.Listener
|
||||
@ -38,7 +37,7 @@ func StartWSEndpoint(endpoint string, apis []rpc.API, modules []string, wsOrigin
|
||||
|
||||
// Register all the APIs exposed by the services
|
||||
handler := rpc.NewServer()
|
||||
err = node.RegisterApis(apis, modules, handler)
|
||||
err = node.RegisterApis(apis, modules, handler, exposeAll)
|
||||
if err != nil {
|
||||
utils.Fatalf("Could not register WS API: %w", err)
|
||||
}
|
||||
@ -50,7 +49,6 @@ func StartWSEndpoint(endpoint string, apis []rpc.API, modules []string, wsOrigin
|
||||
wsServer := NewWSServer(wsOrigins, handler)
|
||||
wsServer.Handler = prom.WSMiddleware(wsServer.Handler)
|
||||
go wsServer.Serve(listener)
|
||||
log.Infof("WS endpoint opened at ws://%s", endpoint)
|
||||
|
||||
return listener, handler, err
|
||||
|
||||
|
||||
@ -17,8 +17,13 @@
|
||||
package serve
|
||||
|
||||
import (
|
||||
"github.com/cerc-io/plugeth-statediff/types"
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/statediff/types"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth"
|
||||
)
|
||||
|
||||
// APIName is the namespace used for the state diffing service API
|
||||
@ -41,11 +46,49 @@ func NewPublicServerAPI(w Server, client *rpc.Client) *PublicServerAPI {
|
||||
}
|
||||
}
|
||||
|
||||
// Stream is the public method to setup a subscription that fires off IPLD payloads as they are processed
|
||||
func (api *PublicServerAPI) Stream(ctx context.Context, params eth.SubscriptionSettings) (*rpc.Subscription, error) {
|
||||
// ensure that the RPC connection supports subscriptions
|
||||
notifier, supported := rpc.NotifierFromContext(ctx)
|
||||
if !supported {
|
||||
return nil, rpc.ErrNotificationsUnsupported
|
||||
}
|
||||
|
||||
// create subscription and start waiting for stream events
|
||||
rpcSub := notifier.CreateSubscription()
|
||||
|
||||
go func() {
|
||||
// subscribe to events from the SyncPublishScreenAndServe service
|
||||
payloadChannel := make(chan SubscriptionPayload, PayloadChanBufferSize)
|
||||
quitChan := make(chan bool, 1)
|
||||
go api.w.Subscribe(rpcSub.ID, payloadChannel, quitChan, params)
|
||||
|
||||
// loop and await payloads and relay them to the subscriber using notifier
|
||||
for {
|
||||
select {
|
||||
case packet := <-payloadChannel:
|
||||
if err := notifier.Notify(rpcSub.ID, packet); err != nil {
|
||||
log.Error("Failed to send watcher data packet", "err", err)
|
||||
api.w.Unsubscribe(rpcSub.ID)
|
||||
return
|
||||
}
|
||||
case <-rpcSub.Err():
|
||||
api.w.Unsubscribe(rpcSub.ID)
|
||||
return
|
||||
case <-quitChan:
|
||||
// no need to unsubscribe from the watcher; the service unsubscribes before sending the quit signal
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return rpcSub, nil
|
||||
}
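On the wire, Stream is reached through go-ethereum's pub/sub convention: the client calls <namespace>_subscribe with the lower-cased method name. A hedged client sketch; the "vdb" namespace string and the empty settings map are assumptions (the real namespace is the APIName constant, and params should mirror eth.SubscriptionSettings):

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// 127.0.0.1:8080 is the default wsPath from NewConfig; adjust as needed.
	cli, err := rpc.Dial("ws://127.0.0.1:8080")
	if err != nil {
		panic(err)
	}
	payloads := make(chan map[string]interface{})
	sub, err := cli.Subscribe(context.Background(), "vdb", payloads, "stream", map[string]interface{}{})
	if err != nil {
		panic(err)
	}
	defer sub.Unsubscribe()
	fmt.Println(<-payloads) // each message is a SubscriptionPayload with rlp-encoded Data
}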
|
||||
|
||||
// WatchAddress makes a geth WatchAddress API call with the given operation and args
|
||||
func (api *PublicServerAPI) WatchAddress(operation types.OperationType, args []types.WatchAddressArg) error {
|
||||
err := api.rpc.Call(nil, "statediff_watchAddress", operation, args)
|
||||
if err != nil {
|
||||
// Return the error directly to match the native Geth API
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@ -24,61 +24,37 @@ import (
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/node"
|
||||
"github.com/cerc-io/plugeth-statediff/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/statediff"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/prom"
|
||||
ethServerShared "github.com/cerc-io/ipld-eth-server/v5/pkg/shared"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/prom"
|
||||
ethServerShared "github.com/vulcanize/ipld-eth-server/v4/pkg/shared"
|
||||
)
|
||||
|
||||
// Env variables
|
||||
const (
|
||||
SERVER_WS_PATH = "SERVER_WS_PATH"
|
||||
SERVER_IPC_PATH = "SERVER_IPC_PATH"
|
||||
SERVER_HTTP_PATH = "SERVER_HTTP_PATH"
|
||||
SERVER_GRAPHQL_PATH = "SERVER_GRAPHQL_PATH"
|
||||
SERVER_WS_PATH = "SERVER_WS_PATH"
|
||||
SERVER_IPC_PATH = "SERVER_IPC_PATH"
|
||||
SERVER_HTTP_PATH = "SERVER_HTTP_PATH"
|
||||
|
||||
SERVER_MAX_IDLE_CONNECTIONS = "SERVER_MAX_IDLE_CONNECTIONS"
|
||||
SERVER_MAX_OPEN_CONNECTIONS = "SERVER_MAX_OPEN_CONNECTIONS"
|
||||
SERVER_MAX_CONN_LIFETIME = "SERVER_MAX_CONN_LIFETIME"
|
||||
|
||||
ETH_DEFAULT_SENDER_ADDR = "ETH_DEFAULT_SENDER_ADDR"
|
||||
ETH_RPC_GAS_CAP = "ETH_RPC_GAS_CAP"
|
||||
ETH_CHAIN_CONFIG = "ETH_CHAIN_CONFIG"
|
||||
ETH_SUPPORTS_STATEDIFF = "ETH_SUPPORTS_STATEDIFF"
|
||||
ETH_STATEDIFF_TIMEOUT = "ETH_STATEDIFF_TIMEOUT"
|
||||
ETH_FORWARD_ETH_CALLS = "ETH_FORWARD_ETH_CALLS"
|
||||
ETH_FORWARD_GET_STORAGE_AT = "ETH_FORWARD_GET_STORAGE_AT"
|
||||
ETH_PROXY_ON_ERROR = "ETH_PROXY_ON_ERROR"
|
||||
ETH_GETLOGS_BLOCK_LIMIT = "ETH_GETLOGS_BLOCK_LIMIT"
|
||||
ETH_DEFAULT_SENDER_ADDR = "ETH_DEFAULT_SENDER_ADDR"
|
||||
ETH_RPC_GAS_CAP = "ETH_RPC_GAS_CAP"
|
||||
ETH_CHAIN_CONFIG = "ETH_CHAIN_CONFIG"
|
||||
ETH_SUPPORTS_STATEDIFF = "ETH_SUPPORTS_STATEDIFF"
|
||||
ETH_FORWARD_ETH_CALLS = "ETH_FORWARD_ETH_CALLS"
|
||||
ETH_PROXY_ON_ERROR = "ETH_PROXY_ON_ERROR"
|
||||
|
||||
VALIDATOR_ENABLED = "VALIDATOR_ENABLED"
|
||||
VALIDATOR_EVERY_NTH_BLOCK = "VALIDATOR_EVERY_NTH_BLOCK"
|
||||
|
||||
HTTP_TIMEOUT = "HTTP_TIMEOUT"
|
||||
|
||||
ETH_WS_PATH = "ETH_WS_PATH"
|
||||
ETH_HTTP_PATH = "ETH_HTTP_PATH"
|
||||
ETH_NODE_ID = "ETH_NODE_ID"
|
||||
ETH_CLIENT_NAME = "ETH_CLIENT_NAME"
|
||||
ETH_GENESIS_BLOCK = "ETH_GENESIS_BLOCK"
|
||||
ETH_NETWORK_ID = "ETH_NETWORK_ID"
|
||||
ETH_CHAIN_ID = "ETH_CHAIN_ID"
|
||||
|
||||
DATABASE_NAME = "DATABASE_NAME"
|
||||
DATABASE_HOSTNAME = "DATABASE_HOSTNAME"
|
||||
DATABASE_PORT = "DATABASE_PORT"
|
||||
DATABASE_USER = "DATABASE_USER"
|
||||
DATABASE_PASSWORD = "DATABASE_PASSWORD"
|
||||
DATABASE_MAX_IDLE_CONNECTIONS = "DATABASE_MAX_IDLE_CONNECTIONS"
|
||||
DATABASE_MAX_OPEN_CONNECTIONS = "DATABASE_MAX_OPEN_CONNECTIONS"
|
||||
DATABASE_MAX_CONN_LIFETIME = "DATABASE_MAX_CONN_LIFETIME"
|
||||
)
|
||||
|
||||
// Config struct
|
||||
@ -98,18 +74,21 @@ type Config struct {
|
||||
EthGraphqlEnabled bool
|
||||
EthGraphqlEndpoint string
|
||||
|
||||
ChainConfig *params.ChainConfig
|
||||
DefaultSender *common.Address
|
||||
RPCGasCap *big.Int
|
||||
EthHttpEndpoint string
|
||||
Client *rpc.Client
|
||||
SupportStateDiff bool
|
||||
StateDiffTimeout time.Duration
|
||||
ForwardEthCalls bool
|
||||
ForwardGetStorageAt bool
|
||||
ProxyOnError bool
|
||||
GetLogsBlockLimit int64
|
||||
NodeNetworkID string
|
||||
IpldGraphqlEnabled bool
|
||||
IpldGraphqlEndpoint string
|
||||
IpldPostgraphileEndpoint string
|
||||
TracingHttpEndpoint string
|
||||
TracingPostgraphileEndpoint string
|
||||
|
||||
ChainConfig *params.ChainConfig
|
||||
DefaultSender *common.Address
|
||||
RPCGasCap *big.Int
|
||||
EthHttpEndpoint string
|
||||
Client *rpc.Client
|
||||
SupportStateDiff bool
|
||||
ForwardEthCalls bool
|
||||
ProxyOnError bool
|
||||
NodeNetworkID string
|
||||
|
||||
// Cache configuration.
|
||||
GroupCache *ethServerShared.GroupCacheConfig
|
||||
@ -123,70 +102,32 @@ type Config struct {
|
||||
func NewConfig() (*Config, error) {
|
||||
c := new(Config)
|
||||
|
||||
viper.BindEnv("server.httpPath", SERVER_HTTP_PATH)
|
||||
viper.BindEnv("server.wsPath", SERVER_WS_PATH)
|
||||
viper.BindEnv("server.ipcPath", SERVER_IPC_PATH)
|
||||
viper.BindEnv("server.graphqlPath", SERVER_GRAPHQL_PATH)
|
||||
|
||||
viper.BindEnv("ethereum.chainID", ETH_CHAIN_ID)
|
||||
viper.BindEnv("ethereum.chainConfig", ETH_CHAIN_CONFIG)
|
||||
viper.BindEnv("ethereum.httpPath", ETH_HTTP_PATH)
|
||||
viper.BindEnv("ethereum.defaultSender", ETH_DEFAULT_SENDER_ADDR)
|
||||
viper.BindEnv("ethereum.rpcGasCap", ETH_RPC_GAS_CAP)
|
||||
viper.BindEnv("ethereum.chainConfig", ETH_CHAIN_CONFIG)
|
||||
viper.BindEnv("ethereum.supportsStateDiff", ETH_SUPPORTS_STATEDIFF)
|
||||
viper.BindEnv("ethereum.stateDiffTimeout", ETH_STATEDIFF_TIMEOUT)
|
||||
viper.BindEnv("ethereum.forwardEthCalls", ETH_FORWARD_ETH_CALLS)
|
||||
viper.BindEnv("ethereum.forwardGetStorageAt", ETH_FORWARD_GET_STORAGE_AT)
|
||||
viper.BindEnv("ethereum.proxyOnError", ETH_PROXY_ON_ERROR)
|
||||
viper.BindEnv("ethereum.getLogsBlockLimit", ETH_GETLOGS_BLOCK_LIMIT)
|
||||
viper.BindEnv("log.file", "LOG_FILE")
|
||||
viper.BindEnv("log.level", "LOG_LEVEL")
|
||||
|
||||
c.dbInit()
|
||||
ethHTTP := viper.GetString("ethereum.httpPath")
|
||||
ethHTTPEndpoint := fmt.Sprintf("http://%s", ethHTTP)
|
||||
|
||||
// At least one of chain ID and chain config must be passed.
|
||||
// If both are passed, the chain ID must match the config.
|
||||
chainID := viper.GetUint64("ethereum.chainID")
|
||||
chainConfigPath := viper.GetString("ethereum.chainConfig")
|
||||
var err error
|
||||
if chainConfigPath != "" {
|
||||
if c.ChainConfig, err = utils.LoadConfig(chainConfigPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Only validate the chain ID if it was actually passed
|
||||
if viper.GetString("ethereum.chainID") != "" && c.ChainConfig.ChainID.Uint64() != chainID {
|
||||
return nil, fmt.Errorf("passed chain ID %d does not match chain config chain ID %d",
|
||||
chainID, c.ChainConfig.ChainID.Uint64())
|
||||
}
|
||||
} else {
|
||||
if c.ChainConfig, err = utils.ChainConfig(chainID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
nodeInfo, cli, err := getEthNodeAndClient(ethHTTPEndpoint, chainID)
|
||||
nodeInfo, cli, err := getEthNodeAndClient(ethHTTPEndpoint)
|
||||
c.NodeNetworkID = nodeInfo.NetworkID
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.NodeNetworkID = nodeInfo.NetworkID
|
||||
c.Client = cli
|
||||
c.SupportStateDiff = viper.GetBool("ethereum.supportsStateDiff")
|
||||
c.ForwardEthCalls = viper.GetBool("ethereum.forwardEthCalls")
|
||||
c.ForwardGetStorageAt = viper.GetBool("ethereum.forwardGetStorageAt")
|
||||
c.ProxyOnError = viper.GetBool("ethereum.proxyOnError")
|
||||
c.EthHttpEndpoint = ethHTTPEndpoint
|
||||
|
||||
if viper.IsSet("ethereum.getLogsBlockLimit") {
|
||||
c.GetLogsBlockLimit = viper.GetInt64("ethereum.getLogsBlockLimit")
|
||||
} else {
|
||||
c.GetLogsBlockLimit = 500
|
||||
}
|
||||
|
||||
// websocket server
|
||||
wsEnabled := viper.GetBool("server.ws")
|
||||
wsEnabled := viper.GetBool("eth.server.ws")
|
||||
if wsEnabled {
|
||||
wsPath := viper.GetString("server.wsPath")
|
||||
wsPath := viper.GetString("eth.server.wsPath")
|
||||
if wsPath == "" {
|
||||
wsPath = "127.0.0.1:8080"
|
||||
}
|
||||
@ -195,9 +136,9 @@ func NewConfig() (*Config, error) {
|
||||
c.WSEnabled = wsEnabled
|
||||
|
||||
// ipc server
|
||||
ipcEnabled := viper.GetBool("server.ipc")
|
||||
ipcEnabled := viper.GetBool("eth.server.ipc")
|
||||
if ipcEnabled {
|
||||
ipcPath := viper.GetString("server.ipcPath")
|
||||
ipcPath := viper.GetString("eth.server.ipcPath")
|
||||
if ipcPath == "" {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
@ -210,9 +151,9 @@ func NewConfig() (*Config, error) {
|
||||
c.IPCEnabled = ipcEnabled
|
||||
|
||||
// http server
|
||||
httpEnabled := viper.GetBool("server.http")
|
||||
httpEnabled := viper.GetBool("eth.server.http")
|
||||
if httpEnabled {
|
||||
httpPath := viper.GetString("server.httpPath")
|
||||
httpPath := viper.GetString("eth.server.httpPath")
|
||||
if httpPath == "" {
|
||||
httpPath = "127.0.0.1:8081"
|
||||
}
|
||||
@ -221,9 +162,9 @@ func NewConfig() (*Config, error) {
|
||||
c.HTTPEnabled = httpEnabled
|
||||
|
||||
// eth graphql endpoint
|
||||
ethGraphqlEnabled := viper.GetBool("server.graphql")
|
||||
ethGraphqlEnabled := viper.GetBool("eth.server.graphql")
|
||||
if ethGraphqlEnabled {
|
||||
ethGraphqlPath := viper.GetString("server.graphqlPath")
|
||||
ethGraphqlPath := viper.GetString("eth.server.graphqlPath")
|
||||
if ethGraphqlPath == "" {
|
||||
ethGraphqlPath = "127.0.0.1:8082"
|
||||
}
|
||||
@ -231,6 +172,34 @@ func NewConfig() (*Config, error) {
|
||||
}
|
||||
c.EthGraphqlEnabled = ethGraphqlEnabled
|
||||
|
||||
// ipld graphql endpoint
|
||||
ipldGraphqlEnabled := viper.GetBool("ipld.server.graphql")
|
||||
if ipldGraphqlEnabled {
|
||||
ipldGraphqlPath := viper.GetString("ipld.server.graphqlPath")
|
||||
if ipldGraphqlPath == "" {
|
||||
ipldGraphqlPath = "127.0.0.1:8083"
|
||||
}
|
||||
c.IpldGraphqlEndpoint = ipldGraphqlPath
|
||||
|
||||
ipldPostgraphilePath := viper.GetString("ipld.postgraphilePath")
|
||||
if ipldPostgraphilePath == "" {
|
||||
return nil, errors.New("ipld-postgraphile-path parameter is empty")
|
||||
}
|
||||
c.IpldPostgraphileEndpoint = ipldPostgraphilePath
|
||||
|
||||
tracingHttpEndpoint := viper.GetString("tracing.httpPath")
|
||||
tracingPostgraphilePath := viper.GetString("tracing.postgraphilePath")
|
||||
|
||||
// these two parameters must either both be empty or both be set
|
||||
if (tracingHttpEndpoint == "" && tracingPostgraphilePath != "") || (tracingHttpEndpoint != "" && tracingPostgraphilePath == "") {
|
||||
return nil, errors.New("tracing.httpPath and tracing.postgraphilePath parameters either can be both empty or both set")
|
||||
}
|
||||
|
||||
c.TracingHttpEndpoint = tracingHttpEndpoint
|
||||
c.TracingPostgraphileEndpoint = tracingPostgraphilePath
|
||||
}
|
||||
c.IpldGraphqlEnabled = ipldGraphqlEnabled
|
||||
|
||||
overrideDBConnConfig(&c.DBConfig)
|
||||
serveDB, err := ethServerShared.NewDB(c.DBConfig.DbConnectionString(), c.DBConfig)
|
||||
if err != nil {
|
||||
@ -240,24 +209,22 @@ func NewConfig() (*Config, error) {
|
||||
prom.RegisterDBCollector(c.DBConfig.DatabaseName, serveDB)
|
||||
c.DB = serveDB
|
||||
|
||||
defaultSenderStr := viper.GetString("ethereum.defaultSender")
|
||||
if defaultSenderStr != "" {
|
||||
sender := common.HexToAddress(defaultSenderStr)
|
||||
c.DefaultSender = &sender
|
||||
}
|
||||
rpcGasCapStr := viper.GetString("ethereum.rpcGasCap")
|
||||
if rpcGasCapStr != "" {
|
||||
if rpcGasCap, ok := new(big.Int).SetString(rpcGasCapStr, 10); ok {
|
||||
c.RPCGasCap = rpcGasCap
|
||||
}
|
||||
} else {
|
||||
c.RPCGasCap = big.NewInt(0)
|
||||
}
|
||||
if sdTimeout := viper.GetString("ethereum.stateDiffTimeout"); sdTimeout != "" {
|
||||
var err error
|
||||
if c.StateDiffTimeout, err = time.ParseDuration(sdTimeout); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chainConfigPath := viper.GetString("ethereum.chainConfig")
|
||||
if chainConfigPath != "" {
|
||||
c.ChainConfig, err = statediff.LoadConfig(chainConfigPath)
|
||||
} else {
|
||||
c.StateDiffTimeout = ethServerShared.DefaultStateDiffTimeout
|
||||
}
|
||||
if c.StateDiffTimeout < 0 {
|
||||
return nil, errors.New("ethereum.stateDiffTimeout < 0")
|
||||
c.ChainConfig, err = statediff.ChainConfig(nodeInfo.ChainID)
|
||||
}
|
||||
|
||||
c.loadGroupCacheConfig()
|
||||
@ -326,23 +293,3 @@ func (c *Config) loadValidatorConfig() {
|
||||
c.StateValidationEnabled = viper.GetBool("validator.enabled")
|
||||
c.StateValidationEveryNthBlock = viper.GetUint64("validator.everyNthBlock")
|
||||
}
|
||||
|
||||
// getEthNodeAndClient returns eth node info and client from path url
|
||||
func getEthNodeAndClient(path string, chainid uint64) (node.Info, *rpc.Client, error) {
|
||||
viper.BindEnv("ethereum.nodeID", ETH_NODE_ID)
|
||||
viper.BindEnv("ethereum.clientName", ETH_CLIENT_NAME)
|
||||
viper.BindEnv("ethereum.genesisBlock", ETH_GENESIS_BLOCK)
|
||||
viper.BindEnv("ethereum.networkID", ETH_NETWORK_ID)
|
||||
|
||||
rpcClient, err := rpc.Dial(path)
|
||||
if err != nil {
|
||||
return node.Info{}, nil, err
|
||||
}
|
||||
return node.Info{
|
||||
ID: viper.GetString("ethereum.nodeID"),
|
||||
ClientName: viper.GetString("ethereum.clientName"),
|
||||
GenesisBlock: viper.GetString("ethereum.genesisBlock"),
|
||||
NetworkID: viper.GetString("ethereum.networkID"),
|
||||
ChainID: chainid,
|
||||
}, rpcClient, nil
|
||||
}
|
||||
|
||||
pkg/serve/env.go (new file, 50 lines)
@ -0,0 +1,50 @@
|
||||
package serve
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/node"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// Env variables
|
||||
const (
|
||||
HTTP_TIMEOUT = "HTTP_TIMEOUT"
|
||||
|
||||
ETH_WS_PATH = "ETH_WS_PATH"
|
||||
ETH_HTTP_PATH = "ETH_HTTP_PATH"
|
||||
ETH_NODE_ID = "ETH_NODE_ID"
|
||||
ETH_CLIENT_NAME = "ETH_CLIENT_NAME"
|
||||
ETH_GENESIS_BLOCK = "ETH_GENESIS_BLOCK"
|
||||
ETH_NETWORK_ID = "ETH_NETWORK_ID"
|
||||
ETH_CHAIN_ID = "ETH_CHAIN_ID"
|
||||
|
||||
DATABASE_NAME = "DATABASE_NAME"
|
||||
DATABASE_HOSTNAME = "DATABASE_HOSTNAME"
|
||||
DATABASE_PORT = "DATABASE_PORT"
|
||||
DATABASE_USER = "DATABASE_USER"
|
||||
DATABASE_PASSWORD = "DATABASE_PASSWORD"
|
||||
DATABASE_MAX_IDLE_CONNECTIONS = "DATABASE_MAX_IDLE_CONNECTIONS"
|
||||
DATABASE_MAX_OPEN_CONNECTIONS = "DATABASE_MAX_OPEN_CONNECTIONS"
|
||||
DATABASE_MAX_CONN_LIFETIME = "DATABASE_MAX_CONN_LIFETIME"
|
||||
)
|
||||
|
||||
// getEthNodeAndClient returns eth node info and client from path url
|
||||
func getEthNodeAndClient(path string) (node.Info, *rpc.Client, error) {
|
||||
viper.BindEnv("ethereum.nodeID", ETH_NODE_ID)
|
||||
viper.BindEnv("ethereum.clientName", ETH_CLIENT_NAME)
|
||||
viper.BindEnv("ethereum.genesisBlock", ETH_GENESIS_BLOCK)
|
||||
viper.BindEnv("ethereum.networkID", ETH_NETWORK_ID)
|
||||
viper.BindEnv("ethereum.chainID", ETH_CHAIN_ID)
|
||||
|
||||
rpcClient, err := rpc.Dial(path)
|
||||
if err != nil {
|
||||
return node.Info{}, nil, err
|
||||
}
|
||||
return node.Info{
|
||||
ID: viper.GetString("ethereum.nodeID"),
|
||||
ClientName: viper.GetString("ethereum.clientName"),
|
||||
GenesisBlock: viper.GetString("ethereum.genesisBlock"),
|
||||
NetworkID: viper.GetString("ethereum.networkID"),
|
||||
ChainID: viper.GetUint64("ethereum.chainID"),
|
||||
}, rpcClient, nil
|
||||
}
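getEthNodeAndClient only dials the given URL; the node metadata comes from the env vars bound just above. A hedged, package-local sketch with placeholder values (the function and constants are the ones defined in this file):

package serve

import "os"

func exampleNodeInfo() {
	// Placeholder values; in practice these are set in the deployment environment.
	os.Setenv(ETH_NODE_ID, "test-node")
	os.Setenv(ETH_CLIENT_NAME, "Geth")
	os.Setenv(ETH_NETWORK_ID, "1")
	os.Setenv(ETH_CHAIN_ID, "1")
	info, client, err := getEthNodeAndClient("http://127.0.0.1:8545")
	if err != nil {
		panic(err)
	}
	defer client.Close()
	_ = info // e.g. info.NetworkID == "1", info.ChainID == 1
}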
|
||||
@ -14,16 +14,24 @@
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package eth_state_test
|
||||
package serve
|
||||
|
||||
import (
|
||||
"testing"
|
||||
import log "github.com/sirupsen/logrus"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func TestETHSuite(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "ipld-eth-server/pkg/eth/state_test")
|
||||
func sendNonBlockingErr(sub Subscription, err error) {
|
||||
log.Error(err)
|
||||
select {
|
||||
case sub.PayloadChan <- SubscriptionPayload{Data: nil, Err: err.Error(), Flag: EmptyFlag}:
|
||||
default:
|
||||
log.Infof("unable to send error to subscription %s", sub.ID)
|
||||
}
|
||||
}
|
||||
|
||||
func sendNonBlockingQuit(sub Subscription) {
|
||||
select {
|
||||
case sub.QuitChan <- true:
|
||||
log.Infof("closing subscription %s", sub.ID)
|
||||
default:
|
||||
log.Infof("unable to close subscription %s; channel has no receiver", sub.ID)
|
||||
}
|
||||
}
|
||||
@ -17,21 +17,22 @@
|
||||
package serve
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/eth/tracers"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
ethnode "github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/jmoiron/sqlx"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/debug"
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/eth"
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/net"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/eth"
|
||||
"github.com/vulcanize/ipld-eth-server/v4/pkg/net"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -47,7 +48,11 @@ type Server interface {
|
||||
APIs() []rpc.API
|
||||
Protocols() []p2p.Protocol
|
||||
// Pub-Sub handling event loop
|
||||
Serve(wg *sync.WaitGroup)
|
||||
Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth.ConvertedPayload)
|
||||
// Method to subscribe to the service
|
||||
Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitChan chan<- bool, params eth.SubscriptionSettings)
|
||||
// Method to unsubscribe from the service
|
||||
Unsubscribe(id rpc.ID)
|
||||
// Backend exposes the server's backend
|
||||
Backend() *eth.Backend
|
||||
}
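A rough sketch of how these pieces fit together at startup, hedged rather than copied from the serve command: the config is built from viper/env, a Server is constructed from it, its serving loop is started, and its APIs are exposed through the pkg/rpc helpers shown earlier in this diff. The endpoint string and module list below are placeholders:

package main

import (
	"sync"

	"github.com/ethereum/go-ethereum/rpc"

	srpc "github.com/cerc-io/ipld-eth-server/v5/pkg/rpc"
	"github.com/cerc-io/ipld-eth-server/v5/pkg/serve"
)

func main() {
	cfg, err := serve.NewConfig()
	if err != nil {
		panic(err)
	}
	server, err := serve.NewServer(cfg)
	if err != nil {
		panic(err)
	}
	wg := new(sync.WaitGroup)
	server.Serve(wg) // v5 signature; the subscription-serving loop runs until QuitChan closes
	if _, err := srpc.StartHTTPEndpoint("127.0.0.1:8081", server.APIs(), []string{"vdb", "eth", "net"}, nil, nil, rpc.DefaultHTTPTimeouts); err != nil {
		panic(err)
	}
	wg.Wait()
}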
|
||||
@ -56,24 +61,30 @@ type Server interface {
|
||||
type Service struct {
|
||||
// Used to sync access to the Subscriptions
|
||||
sync.Mutex
|
||||
// Interface for filtering and serving data according to subscribed clients according to their specification
|
||||
Filterer eth.Filterer
|
||||
// Interface for fetching IPLD objects from IPFS
|
||||
IPLDFetcher eth.Fetcher
|
||||
// Interface for searching and retrieving CIDs from Postgres index
|
||||
Retriever eth.Retriever
|
||||
// Used to signal shutdown of the service
|
||||
QuitChan chan bool
|
||||
// Underlying db connection pool
|
||||
// A mapping of rpc.IDs to their subscription channels, mapped to their subscription type (hash of the StreamFilters)
|
||||
Subscriptions map[common.Hash]map[rpc.ID]Subscription
|
||||
// A mapping of subscription params hash to the corresponding subscription params
|
||||
SubscriptionTypes map[common.Hash]eth.SubscriptionSettings
|
||||
// Underlying db
|
||||
db *sqlx.DB
|
||||
// wg for syncing serve processes
|
||||
serveWg *sync.WaitGroup
|
||||
// rpc client for forwarding cache misses
|
||||
client *rpc.Client
|
||||
// whether the proxied client supports state diffing
|
||||
supportsStateDiffing bool
|
||||
// timeout for statediff RPC calls
|
||||
stateDiffTimeout time.Duration
|
||||
// backend for the server
|
||||
backend *eth.Backend
|
||||
// whether to forward eth_calls directly to proxy node
|
||||
forwardEthCalls bool
|
||||
// whether to forward eth_getStorageAt directly to proxy node
|
||||
forwardGetStorageAt bool
|
||||
// the maximum size of the block range to use in GetLogs
|
||||
getLogsBlockLimit int64
|
||||
// whether to forward all calls to proxy node if they throw an error locally
|
||||
proxyOnError bool
|
||||
// eth node network id
|
||||
@ -83,20 +94,23 @@ type Service struct {
|
||||
// NewServer creates a new Server using an underlying Service struct
|
||||
func NewServer(settings *Config) (Server, error) {
|
||||
sap := new(Service)
|
||||
sap.Retriever = eth.NewCIDRetriever(settings.DB)
|
||||
sap.IPLDFetcher = eth.NewIPLDFetcher(settings.DB)
|
||||
sap.Filterer = eth.NewResponseFilterer()
|
||||
sap.db = settings.DB
|
||||
sap.QuitChan = make(chan bool)
|
||||
sap.Subscriptions = make(map[common.Hash]map[rpc.ID]Subscription)
|
||||
sap.SubscriptionTypes = make(map[common.Hash]eth.SubscriptionSettings)
|
||||
sap.client = settings.Client
|
||||
sap.supportsStateDiffing = settings.SupportStateDiff
|
||||
sap.stateDiffTimeout = settings.StateDiffTimeout
|
||||
sap.forwardEthCalls = settings.ForwardEthCalls
|
||||
sap.forwardGetStorageAt = settings.ForwardGetStorageAt
|
||||
sap.getLogsBlockLimit = settings.GetLogsBlockLimit
|
||||
sap.proxyOnError = settings.ProxyOnError
|
||||
sap.nodeNetworkId = settings.NodeNetworkID
|
||||
var err error
|
||||
sap.backend, err = eth.NewEthBackend(sap.db, ð.Config{
|
||||
ChainConfig: settings.ChainConfig,
|
||||
VMConfig: vm.Config{NoBaseFee: true},
|
||||
DefaultSender: settings.DefaultSender,
|
||||
RPCGasCap: settings.RPCGasCap,
|
||||
GroupCacheConfig: settings.GroupCache,
|
||||
})
|
||||
@ -125,44 +139,207 @@ func (sap *Service) APIs() []rpc.API {
|
||||
Public: true,
|
||||
},
|
||||
}
|
||||
conf := eth.APIConfig{
|
||||
SupportsStateDiff: sap.supportsStateDiffing,
|
||||
ForwardEthCalls: sap.forwardEthCalls,
|
||||
ForwardGetStorageAt: sap.forwardGetStorageAt,
|
||||
ProxyOnError: sap.proxyOnError,
|
||||
StateDiffTimeout: sap.stateDiffTimeout,
|
||||
GetLogsBlockLimit: sap.getLogsBlockLimit,
|
||||
}
|
||||
ethAPI, err := eth.NewPublicEthAPI(sap.backend, sap.client, conf)
|
||||
ethAPI, err := eth.NewPublicEthAPI(sap.backend, sap.client, sap.supportsStateDiffing, sap.forwardEthCalls, sap.proxyOnError)
|
||||
if err != nil {
|
||||
log.Fatalf("unable to create public eth api: %v", err)
|
||||
}
|
||||
|
||||
debugTracerAPI := tracers.APIs(&debug.Backend{Backend: *sap.backend})[0]
|
||||
|
||||
return append(apis,
|
||||
rpc.API{
|
||||
Namespace: eth.APIName,
|
||||
Version: eth.APIVersion,
|
||||
Service: ethAPI,
|
||||
Public: true,
|
||||
},
|
||||
debugTracerAPI,
|
||||
)
|
||||
return append(apis, rpc.API{
|
||||
Namespace: eth.APIName,
|
||||
Version: eth.APIVersion,
|
||||
Service: ethAPI,
|
||||
Public: true,
|
||||
})
|
||||
}
|
||||
|
||||
// Serve listens for incoming converter data off the screenAndServePayload from the Sync process
|
||||
// It filters and sends this data to any subscribers to the service
|
||||
// This process can also be stood up alone, without a screenAndServePayload attached to a Sync process
|
||||
// and it will hang on the WaitGroup indefinitely, allowing the Service to serve historical data requests only
|
||||
func (sap *Service) Serve(wg *sync.WaitGroup) {
|
||||
func (sap *Service) Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth.ConvertedPayload) {
|
||||
sap.serveWg = wg
|
||||
go func() {
|
||||
wg.Add(1)
|
||||
defer wg.Done()
|
||||
<-sap.QuitChan
|
||||
log.Info("quiting eth ipld server process")
|
||||
for {
|
||||
select {
|
||||
case payload := <-screenAndServePayload:
|
||||
sap.filterAndServe(payload)
|
||||
case <-sap.QuitChan:
|
||||
log.Info("quiting eth ipld server process")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
log.Debug("eth ipld server process successfully spun up")
|
||||
log.Info("eth ipld server process successfully spun up")
|
||||
}
|
||||
|
||||
// filterAndServe filters the payload according to each subscription type and sends to the subscriptions
|
||||
func (sap *Service) filterAndServe(payload eth.ConvertedPayload) {
|
||||
log.Debug("sending eth ipld payload to subscriptions")
|
||||
sap.Lock()
|
||||
sap.serveWg.Add(1)
|
||||
defer sap.Unlock()
|
||||
defer sap.serveWg.Done()
|
||||
for ty, subs := range sap.Subscriptions {
|
||||
// Retrieve the subscription parameters for this subscription type
|
||||
subConfig, ok := sap.SubscriptionTypes[ty]
|
||||
if !ok {
|
||||
log.Errorf("eth ipld server subscription configuration for subscription type %s not available", ty.Hex())
|
||||
sap.closeType(ty)
|
||||
continue
|
||||
}
|
||||
if subConfig.End.Int64() > 0 && subConfig.End.Int64() < payload.Block.Number().Int64() {
|
||||
// We are now out of range for this subscription type
|
||||
// close it, and continue to the next
|
||||
sap.closeType(ty)
|
||||
continue
|
||||
}
|
||||
response, err := sap.Filterer.Filter(subConfig, payload)
|
||||
if err != nil {
|
||||
log.Errorf("eth ipld server filtering error: %v", err)
|
||||
sap.closeType(ty)
|
||||
continue
|
||||
}
|
||||
responseRLP, err := rlp.EncodeToBytes(response)
|
||||
if err != nil {
|
||||
log.Errorf("eth ipld server rlp encoding error: %v", err)
|
||||
continue
|
||||
}
|
||||
for id, sub := range subs {
|
||||
select {
|
||||
case sub.PayloadChan <- SubscriptionPayload{Data: responseRLP, Err: "", Flag: EmptyFlag, Height: response.BlockNumber.Int64()}:
|
||||
log.Debugf("sending eth ipld server payload to subscription %s", id)
|
||||
default:
|
||||
log.Infof("unable to send eth ipld payload to subscription %s; channel has no receiver", id)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Subscribe is used by the API to remotely subscribe to the service loop
|
||||
// The params must be rlp serializable and satisfy the SubscriptionSettings() interface
|
||||
func (sap *Service) Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitChan chan<- bool, params eth.SubscriptionSettings) {
|
||||
sap.serveWg.Add(1)
|
||||
defer sap.serveWg.Done()
|
||||
log.Infof("new eth ipld subscription %s", id)
|
||||
subscription := Subscription{
|
||||
ID: id,
|
||||
PayloadChan: sub,
|
||||
QuitChan: quitChan,
|
||||
}
|
||||
// Subscription type is defined as the hash of the rlp-serialized subscription settings
|
||||
by, err := rlp.EncodeToBytes(params)
|
||||
if err != nil {
|
||||
sendNonBlockingErr(subscription, err)
|
||||
sendNonBlockingQuit(subscription)
|
||||
return
|
||||
}
|
||||
subscriptionType := crypto.Keccak256Hash(by)
|
||||
if !params.BackFillOnly {
|
||||
// Add subscriber
|
||||
sap.Lock()
|
||||
if sap.Subscriptions[subscriptionType] == nil {
|
||||
sap.Subscriptions[subscriptionType] = make(map[rpc.ID]Subscription)
|
||||
}
|
||||
sap.Subscriptions[subscriptionType][id] = subscription
|
||||
sap.SubscriptionTypes[subscriptionType] = params
|
||||
sap.Unlock()
|
||||
}
|
||||
// If the subscription requests a backfill, use the Postgres index to lookup and retrieve historical data
|
||||
// Otherwise we only filter new data as it is streamed in from the state diffing geth node
|
||||
if params.BackFill || params.BackFillOnly {
|
||||
if err := sap.sendHistoricalData(subscription, id, params); err != nil {
|
||||
sendNonBlockingErr(subscription, fmt.Errorf("eth ipld server subscription backfill error: %v", err))
|
||||
sendNonBlockingQuit(subscription)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sendHistoricalData sends historical data to the requesting subscription
|
||||
func (sap *Service) sendHistoricalData(sub Subscription, id rpc.ID, params eth.SubscriptionSettings) error {
|
||||
log.Infof("sending eth ipld historical data to subscription %s", id)
|
||||
// Retrieve cached CIDs relevant to this subscriber
|
||||
var endingBlock int64
|
||||
var startingBlock int64
|
||||
var err error
|
||||
startingBlock, err = sap.Retriever.RetrieveFirstBlockNumber()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if startingBlock < params.Start.Int64() {
|
||||
startingBlock = params.Start.Int64()
|
||||
}
|
||||
endingBlock, err = sap.Retriever.RetrieveLastBlockNumber()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if endingBlock > params.End.Int64() && params.End.Int64() > 0 && params.End.Int64() > startingBlock {
|
||||
endingBlock = params.End.Int64()
|
||||
}
|
||||
log.Debugf("eth ipld historical data starting block: %d", params.Start.Int64())
|
||||
log.Debugf("eth ipld historical data ending block: %d", endingBlock)
|
||||
go func() {
|
||||
sap.serveWg.Add(1)
|
||||
defer sap.serveWg.Done()
|
||||
for i := startingBlock; i <= endingBlock; i++ {
|
||||
select {
|
||||
case <-sap.QuitChan:
|
||||
log.Infof("ethereum historical data feed to subscription %s closed", id)
|
||||
return
|
||||
default:
|
||||
}
|
||||
cidWrappers, empty, err := sap.Retriever.Retrieve(params, i)
|
||||
if err != nil {
|
||||
sendNonBlockingErr(sub, fmt.Errorf("eth ipld server cid retrieval error at block %d\r%s", i, err.Error()))
|
||||
continue
|
||||
}
|
||||
if empty {
|
||||
continue
|
||||
}
|
||||
for _, cids := range cidWrappers {
|
||||
response, err := sap.IPLDFetcher.Fetch(cids)
|
||||
if err != nil {
|
||||
sendNonBlockingErr(sub, fmt.Errorf("eth ipld server ipld fetching error at block %d\r%s", i, err.Error()))
|
||||
continue
|
||||
}
|
||||
responseRLP, err := rlp.EncodeToBytes(response)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
select {
|
||||
case sub.PayloadChan <- SubscriptionPayload{Data: responseRLP, Err: "", Flag: EmptyFlag, Height: response.BlockNumber.Int64()}:
|
||||
log.Debugf("eth ipld server sending historical data payload to subscription %s", id)
|
||||
default:
|
||||
log.Infof("eth ipld server unable to send backFill payload to subscription %s; channel has no receiver", id)
|
||||
}
|
||||
}
|
||||
}
|
||||
// when we are done backfilling send an empty payload signifying so in the msg
|
||||
select {
|
||||
case sub.PayloadChan <- SubscriptionPayload{Data: nil, Err: "", Flag: BackFillCompleteFlag}:
|
||||
log.Debugf("eth ipld server sending backFill completion notice to subscription %s", id)
|
||||
default:
|
||||
log.Infof("eth ipld server unable to send backFill completion notice to subscription %s", id)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unsubscribe is used by the API to remotely unsubscribe from the StateDiffingService loop
|
||||
func (sap *Service) Unsubscribe(id rpc.ID) {
|
||||
log.Infof("unsubscribing %s from the eth ipld server", id)
|
||||
sap.Lock()
|
||||
for ty := range sap.Subscriptions {
|
||||
delete(sap.Subscriptions[ty], id)
|
||||
if len(sap.Subscriptions[ty]) == 0 {
|
||||
// If we removed the last subscription of this type, remove the subscription type outright
|
||||
delete(sap.Subscriptions, ty)
|
||||
delete(sap.SubscriptionTypes, ty)
|
||||
}
|
||||
}
|
||||
sap.Unlock()
|
||||
}
|
||||
|
||||
// Start is used to begin the service
|
||||
@ -170,16 +347,18 @@ func (sap *Service) Serve(wg *sync.WaitGroup) {
|
||||
func (sap *Service) Start() error {
|
||||
log.Info("starting eth ipld server")
|
||||
wg := new(sync.WaitGroup)
|
||||
sap.Serve(wg)
|
||||
payloadChan := make(chan eth.ConvertedPayload, PayloadChanBufferSize)
|
||||
sap.Serve(wg, payloadChan)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop is used to close down the service
|
||||
// This is mostly just to satisfy the node.Service interface
|
||||
func (sap *Service) Stop() error {
|
||||
log.Info("stopping eth ipld server")
|
||||
log.Infof("stopping eth ipld server")
|
||||
sap.Lock()
|
||||
close(sap.QuitChan)
|
||||
sap.close()
|
||||
sap.Unlock()
|
||||
return nil
|
||||
}
|
||||
@ -188,3 +367,28 @@ func (sap *Service) Stop() error {
|
||||
func (sap *Service) Backend() *eth.Backend {
|
||||
return sap.backend
|
||||
}
|
||||
|
||||
// close is used to close all listening subscriptions
|
||||
// close needs to be called with subscription access locked
|
||||
func (sap *Service) close() {
|
||||
log.Infof("closing all eth ipld server subscriptions")
|
||||
for subType, subs := range sap.Subscriptions {
|
||||
for _, sub := range subs {
|
||||
sendNonBlockingQuit(sub)
|
||||
}
|
||||
delete(sap.Subscriptions, subType)
|
||||
delete(sap.SubscriptionTypes, subType)
|
||||
}
|
||||
}
|
||||
|
||||
// closeType is used to close all subscriptions of given type
|
||||
// closeType needs to be called with subscription access locked
|
||||
func (sap *Service) closeType(subType common.Hash) {
|
||||
log.Infof("closing all eth ipld server subscriptions of type %s", subType.String())
|
||||
subs := sap.Subscriptions[subType]
|
||||
for _, sub := range subs {
|
||||
sendNonBlockingQuit(sub)
|
||||
}
|
||||
delete(sap.Subscriptions, subType)
|
||||
delete(sap.SubscriptionTypes, subType)
|
||||
}
|
||||
|
||||
pkg/serve/subscription.go (new file, 60 lines)
@ -0,0 +1,60 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package serve
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
type Flag int32
|
||||
|
||||
const (
|
||||
EmptyFlag Flag = iota
|
||||
BackFillCompleteFlag
|
||||
)
|
||||
|
||||
// Subscription holds the information for an individual client subscription to the watcher
|
||||
type Subscription struct {
|
||||
ID rpc.ID
|
||||
PayloadChan chan<- SubscriptionPayload
|
||||
QuitChan chan<- bool
|
||||
}
|
||||
|
||||
// SubscriptionPayload is the struct for a watcher data subscription payload
|
||||
// It carries data of a type specific to the chain being supported/queried and an error message
|
||||
type SubscriptionPayload struct {
|
||||
Data []byte `json:"data"` // e.g. for Ethereum rlp serialized eth.StreamPayload
|
||||
Height int64 `json:"height"`
|
||||
Err string `json:"err"` // field for error
|
||||
Flag Flag `json:"flag"` // field for message
|
||||
}
|
||||
|
||||
func (sp SubscriptionPayload) Error() error {
|
||||
if sp.Err == "" {
|
||||
return nil
|
||||
}
|
||||
return errors.New(sp.Err)
|
||||
}
|
||||
|
||||
func (sp SubscriptionPayload) BackFillComplete() bool {
|
||||
if sp.Flag == BackFillCompleteFlag {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
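A consumer of these payloads typically checks the error and the back-fill flag before touching Data. A hedged, package-local sketch (handlePayload is a hypothetical helper; Data is the rlp-encoded filter response produced by the service):

// handlePayload reports whether the back-fill has completed and returns the raw rlp bytes.
func handlePayload(p SubscriptionPayload) ([]byte, bool, error) {
	if err := p.Error(); err != nil {
		return nil, false, err
	}
	if p.BackFillComplete() {
		// Historical back-fill finished; only newly streamed blocks will follow.
		return nil, true, nil
	}
	// p.Data holds the response for block p.Height; decode it with rlp.DecodeBytes.
	return p.Data, false, nil
}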
|
||||
@ -16,12 +16,9 @@
|
||||
|
||||
package shared
|
||||
|
||||
import "time"
|
||||
|
||||
const (
|
||||
DefaultMaxBatchSize uint64 = 100
|
||||
DefaultMaxBatchNumber int64 = 50
|
||||
DefaultStateDiffTimeout time.Duration = 240 * time.Second
|
||||
DefaultMaxBatchSize uint64 = 100
|
||||
DefaultMaxBatchNumber int64 = 50
|
||||
|
||||
GcachePoolEnabled = "GCACHE_POOL_ENABLED"
|
||||
GcachePoolHttpPath = "GCACHE_POOL_HTTP_PATH"
|
||||
|
||||
@ -17,7 +17,7 @@
|
||||
package shared
|
||||
|
||||
import (
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
|
||||
@ -17,9 +17,12 @@
|
||||
package shared
|
||||
|
||||
import (
|
||||
"github.com/cerc-io/ipld-eth-server/v5/pkg/log"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ipfs/go-cid"
|
||||
blockstore "github.com/ipfs/go-ipfs-blockstore"
|
||||
dshelp "github.com/ipfs/go-ipfs-ds-help"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// HandleZeroAddrPointer will return an empty string for a nil address pointer
|
||||
@ -41,6 +44,36 @@ func HandleZeroAddr(to common.Address) string {
|
||||
// Rollback sql transaction and log any error
|
||||
func Rollback(tx *sqlx.Tx) {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.Error(err)
|
||||
logrus.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
// FetchIPLDByMhKeyAndBlockNumber is used to retrieve an ipld from Postgres blockstore with the provided tx, mhkey string and blockNumber
|
||||
func FetchIPLDByMhKeyAndBlockNumber(tx *sqlx.Tx, mhKey string, blockNumber uint64) ([]byte, error) {
|
||||
pgStr := `SELECT data FROM public.blocks WHERE key = $1 AND block_number = $2`
|
||||
var block []byte
|
||||
return block, tx.Get(&block, pgStr, mhKey, blockNumber)
|
||||
}
|
||||
|
||||
// FetchIPLD is used to retrieve an IPLD from Postgres by mhKey and blockNumber
|
||||
func FetchIPLD(db *sqlx.DB, mhKey string, blockNumber uint64) ([]byte, error) {
|
||||
pgStr := `SELECT data FROM public.blocks WHERE key = $1 AND block_number = $2`
|
||||
var block []byte
|
||||
return block, db.Get(&block, pgStr, mhKey, blockNumber)
|
||||
}
|
||||
|
||||
// MultihashKeyFromCID converts a cid into a blockstore-prefixed multihash db key string
|
||||
func MultihashKeyFromCID(c cid.Cid) string {
|
||||
dbKey := dshelp.MultihashToDsKey(c.Hash())
|
||||
return blockstore.BlockPrefix.String() + dbKey.String()
|
||||
}
|
||||
|
||||
// MultihashKeyFromCIDString converts a cid string into a blockstore-prefixed multihash db key string
|
||||
func MultihashKeyFromCIDString(c string) (string, error) {
|
||||
dc, err := cid.Decode(c)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
dbKey := dshelp.MultihashToDsKey(dc.Hash())
|
||||
return blockstore.BlockPrefix.String() + dbKey.String(), nil
|
||||
}
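Together these helpers take a caller from a CID to the raw block bytes in public.blocks. A hedged, package-local sketch (fetchByCID is a hypothetical wrapper; db, c and height come from the caller):

// fetchByCID derives the blockstore key for a CID and reads its bytes at the given block height.
func fetchByCID(db *sqlx.DB, c cid.Cid, height uint64) ([]byte, error) {
	// MultihashKeyFromCID applies the blockstore prefix used in the `key` column.
	key := MultihashKeyFromCID(c)
	return FetchIPLD(db, key, height)
}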
|
||||
|
||||
@ -19,17 +19,18 @@ package shared
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/cerc-io/plugeth-statediff/indexer"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/models"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/node"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/test_helpers"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
||||
"github.com/ethereum/go-ethereum/statediff/indexer/node"
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
@ -45,8 +46,7 @@ func IPLDsContainBytes(iplds []models.IPLDModel, b []byte) bool {
|
||||
|
||||
// SetupDB is used to set up a db for watcher tests
|
||||
func SetupDB() *sqlx.DB {
|
||||
config, err := postgres.TestConfig.WithEnv()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
config := getTestDBConfig()
|
||||
|
||||
db, err := NewDB(config.DbConnectionString(), config)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
@ -56,7 +56,34 @@ func SetupDB() *sqlx.DB {

// TearDownDB is used to tear down the watcher dbs after tests
func TearDownDB(db *sqlx.DB) {
    err := test_helpers.ClearSqlxDB(db)
    tx, err := db.Beginx()
    Expect(err).NotTo(HaveOccurred())
    _, err = tx.Exec(`DELETE FROM nodes`)
    Expect(err).NotTo(HaveOccurred())
    _, err = tx.Exec(`DELETE FROM eth.header_cids`)
    Expect(err).NotTo(HaveOccurred())
    _, err = tx.Exec(`DELETE FROM eth.uncle_cids`)
    Expect(err).NotTo(HaveOccurred())
    _, err = tx.Exec(`DELETE FROM eth.transaction_cids`)
    Expect(err).NotTo(HaveOccurred())
    _, err = tx.Exec(`DELETE FROM eth.receipt_cids`)
    Expect(err).NotTo(HaveOccurred())
    _, err = tx.Exec(`DELETE FROM eth.state_cids`)
    Expect(err).NotTo(HaveOccurred())
    _, err = tx.Exec(`DELETE FROM eth.storage_cids`)
    Expect(err).NotTo(HaveOccurred())
    _, err = tx.Exec(`DELETE FROM eth.state_accounts`)
    Expect(err).NotTo(HaveOccurred())
    _, err = tx.Exec(`DELETE FROM eth.access_list_elements`)
    Expect(err).NotTo(HaveOccurred())
    _, err = tx.Exec(`DELETE FROM blocks`)
    Expect(err).NotTo(HaveOccurred())
    _, err = tx.Exec(`DELETE FROM eth.log_cids`)
    Expect(err).NotTo(HaveOccurred())
    _, err = tx.Exec(`DELETE FROM eth_meta.watched_addresses`)
    Expect(err).NotTo(HaveOccurred())

    err = tx.Commit()
    Expect(err).NotTo(HaveOccurred())
}

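As a sketch only (not part of this diff), these helpers would typically bracket a Gomega spec; the suite below assumes the Ginkgo framework in addition to the dot-imported Gomega shown above, and the query it runs is illustrative:

```go
// Hypothetical spec wiring: SetupDB before each test, TearDownDB after.
var _ = Describe("watcher queries", func() {
    var db *sqlx.DB

    BeforeEach(func() {
        db = SetupDB()
    })

    AfterEach(func() {
        TearDownDB(db)
        Expect(db.Close()).To(Succeed())
    })

    It("can query the watcher schema", func() {
        var count int
        Expect(db.Get(&count, `SELECT count(*) FROM eth.header_cids`)).To(Succeed())
    })
})
```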
@ -69,10 +96,20 @@ func SetupTestStateDiffIndexer(ctx context.Context, chainConfig *params.ChainCon
        ChainID: params.TestChainConfig.ChainID.Uint64(),
    }

    dbconfig, err := postgres.TestConfig.WithEnv()
    Expect(err).NotTo(HaveOccurred())
    _, stateDiffIndexer, err := indexer.NewStateDiffIndexer(ctx, chainConfig, testInfo, dbconfig, true)
    _, stateDiffIndexer, err := indexer.NewStateDiffIndexer(ctx, chainConfig, testInfo, getTestDBConfig())
    Expect(err).NotTo(HaveOccurred())

    return stateDiffIndexer
}

func getTestDBConfig() postgres.Config {
    port, _ := strconv.Atoi(os.Getenv("DATABASE_PORT"))
    return postgres.Config{
        Hostname:     os.Getenv("DATABASE_HOSTNAME"),
        DatabaseName: os.Getenv("DATABASE_NAME"),
        Username:     os.Getenv("DATABASE_USER"),
        Password:     os.Getenv("DATABASE_PASSWORD"),
        Port:         port,
        Driver:       postgres.SQLX,
    }
}

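`getTestDBConfig` reads its connection parameters from the environment. A minimal sketch of seeding them from Go for a local run (the `TestMain` wrapper is an illustration, not part of this diff; the values mirror those used by scripts/run_unit_test.sh later in this diff, and `testing`/`os` imports are assumed):

```go
// Hypothetical TestMain that seeds the env vars getTestDBConfig expects.
func TestMain(m *testing.M) {
    os.Setenv("DATABASE_HOSTNAME", "127.0.0.1")
    os.Setenv("DATABASE_PORT", "8077")
    os.Setenv("DATABASE_NAME", "vulcanize_testing")
    os.Setenv("DATABASE_USER", "vdbm")
    os.Setenv("DATABASE_PASSWORD", "password")
    os.Exit(m.Run())
}
```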
137 scripts/gomoderator.py Normal file
@ -0,0 +1,137 @@
import os
import sys
import subprocess
import errno
from typing import List, Dict

"""
Resolves dependency conflicts between a plugin repository's and the core repository's go.mods

Usage: python3 gomoderator.py {path_to_core_repository} {path_to_plugin_repository}
"""

ERROR_INVALID_NAME = 123


def is_pathname_valid(pathname: str) -> bool:
    """
    `True` if the passed pathname is a valid pathname for the current OS;
    `False` otherwise.
    """
    try:
        if not isinstance(pathname, str) or not pathname:
            return False
        _, pathname = os.path.splitdrive(pathname)
        root_dirname = os.environ.get('HOMEDRIVE', 'C:') \
            if sys.platform == 'win32' else os.path.sep
        assert os.path.isdir(root_dirname)  # ...Murphy and her ironclad Law
        root_dirname = root_dirname.rstrip(os.path.sep) + os.path.sep
        for pathname_part in pathname.split(os.path.sep):
            try:
                os.lstat(root_dirname + pathname_part)
            except OSError as exc:
                if hasattr(exc, 'winerror'):
                    if exc.winerror == ERROR_INVALID_NAME:
                        return False
                elif exc.errno in {errno.ENAMETOOLONG, errno.ERANGE}:
                    return False
    except TypeError:
        return False
    else:
        return True


def map_deps_to_version(deps_arr: List[str]) -> Dict[str, str]:
    mapping = {}
    for d in deps_arr:
        if d.find(' => ') != -1:
            ds = d.split(' => ')
            d = ds[1]
        d = d.replace(" v", "[>v")  # might be able to just split on the empty space not _v and skip this :: insertion
        d_and_v = d.split("[>")
        mapping[d_and_v[0]] = d_and_v[1]
    return mapping


# argument checks
assert len(sys.argv) == 3, "need core repository and plugin repository path arguments"
core_repository_path = sys.argv[1]
plugin_repository_path = sys.argv[2]
assert is_pathname_valid(core_repository_path), "core repository path argument is not valid"
assert is_pathname_valid(plugin_repository_path), "plugin repository path argument is not valid"

# collect `go list -m all` output from both repositories; remain in the plugin repository
os.chdir(core_repository_path)
core_deps_b = subprocess.check_output(["go", "list", "-m", "all"])
os.chdir(plugin_repository_path)
plugin_deps_b = subprocess.check_output(["go", "list", "-m", "all"])
core_deps = core_deps_b.decode("utf-8")
core_deps_arr = core_deps.splitlines()
del core_deps_arr[0]  # first line is the project repo itself
plugin_deps = plugin_deps_b.decode("utf-8")
plugin_deps_arr = plugin_deps.splitlines()
del plugin_deps_arr[0]
core_deps_mapping = map_deps_to_version(core_deps_arr)
plugin_deps_mapping = map_deps_to_version(plugin_deps_arr)

# iterate over dependency maps for both repos and find version conflicts
# attempt to resolve conflicts by adding a `require` for the core version to the plugin's go.mod file
none = True
for dep, core_version in core_deps_mapping.items():
    if dep in plugin_deps_mapping.keys():
        plugin_version = plugin_deps_mapping[dep]
        if core_version != plugin_version:
            print(f'{dep} has a conflict: core is using version {core_version} '
                  f'but the plugin is using version {plugin_version}')
            fixed_dep = f'{dep}@{core_version}'
            print(f'attempting fix by `go mod edit -require={fixed_dep}`')
            subprocess.check_call(["go", "mod", "edit", f'-require={fixed_dep}'])
            none = False

if none:
    print("no conflicts to resolve")
    quit()

# the above process does not work for all dep conflicts e.g. golang.org/x/text v0.3.0 will not stick this way
# so we will try the `go get {dep}` route for any remaining conflicts
updated_plugin_deps_b = subprocess.check_output(["go", "list", "-m", "all"])
updated_plugin_deps = updated_plugin_deps_b.decode("utf-8")
updated_plugin_deps_arr = updated_plugin_deps.splitlines()
del updated_plugin_deps_arr[0]
updated_plugin_deps_mapping = map_deps_to_version(updated_plugin_deps_arr)
none = True
for dep, core_version in core_deps_mapping.items():
    if dep in updated_plugin_deps_mapping.keys():
        updated_plugin_version = updated_plugin_deps_mapping[dep]
        if core_version != updated_plugin_version:
            print(f'{dep} still has a conflict: core is using version {core_version} '
                  f'but the plugin is using version {updated_plugin_version}')
            fixed_dep = f'{dep}@{core_version}'
            print(f'attempting fix by `go get {fixed_dep}`')
            subprocess.check_call(["go", "get", fixed_dep])
            none = False

if none:
    print("all conflicts have been resolved")
    quit()

# iterate over the plugin's `go list -m all` output one more time and report whether the above worked
final_plugin_deps_b = subprocess.check_output(["go", "list", "-m", "all"])
final_plugin_deps = final_plugin_deps_b.decode("utf-8")
final_plugin_deps_arr = final_plugin_deps.splitlines()
del final_plugin_deps_arr[0]
final_plugin_deps_mapping = map_deps_to_version(final_plugin_deps_arr)
none = True
for dep, core_version in core_deps_mapping.items():
    if dep in final_plugin_deps_mapping.keys():
        final_plugin_version = final_plugin_deps_mapping[dep]
        if core_version != final_plugin_version:
            print(f'{dep} STILL has a conflict: core is using version {core_version} '
                  f'but the plugin is using version {final_plugin_version}')
            none = False

if none:
    print("all conflicts have been resolved")
    quit()

print("failed to resolve all conflicts")
15 scripts/install-postgres-11.sh Executable file
@ -0,0 +1,15 @@
#!/usr/bin/env bash

set -ex

echo "Installing Postgres 11"
sudo service postgresql stop
sudo apt-get remove -q 'postgresql-*'
sudo apt-get update -q
sudo apt-get install -q postgresql-11 postgresql-client-11
sudo cp /etc/postgresql/{9.6,11}/main/pg_hba.conf

echo "Restarting Postgres 11"
sudo service postgresql restart

sudo psql -c 'CREATE ROLE travis SUPERUSER LOGIN CREATEDB;' -U postgres
27 scripts/reset_db Executable file
@ -0,0 +1,27 @@
#!/usr/bin/env bash
# Provide me with a postgres database name, and I will:
# - Drop the database
# - Recreate the database
# - Run the vulcanizedb migration

if [ "$1" = "" ]; then
  echo "Provide a database name to reset"
  exit 1
fi

db=$1
dir=$(basename "$(pwd)")
if [ "$dir" != "ipld-eth-server" ]
then
  echo "Run me from the ipld-eth-server root dir"
  exit 1
fi

user=$(whoami)
psql -c "DROP DATABASE $db" postgres
if [ $? -eq 0 ]; then
  psql -c "CREATE DATABASE $db WITH OWNER $user" postgres
  make migrate HOST_NAME=localhost NAME=$db PORT=5432
else
  echo "Couldn't drop the database. Are you connected? Does it exist?"
fi
@ -1,56 +0,0 @@
#!/bin/bash

set -e

stack_dir=$(readlink -f "$1")
[[ -d "$stack_dir" ]]

laconic_so="laconic-so --verbose --stack $stack_dir"

CONFIG_DIR=$(readlink -f "${CONFIG_DIR:-$(mktemp -d)}")
# By default assume we are running in the project root.
export CERC_REPO_BASE_DIR="${CERC_REPO_BASE_DIR:-$(git rev-parse --show-toplevel)/..}"

# Don't run geth/plugeth in the debugger, it will swallow error backtraces
echo CERC_REMOTE_DEBUG=false >> $CONFIG_DIR/stack.env
# Passing this lets us run eth_call forwarding tests without running ipld-eth-db
echo CERC_RUN_STATEDIFF=${CERC_RUN_STATEDIFF:-true} >> $CONFIG_DIR/stack.env

set -x

if [[ -z $SKIP_BUILD ]]; then
  # Prevent conflicting tty output
  export BUILDKIT_PROGRESS=plain

  # The server itself will be run separately
  $laconic_so setup-repositories \
    --exclude git.vdb.to/cerc-io/ipld-eth-server
  $laconic_so build-containers \
    --exclude cerc/ipld-eth-server
fi

if ! $laconic_so deploy \
    --exclude ipld-eth-server \
    --env-file $CONFIG_DIR/stack.env \
    --cluster test up
then
  $laconic_so deploy --cluster test logs
  exit 1
fi

set +x

# Get IPv4 endpoint of geth bootnode file server
bootnode_endpoint=$(docker port test-fixturenet-eth-bootnode-geth-1 9898 | head -1)

# Extract the chain config and ID from genesis file
curl -s $bootnode_endpoint/geth.json | jq '.config' > $CONFIG_DIR/chain.json

# Output vars if we are running on Github
if [[ -n "$GITHUB_ENV" ]]; then
  echo ETH_CHAIN_ID="$(jq '.chainId' $CONFIG_DIR/chain.json)" >> "$GITHUB_ENV"
  echo ETH_CHAIN_CONFIG="$CONFIG_DIR/chain.json" >> "$GITHUB_ENV"
  echo ETH_HTTP_PATH=$(docker port test-fixturenet-eth-geth-1-1 8545 | head -1) >> "$GITHUB_ENV"
  # Read a private key so we can send from a funded account
  echo DEPLOYER_PRIVATE_KEY=$(curl -s $bootnode_endpoint/accounts.csv | head -1 | cut -d',' -f3) >> "$GITHUB_ENV"
fi
17 scripts/run_integration_test.sh Executable file
@ -0,0 +1,17 @@
set -e
set -o xtrace

export ETH_FORWARD_ETH_CALLS=false
export DB_WRITE=true
export ETH_PROXY_ON_ERROR=false

export PGPASSWORD=password
export DATABASE_USER=vdbm
export DATABASE_PORT=8077
export DATABASE_PASSWORD=password
export DATABASE_HOSTNAME=127.0.0.1

# Wait for containers to be up and execute the integration test.
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8081)" != "200" ]; do echo "waiting for ipld-eth-server..." && sleep 5; done && \
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8545)" != "200" ]; do echo "waiting for geth-statediff..." && sleep 5; done && \
make integrationtest
17 scripts/run_integration_test_forward_eth_calls.sh Executable file
@ -0,0 +1,17 @@
set -e
set -o xtrace

export ETH_FORWARD_ETH_CALLS=true
export DB_WRITE=false
export ETH_PROXY_ON_ERROR=false

export PGPASSWORD=password
export DATABASE_USER=vdbm
export DATABASE_PORT=8077
export DATABASE_PASSWORD=password
export DATABASE_HOSTNAME=127.0.0.1

# Wait for containers to be up and execute the integration test.
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8081)" != "200" ]; do echo "waiting for ipld-eth-server..." && sleep 5; done && \
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8545)" != "200" ]; do echo "waiting for geth-statediff..." && sleep 5; done && \
make integrationtest
16 scripts/run_unit_test.sh Executable file
@ -0,0 +1,16 @@
#!/bin/bash

# Remove any existing containers / volumes
docker-compose down --remove-orphans --volumes

# Spin up DB and run migrations
docker-compose up -d migrations ipld-eth-db
sleep 30

# Run unit tests
go clean -testcache
PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing make test

# Clean up
docker-compose down --remove-orphans --volumes
rm -rf out/
119 test/README.md Normal file
@ -0,0 +1,119 @@
# Test Instructions

## Setup

- Clone the [stack-orchestrator](https://github.com/vulcanize/stack-orchestrator), [ipld-eth-db](https://github.com/vulcanize/ipld-eth-db) and [go-ethereum](https://github.com/vulcanize/go-ethereum) repositories.

- Checkout the [v4 release](https://github.com/vulcanize/ipld-eth-db/releases/tag/v4.2.0-alpha) in the ipld-eth-db repo.
  ```bash
  # In ipld-eth-db repo.
  git checkout v4.2.0-alpha
  ```

- Checkout the [v4 release](https://github.com/vulcanize/go-ethereum/releases/tag/v1.10.19-statediff-4.1.0-alpha) in the go-ethereum repo.
  ```bash
  # In go-ethereum repo.
  git checkout v1.10.19-statediff-4.1.0-alpha
  ```

- Checkout the working commit in the stack-orchestrator repo.
  ```bash
  # In stack-orchestrator repo.
  git checkout f2fd766f5400fcb9eb47b50675d2e3b1f2753702
  ```

## Run

- Run unit tests:

  ```bash
  # In ipld-eth-server root directory.
  ./scripts/run_unit_test.sh
  ```

- Run integration tests:

  - In the stack-orchestrator repo, create a config file:

    ```bash
    cd helper-scripts

    ./create-config.sh
    ```

    A `config.sh` will be created in the root directory.

  - Update/edit the generated config file with:

    ```bash
    #!/bin/bash

    # Path to ipld-eth-db repo.
    vulcanize_ipld_eth_db=~/ipld-eth-db/

    # Path to go-ethereum repo.
    vulcanize_go_ethereum=~/go-ethereum/

    # Path to ipld-eth-server repo.
    vulcanize_ipld_eth_server=~/ipld-eth-server/

    # Path to test contract.
    vulcanize_test_contract=~/ipld-eth-server/test/contract

    genesis_file_path='start-up-files/go-ethereum/genesis.json'
    db_write=true
    eth_forward_eth_calls=false
    eth_proxy_on_error=false
    eth_http_path="go-ethereum:8545"
    ```

  - Run stack-orchestrator:

    ```bash
    # In stack-orchestrator root directory.
    cd helper-scripts

    ./wrapper.sh \
      -e docker \
      -d ../docker/local/docker-compose-db-sharding.yml \
      -d ../docker/local/docker-compose-go-ethereum.yml \
      -d ../docker/local/docker-compose-ipld-eth-server.yml \
      -d ../docker/local/docker-compose-contract.yml \
      -v remove \
      -p ../config.sh
    ```

  - Run the tests:

    ```bash
    # In ipld-eth-server root directory.
    ./scripts/run_integration_test.sh
    ```

  - Update the stack-orchestrator `config.sh` file:

    ```bash
    #!/bin/bash

    # Path to go-ethereum repo.
    vulcanize_go_ethereum=~/go-ethereum/

    # Path to ipld-eth-server repo.
    vulcanize_ipld_eth_server=~/ipld-eth-server/

    # Path to test contract.
    vulcanize_test_contract=~/ipld-eth-server/test/contract

    genesis_file_path='start-up-files/go-ethereum/genesis.json'
    db_write=false
    eth_forward_eth_calls=true
    eth_proxy_on_error=false
    eth_http_path="go-ethereum:8545"
    ```

  - Stop stack-orchestrator and start it again using the same command.

  - Run the integration tests for direct proxy fall-through of eth_calls:

    ```bash
    ./scripts/run_integration_test_forward_eth_calls.sh
    ```
@ -1,26 +0,0 @@
# Containers to run a DB instance for tests

services:
  migrations:
    restart: on-failure
    depends_on:
      - ipld-eth-db
    image: git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:v5.4.0-alpha
    environment:
      DATABASE_USER: "vdbm"
      DATABASE_NAME: "cerc_testing"
      DATABASE_PASSWORD: "password"
      DATABASE_HOSTNAME: "ipld-eth-db"
      DATABASE_PORT: 5432

  ipld-eth-db:
    container_name: test-ipld-eth-db
    image: timescale/timescaledb:latest-pg14
    restart: always
    command: ["postgres", "-c", "log_statement=all"]
    environment:
      POSTGRES_USER: "vdbm"
      POSTGRES_DB: "cerc_testing"
      POSTGRES_PASSWORD: "password"
    ports:
      - 127.0.0.1:8077:5432
@ -1,46 +0,0 @@
# Runs the IPLD server and contract deployment server

services:
  ipld-eth-server:
    restart: unless-stopped
    image: cerc/ipld-eth-server:local
    build:
      context: ..
    networks:
      - test_default
    environment:
      DATABASE_NAME: "cerc_testing"
      DATABASE_HOSTNAME: "ipld-eth-db"
      DATABASE_PORT: 5432
      DATABASE_USER: "vdbm"
      DATABASE_PASSWORD: "password"
      ETH_HTTP_PATH: fixturenet-eth-geth-1:8545
      ETH_CHAIN_CONFIG: /tmp/chain.json
      ETH_PROXY_ON_ERROR: false
      ETH_FORWARD_ETH_CALLS: $ETH_FORWARD_ETH_CALLS
      SERVER_HTTP_PATH: 0.0.0.0:8081
      VDB_COMMAND: serve
      LOG_LEVEL: debug
    volumes:
      - type: bind
        source: $ETH_CHAIN_CONFIG
        target: /tmp/chain.json
    ports:
      - 127.0.0.1:8081:8081

  contract-deployer:
    restart: on-failure
    image: cerc/ipld-eth-server/test-contract-deployer:local
    build: ./contract
    networks:
      - test_default
    environment:
      ETH_ADDR: "http://fixturenet-eth-geth-1:8545"
      ETH_CHAIN_ID: $ETH_CHAIN_ID
      DEPLOYER_PRIVATE_KEY: $DEPLOYER_PRIVATE_KEY
    ports:
      - 127.0.0.1:3000:3000

networks:
  test_default:
    external: true
@ -1,5 +1,7 @@
# Downgrade from 18.16, see https://github.com/NomicFoundation/hardhat/issues/3877
FROM node:20-slim
FROM node:14

ARG ETH_ADDR
ENV ETH_ADDR $ETH_ADDR

WORKDIR /usr/src/app
COPY package*.json ./
@ -9,4 +11,4 @@ RUN npm run compile && ls -lah

EXPOSE 3000

ENTRYPOINT ["node", "src/index.js"]
ENTRYPOINT ["npm", "start"]
@ -1,16 +1,10 @@
pragma solidity ^0.8.25;

pragma solidity ^0.8.0;
import "@openzeppelin/contracts/token/ERC20/ERC20.sol";

contract GLDToken is ERC20 {
    constructor() ERC20("Gold", "GLD") {
        _mint(msg.sender, 1000000000000000000000);
    }

    function destroy() public {
        (bool ok, ) = payable(msg.sender).call{value: address(this).balance}("");
        require(ok, "ETH transfer failed");

        _burn(msg.sender, balanceOf(msg.sender));
        selfdestruct(payable(msg.sender));
    }
}

@ -1,5 +1,5 @@
// SPDX-License-Identifier: AGPL-3.0
pragma solidity ^0.8.25;
pragma solidity ^0.8.0;

import "@openzeppelin/contracts/token/ERC20/ERC20.sol";

@ -7,9 +7,7 @@ contract SLVToken is ERC20 {
    uint256 private countA;
    uint256 private countB;

    constructor() ERC20("Silver", "SLV") {
        /* _mint(address(this), 1); */
    }
    constructor() ERC20("Silver", "SLV") {}

    function incrementCountA() public {
        countA = countA + 1;
@ -22,9 +20,6 @@ contract SLVToken is ERC20 {
    receive() external payable {}

    function destroy() public {
        (bool ok, ) = payable(msg.sender).call{value: address(this).balance}("");
        require(ok, "ETH transfer failed");

        /* _burn(address(this), balanceOf(address(this))); */
        selfdestruct(payable(msg.sender));
    }
}

@ -16,19 +16,9 @@ task("accounts", "Prints the list of accounts", async () => {
/**
 * @type import('hardhat/config').HardhatUserConfig
 */

const localNetwork = {
  url: process.env.ETH_ADDR || "http://127.0.0.1:8545",
  chainId: Number(process.env.ETH_CHAIN_ID) || 99,
};

if (process.env.DEPLOYER_PRIVATE_KEY) {
  localNetwork["accounts"] = [process.env.DEPLOYER_PRIVATE_KEY];
}

module.exports = {
  solidity: {
    version: "0.8.25",
    version: "0.8.0",
    settings: {
      outputSelection: {
        '*': {
@ -40,7 +30,14 @@ module.exports = {
      }
    },
  networks: {
    local: localNetwork
  },
  defaultNetwork: "local"
    local: {
      url: 'http://127.0.0.1:8545',
      chainId: 99
    },
    docker: {
      url: process.env.ETH_ADDR,
      chainId: 99
    }
  }
};

Some files were not shown because too many files have changed in this diff.