Compare commits
254 Commits
v0.3.2-alp...master

SHA1:
e3c2e92265, 8e652a634f, 1fc53ccab1, 31e9a7dc5e, c19cc5c44d, 6bd563e3d5,
b3d9e01d67, 0c44882cb2, 80413211c2, 7f15befdee, 217cfc63ec, 9550d60467,
da1c8b2332, fe7329c284, f144d932cd, 248d9eb7a2, be1d4281e8, e2b83a5737,
613dd6acf4, 8966d1b6c2, c556845d1a, cf8c08f01e, 5c6fb45d36, 222fa6d6ed,
6c82eafef1, 89b9a05234, 27d9413544, 4817674c5c, 8556332ff8, c25d220f71,
be34f0e8fd, 025b1c7c9e, da30a957a7, 705835512e, 2a0e9f8dfd, c8bdaefe97,
a141d154b7, 76986d9497, 233fa29740, 4883590d85, 181e3745f1, 4b697b2a98,
77812f2673, 849b17f4bd, 56c85709c1, 2be2a06575, 39fb9207b2, 3914889d53,
1a6ff273ba, f57b816530, 76aaa93c50, 778bf82dfd, e8c1db69b4, 86eee94f9b,
d785fda414, 41e04a1c30, 3aa5cb36ef, 43ddbc7eea, 8df8b50cb1, 072ba1edcc,
f61691a26e, 6ab34eb878, 3e211d5978, f600ea46bc, 6fa38fd198, 2fa941f084,
14332c2cd9, a780782bb6, 627f2c7f81, 925b22869a, e6fb859967, 850b305bf6,
2f6f939982, 409521416b, 72d3174f63, e288a2933d, 1020ec18a4, 3118bf4964,
61e6585f1c, e924974ece, 20b75ff18f, ef65993412, 520be6bc86, 01074ab55c,
88e20dfc6e, 4446219c36, 88a89c0cc2, 63e77c9bc0, 19570f733f, 3058dc48a8,
5d86b6e029, 3b863dc76b, e92b66638e, cb39a94792, ef4d1f958d, de12fab935,
34a6d3af4b, c28ebcc4f2, 93a995627c, 316bf0990a, 143d79fdfc, c07ee1d78a,
c3bcb8138b, f733250afe, e6869f4236, f3c247b54b, ace7eaad7d, 45de492b4c,
b56215db3f, f9a31ed862, 84e22e3b9c, 907c7132f4, 373d24e26b, 34a2e2369c,
1b513a3c02, bd093e0ea6, 19e3f04f29, 140989cbf7, 68152b0d77, 6d676a342d,
b912b7e75a, 9347affb77, 61e04e1d9a, c3d267a6d4, fcd2b963b0, 20c04a98bc,
25b49f784b, 2a1ec043b8, e9b5e6b995, d53bfae243, 2fc57c2f31, f184b9fd49,
bc986f7aad, 1973e8032d, 58cb6c252d, 9fef3687a0, 88ced60707, cc6822f7a6,
0f2b6fd843, 13e0a5cd19, b0674df3ce, e81da697bc, 7eb7849b5e, 500bba43b4,
f09f665b11, a1781b1eeb, f6a6a294bb, 81546618d3, 54db8f23e0, 1057b001f1,
3a216b2ca3, bca33381dd, 70c539c8d7, ec3165f62c, c3eab5d58e, 6d6f97169b,
77d28264f7, 885d934c90, 5acecec955, 5772d52eb1, 2de9c5bd48, 838ed033f8,
cf4543961c, f9abcfd33c, 32b4f56557, ed4171a1ab, 7f0c8fb2a1, 04a0f9a751,
1467ea3924, 458aae1c1e, 4d9edd2008, 121c75cc1d, 967c148eff, 71837c4b24,
d0c3241730, a28892f1d3, 3d1b308326, c0a4600ce7, b3e4fbfa39, 000c0ef066,
d8a5358a70, e00e602098, 148addb24d, 2630e2d8dc, 924d0be0b9, 70f7face75,
afc63ac960, f1a61d0991, 9a5581b543, 1d18d1fed8, 0a14bd9f0f, a3ca08b653,
b2828a814f, a284a566d5, b90fcb53e6, 18266c4f9d, 42f066772b, 9aaea6e016,
969da82f6e, f7fe3c2fc1, c4f7fa2821, b1a6fb0514, e7744cce4b, 491d779d58,
2c1bc557e5, 9cf75ca66c, 3b3716499f, b3ef2934b7, 8c16dc2b35, 7babaf6c05,
954e028306, 6844c60f7b, 97d8c4dc86, 1141f3909c, c62799fdd6, cef4b1ddc6,
39141bd30d, 0aa0f38c93, 8ac6d48772, bcbd2de5f3, d09b756768, 36b62f9b72,
9b960a105f, 2e5286baef, f8b9d9475e, 9d590e15bc, d5cf74e202, 7d88e78fa8,
9d6791706d, 50d53535bb, c813ff00f4, 53f06a66f5, 3c57fa1064, 141b3eaffe,
31cbaec567, 75debec01a, 07519468e9, 524ab42674, 325516da70, 08df7beca3,
f4ff261b21, aff66d950b, 677cdf0a72, 72e830e164, 6b919a8734, 86aa1c16e6,
4ea61b08ca, 88e499e5d3, df454c414e, 5200fd71dc, 86f3f44cac, f6780ddd95,
8d10dc98ee, d3f30b621b
.github/workflows/issues-notion-sync.yml (vendored, new file, +29)

```yaml
name: Notion Sync

on:
  workflow_dispatch:
  issues:
    types:
      [
        opened,
        edited,
        labeled,
        unlabeled,
        assigned,
        unassigned,
        milestoned,
        demilestoned,
        reopened,
        closed,
      ]

jobs:
  notion_job:
    runs-on: ubuntu-latest
    name: Add GitHub Issues to Notion
    steps:
      - name: Add GitHub Issues to Notion
        uses: vulcanize/notion-github-action@v1.2.4-issueid
        with:
          notion-token: ${{ secrets.NOTION_TOKEN }}
          notion-db: ${{ secrets.NOTION_DATABASE }}
```
.github/workflows/on-master.yaml (vendored, deleted, -25)

```yaml
name: Docker Compose Build

on:
  push:
    branches:
      - master

jobs:
  build:
    name: Run docker build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Get the version
        id: vars
        run: echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
      - name: Run docker build
        run: make docker-build
      - name: Tag docker image
        run: docker tag vulcanize/ipld-eth-server docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
      - name: Docker Login
        run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
      - name: Docker Push
        run: docker push docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
```
.github/workflows/on-pr.yaml (vendored, 37 lines changed)

```diff
@@ -1,35 +1,10 @@
 name: Docker Build
 
 on: [pull_request]
 
 jobs:
-  build:
-    name: Run docker build
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - name: Run docker build
-        run: make docker-build
-  test:
-    name: Run integration tests
-    env:
-      GOPATH: /tmp/go
-    strategy:
-      matrix:
-        go-version: [1.14.x, 1.15.x]
-        os: [ubuntu-latest]
-    runs-on: ${{ matrix.os }}
-    steps:
-      - name: Create GOPATH
-        run: mkdir -p /tmp/go
-      - name: Install Go
-        uses: actions/setup-go@v2
-        with:
-          go-version: ${{ matrix.go-version }}
-      - uses: actions/checkout@v2
-      - name: Run database
-        run: docker-compose up -d db
-      - name: Test
-        run: |
-          sleep 10
-          PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 make test
+  run-tests:
+    uses: ./.github/workflows/tests.yaml
+    secrets:
+      BUILD_HOSTNAME: ${{ secrets.BUILD_HOSTNAME }}
+      BUILD_USERNAME: ${{ secrets.BUILD_USERNAME }}
+      BUILD_KEY: ${{ secrets.BUILD_KEY }}
```
.github/workflows/publish.yaml (vendored, 26 lines changed)

```diff
@@ -3,9 +3,34 @@ on:
   release:
     types: [published]
 jobs:
+  run-tests:
+    uses: ./.github/workflows/tests.yaml
+    secrets:
+      BUILD_HOSTNAME: ${{ secrets.BUILD_HOSTNAME }}
+      BUILD_USERNAME: ${{ secrets.BUILD_USERNAME }}
+      BUILD_KEY: ${{ secrets.BUILD_KEY }}
+  build:
+    name: Run docker build
+    runs-on: ubuntu-latest
+    needs: run-tests
+    steps:
+      - uses: actions/checkout@v2
+      - name: Get the version
+        id: vars
+        run: echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
+      - name: Run docker build
+        run: make docker-build
+      - name: Tag docker image
+        run: docker tag vulcanize/ipld-eth-server docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
+      - name: Docker Login
+        run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
+      - name: Docker Push
+        run: docker push docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
+
   push_to_registries:
     name: Push Docker image to Docker Hub
     runs-on: ubuntu-latest
+    needs: build
     steps:
       - name: Get the version
         id: vars
@@ -22,4 +47,3 @@ jobs:
         run: docker tag docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}} vulcanize/ipld-eth-server:${{steps.vars.outputs.tag}}
       - name: Docker Push to Docker Hub
         run: docker push vulcanize/ipld-eth-server:${{steps.vars.outputs.tag}}
-
```
.github/workflows/run_unit_test.sh (vendored, new executable file, +29)

```bash
#!/bin/bash

set -e

# Set up repo
start_dir=$(pwd)
temp_dir=$(mktemp -d)
cd $temp_dir
git clone -b $(cat /tmp/git_head_ref) "https://github.com/$(cat /tmp/git_repository).git"
cd ipld-eth-server

## Remove the branch and github related info. This way future runs wont be confused.
rm -f /tmp/git_head_ref /tmp/git_repository

# Spin up DB
docker-compose -f docker-compose.yml up -d ipld-eth-db
trap "docker-compose down --remove-orphans; cd $start_dir ; rm -r $temp_dir" SIGINT SIGTERM ERR
sleep 10

# Remove old logs so there's no confusion, then run test
rm -f /tmp/test.log /tmp/return_test.txt
PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing make test > /tmp/test.log
echo $? > /tmp/return_test.txt

# Clean up
docker-compose -f docker-compose.yml down -v --remove-orphans
cd $start_dir
rm -fr $temp_dir
```
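The script above expects `/tmp/git_repository` and `/tmp/git_head_ref` to already exist; the tests.yaml workflow below stages them over SCP before invoking it. As a rough, hypothetical illustration of running it by hand on a build host (the values are placeholders):

```bash
# Hypothetical manual run; CI normally writes these files first (see tests.yaml below).
echo "vulcanize/ipld-eth-server" > /tmp/git_repository   # owner/repo, as $GITHUB_REPOSITORY would be
echo "master" > /tmp/git_head_ref                        # branch for the clone
bash .github/workflows/run_unit_test.sh
```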
.github/workflows/tests.yaml (vendored, new file, +189)

```yaml
name: Test the stack.
on:
  workflow_call:
    secrets:
      BUILD_HOSTNAME:
        required: true
      BUILD_USERNAME:
        required: true
      BUILD_KEY:
        required: true

jobs:
  build:
    name: Run docker build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Run docker build
        run: make docker-build

  test:
    name: Run unit tests
    env:
      GOPATH: /tmp/go
      # To run the unit tests you need to add secrets to your repository.
      BUILD_HOSTNAME: ${{ secrets.BUILD_HOSTNAME }}
      BUILD_USERNAME: ${{ secrets.BUILD_USERNAME }}
      BUILD_KEY: ${{ secrets.BUILD_KEY }}
    #strategy:
    #  matrix:
    #    go-version: [1.16.x, 1.17.x]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      # Passed experience with GHA has taught me to store variables in files instead of passing them as variables.
      - name: Output variables to files
        run: |
          echo $GITHUB_REPOSITORY > /tmp/git_repository
          [ -z "$GITHUB_HEAD_REF" ] && echo $GITHUB_REF_NAME > /tmp/git_head_ref || echo $GITHUB_HEAD_REF > /tmp/git_head_ref
          echo "-----BEGIN OPENSSH PRIVATE KEY-----" >> /tmp/key
          echo ${{ env.BUILD_KEY }} >> /tmp/key
          echo "-----END OPENSSH PRIVATE KEY-----" >> /tmp/key
          chmod 400 /tmp/key
          cat /tmp/git_repository
          cat /tmp/git_head_ref
          echo

      - name: Raw SCP
        run: |
          scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key /tmp/git_repository ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/git_repository
          scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key /tmp/git_head_ref ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/git_head_ref
          scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key .github/workflows/run_unit_test.sh ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/run_unit_test.sh

      - name: Trigger Unit Test
        run: |
          ssh -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }} go install github.com/onsi/ginkgo/ginkgo@latest
          ssh -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }} /tmp/run_unit_test.sh

      - name: Get the logs and cat them
        run: |
          scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/test.log .
          cat ./test.log

      - name: Check Error Code
        run: |
          scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/return_test.txt .
          [ $(cat ./return_test.txt) -eq 0 ]

  integrationtest:
    name: Run integration tests
    env:
      STACK_ORCHESTRATOR_REF: fcbc74451c5494664fe21f765e89c9c6565c07cb
      GO_ETHEREUM_REF: 498101102c891c4f8c3cab5649158c642ee1fd6b
      GOPATH: /tmp/go
      DB_WRITE: true
      ETH_FORWARD_ETH_CALLS: false
      ETH_PROXY_ON_ERROR: false
      ETH_HTTP_PATH: "go-ethereum:8545"
    runs-on: ubuntu-latest
    steps:
      - name: Create GOPATH
        run: mkdir -p /tmp/go
      - uses: actions/setup-go@v3
        with:
          go-version: ">=1.18.0"
          check-latest: true
      - uses: actions/checkout@v2
        with:
          path: "./ipld-eth-server"
      - uses: actions/checkout@v2
        with:
          ref: ${{ env.STACK_ORCHESTRATOR_REF }}
          path: "./stack-orchestrator/"
          repository: vulcanize/stack-orchestrator
      - uses: actions/checkout@v2
        with:
          ref: ${{ env.GO_ETHEREUM_REF }}
          repository: vulcanize/go-ethereum
          path: "./go-ethereum/"
      - name: Create config file
        run: |
          echo vulcanize_go_ethereum=$GITHUB_WORKSPACE/go-ethereum/ > ./config.sh
          echo vulcanize_ipld_eth_server=$GITHUB_WORKSPACE/ipld-eth-server/ >> ./config.sh
          echo db_write=$DB_WRITE >> ./config.sh
          echo eth_forward_eth_calls=$ETH_FORWARD_ETH_CALLS >> ./config.sh
          echo eth_proxy_on_error=$ETH_PROXY_ON_ERROR >> ./config.sh
          echo eth_http_path=$ETH_HTTP_PATH >> ./config.sh
          cat ./config.sh
      - name: Build geth
        run: |
          cd $GITHUB_WORKSPACE/stack-orchestrator/helper-scripts
          ./compile-geth.sh \
            -p "$GITHUB_WORKSPACE/config.sh" \
            -e docker
      - name: Run docker compose
        run: |
          docker-compose \
            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-db.yml" \
            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" \
            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-server.yml" \
            --env-file "$GITHUB_WORKSPACE/config.sh" \
            up -d --build
      - name: Test
        run: |
          cd $GITHUB_WORKSPACE/ipld-eth-server
          while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8081)" != "200" ]; do echo "waiting for ipld-eth-server..." && sleep 5; done && \
          while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8545)" != "200" ]; do echo "waiting for geth-statediff..." && sleep 5; done && \
          make integrationtest

  integrationtest_forwardethcalls:
    name: Run integration tests for direct proxy fall-through of eth_calls
    env:
      STACK_ORCHESTRATOR_REF: fcbc74451c5494664fe21f765e89c9c6565c07cb
      GO_ETHEREUM_REF: 498101102c891c4f8c3cab5649158c642ee1fd6b
      GOPATH: /tmp/go
      DB_WRITE: false
      ETH_FORWARD_ETH_CALLS: true
      ETH_PROXY_ON_ERROR: false
      ETH_HTTP_PATH: "go-ethereum:8545"
    runs-on: ubuntu-latest
    steps:
      - name: Create GOPATH
        run: mkdir -p /tmp/go
      - uses: actions/setup-go@v3
        with:
          go-version: ">=1.18.0"
          check-latest: true
      - uses: actions/checkout@v2
        with:
          path: "./ipld-eth-server"
      - uses: actions/checkout@v2
        with:
          ref: ${{ env.STACK_ORCHESTRATOR_REF }}
          path: "./stack-orchestrator/"
          repository: vulcanize/stack-orchestrator
      - uses: actions/checkout@v2
        with:
          ref: ${{ env.GO_ETHEREUM_REF }}
          repository: vulcanize/go-ethereum
          path: "./go-ethereum/"
      - name: Create config file
        run: |
          echo vulcanize_go_ethereum=$GITHUB_WORKSPACE/go-ethereum/ > ./config.sh
          echo vulcanize_ipld_eth_server=$GITHUB_WORKSPACE/ipld-eth-server/ >> ./config.sh
          echo db_write=$DB_WRITE >> ./config.sh
          echo eth_forward_eth_calls=$ETH_FORWARD_ETH_CALLS >> ./config.sh
          echo eth_proxy_on_error=$ETH_PROXY_ON_ERROR >> ./config.sh
          echo eth_http_path=$ETH_HTTP_PATH >> ./config.sh
          cat ./config.sh
      - name: Build geth
        run: |
          cd $GITHUB_WORKSPACE/stack-orchestrator/helper-scripts
          ./compile-geth.sh \
            -p "$GITHUB_WORKSPACE/config.sh" \
            -e docker
      - name: Run docker compose
        run: |
          docker-compose \
            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-db.yml" \
            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" \
            -f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-server.yml" \
            --env-file "$GITHUB_WORKSPACE/config.sh" \
            up -d --build
      - name: Test
        run: |
          cd $GITHUB_WORKSPACE/ipld-eth-server
          while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8081)" != "200" ]; do echo "waiting for ipld-eth-server..." && sleep 5; done && \
          while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8545)" != "200" ]; do echo "waiting for geth-statediff..." && sleep 5; done && \
          make integrationtest
```
Dockerfile (16 lines changed)

```diff
@@ -1,4 +1,4 @@
-FROM golang:1.13-alpine as builder
+FROM golang:1.18-alpine as builder
 
 RUN apk --update --no-cache add make git g++ linux-headers
 # DEBUG
@@ -6,8 +6,17 @@ RUN apk add busybox-extras
 
 # Build ipld-eth-server
 WORKDIR /go/src/github.com/vulcanize/ipld-eth-server
-ADD . .
-RUN GO111MODULE=on GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipld-eth-server .
+# Cache the modules
+ENV GO111MODULE=on
+COPY go.mod .
+COPY go.sum .
+RUN go mod download
+
+COPY . .
+
+# Build the binary
+RUN GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipld-eth-server .
 
 # Copy migration tool
 WORKDIR /
@@ -35,7 +44,6 @@ COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/ipld-eth-serv
 # keep binaries immutable
 COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-server/ipld-eth-server ipld-eth-server
 COPY --from=builder /goose goose
-COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-server/db/migrations migrations/vulcanizedb
 COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-server/environments environments
 
 ENTRYPOINT ["/app/entrypoint.sh"]
```
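Copying `go.mod`/`go.sum` and running `go mod download` before `COPY . .` lets Docker reuse the dependency layer when only source files change. A quick way to observe the effect of that layering, using the same image tag that `make docker-build` produces:

```bash
# Build twice; after touching only a .go file, the `go mod download` layer is served from cache.
docker build -t vulcanize/ipld-eth-server .
touch cmd/serve.go
docker build -t vulcanize/ipld-eth-server .
```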
Makefile (45 lines changed)

```diff
@@ -3,10 +3,6 @@ BASE = $(GOPATH)/src/$(PACKAGE)
 PKGS = go list ./... | grep -v "^vendor/"
 
 # Tools
-## Testing library
-GINKGO = $(BIN)/ginkgo
-$(BIN)/ginkgo:
-	go get -u github.com/onsi/ginkgo/ginkgo
 
 ## Migration tool
 GOOSE = $(BIN)/goose
@@ -26,8 +22,9 @@ $(BIN)/gometalinter.v2:
 
 .PHONY: installtools
-installtools: | $(LINT) $(GOOSE) $(GINKGO)
+installtools: | $(LINT) $(GOOSE)
 	echo "Installing tools"
+	go mod download
 
 .PHONY: metalint
 metalint: | $(METALINT)
@@ -54,46 +51,22 @@ TEST_CONNECT_STRING = postgresql://$(DATABASE_USER):$(DATABASE_PASSWORD)@$(DATAB
 TEST_CONNECT_STRING_LOCAL = postgresql://$(USER)@$(HOST_NAME):$(PORT)/$(TEST_DB)?sslmode=disable
 
 .PHONY: test
-test: | $(GINKGO) $(GOOSE)
+test: | $(GOOSE)
 	go vet ./...
 	go fmt ./...
-	export PGPASSWORD=$(DATABASE_PASSWORD)
-	dropdb -h $(DATABASE_HOSTNAME) -p $(DATABASE_PORT) -U $(DATABASE_USER) --if-exists $(TEST_DB)
-	createdb -h $(DATABASE_HOSTNAME) -p $(DATABASE_PORT) -U $(DATABASE_USER) $(TEST_DB)
-	$(GOOSE) -dir db/migrations postgres "$(TEST_CONNECT_STRING)" up
-	$(GINKGO) -r --skipPackage=integration_tests,integration
+	go run github.com/onsi/ginkgo/ginkgo -r --skipPackage=test
 
 .PHONY: integrationtest
-integrationtest: | $(GINKGO) $(GOOSE)
+integrationtest: | $(GOOSE)
 	go vet ./...
 	go fmt ./...
-	export PGPASSWORD=$(DATABASE_PASSWORD)
-	dropdb -h $(DATABASE_HOSTNAME) -p $(DATABASE_PORT) -U $(DATABASE_USER) --if-exists $(TEST_DB)
-	createdb -h $(DATABASE_HOSTNAME) -p $(DATABASE_PORT) -U $(DATABASE_USER) $(TEST_DB)
-	$(GOOSE) -dir db/migrations postgres "$(TEST_CONNECT_STRING)" up
-	$(GINKGO) -r integration_test/
+	go run github.com/onsi/ginkgo/ginkgo -r test/ -v
 
 .PHONY: test_local
-test_local: | $(GINKGO) $(GOOSE)
+test_local: | $(GOOSE)
 	go vet ./...
 	go fmt ./...
-	dropdb -h $(HOST_NAME) -p $(PORT) -U $(USER) --if-exists $(TEST_DB)
-	createdb -h $(HOST_NAME) -p $(PORT) -U $(USER) $(TEST_DB)
-	$(GOOSE) -dir db/migrations postgres "$(TEST_CONNECT_STRING_LOCAL)" up
-	$(GOOSE) -dir db/migrations postgres "$(TEST_CONNECT_STRING_LOCAL)" reset
-	make migrate NAME=$(TEST_DB)
-	$(GINKGO) -r --skipPackage=integration_tests,integration
-
-.PHONY: integrationtest_local
-integrationtest_local: | $(GINKGO) $(GOOSE)
-	go vet ./...
-	go fmt ./...
-	dropdb -h $(HOST_NAME) -p $(PORT) -U $(USER) --if-exists $(TEST_DB)
-	createdb -h $(HOST_NAME) -p $(PORT) -U $(USER) $(TEST_DB)
-	$(GOOSE) -dir db/migrations postgres "$(TEST_CONNECT_STRING_LOCAL)" up
-	$(GOOSE) -dir db/migrations postgres "$(TEST_CONNECT_STRING_LOCAL)" reset
-	make migrate NAME=$(TEST_DB)
-	$(GINKGO) -r integration_test/
+	./scripts/run_unit_test.sh
 
 build:
 	go fmt ./...
@@ -162,4 +135,4 @@ import:
 ## Build docker image
 .PHONY: docker-build
 docker-build:
 	docker build -t vulcanize/ipld-eth-server .
```
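With ginkgo now invoked through `go run`, the reworked `test` target only needs a reachable Postgres; the environment CI uses (see run_unit_test.sh above) doubles as a local recipe. The values below are the CI placeholders, not required settings:

```bash
# Same environment run_unit_test.sh points at the docker-compose ipld-eth-db service.
PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 \
DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 \
DATABASE_NAME=vulcanize_testing make test
```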
README.md (34 lines changed)

```diff
@@ -33,9 +33,9 @@ External dependency
 ## Install
 Start by downloading ipld-eth-server and moving into the repo:
 
-`GO111MODULE=off go get -d github.com/vulcanize/ipld-eth-server`
+`GO111MODULE=off go get -d github.com/vulcanize/ipld-eth-server/v3`
 
-`cd $GOPATH/src/github.com/vulcanize/ipld-eth-server`
+`cd $GOPATH/src/github.com/vulcanize/ipld-eth-server/v3@v3.x.x`
 
 Then, build the binary:
 
@@ -121,9 +121,35 @@ The currently supported standard endpoints are:
 
 TODO: Add the rest of the standard endpoints and unique endpoints (e.g. getSlice)
 
+
+### CLI Options and Environment variables
+
+
+| CLI Option | Environment Variable | Default Value | Comment |
+| ----------------------------- | ----------------------------- | ---------------- | ----------------------------------- |
+| `database-hostname` | `DATABASE_HOSTNAME` | localhost | IPLD database host |
+| `database-port` | `DATABASE_PORT` | 5432 | IPLD database port |
+| `database-name` | `DATABASE_NAME` | vulcanize_public | IPLD database name |
+| `database-user` | `DATABASE_USER` | | IPLD database user |
+| `database-password` | `DATABASE_PASSWORD` | | IPLD database password |
+| `eth-server-graphql` | `ETH_SERVER_GRAPHQL` | false | If `true` enable Eth GraphQL Server |
+| `eth-server-graphql-path` | `ETH_SERVER_GRAPHQLPATH` | | If `eth-server-graphql` set to true, endpoint url for graphql server (host:port) |
+| `eth-server-http` | `ETH_SERVER_HTTP` | true | If `true` enable Eth HTTP JSON-RPC Server |
+| `eth-server-http-path` | `ETH_SERVER_HTTPPATH` | | If `eth-server-http` set to `true`, endpoint url for Eth HTTP JSON-RPC server (host:port) |
+| `eth-server-ws` | `ETH_SERVER_WS` | false | If `true` enable Eth WS JSON-RPC Server |
+| `eth-server-ws-path` | `ETH_SERVER_WSPATH` | | If `eth-server-ws` set to `true`, endpoint url for Eth WS JSON-RPC server (host:port) |
+| `eth-server-ipc` | `ETH_SERVER_IPC` | false | If `true` enable Eth IPC JSON-RPC Server |
+| `eth-server-ipc-path` | `ETH_SERVER_IPC_PATH` | | If `eth-server-ws` set to `true`, path for Eth IPC JSON-RPC server |
+| `ipld-server-graphql` | `IPLD_SERVER_GRAPHQL` | false | If `true` enable IPLD GraphQL Server |
+| `ipld-server-graphql-path` | `IPLD_SERVER_GRAPHQLPATH` | | If `ipld-server-graphql` set to true, endpoint url for graphql server (host:port) |
+| `ipld-postgraphile-path` | `IPLD_POSTGRAPHILEPATH` | | If `ipld-server-graphql` set to true, http url for postgraphile server on top of IPLD db |
+| `tracing-http-path` | `TRACING_HTTPPATH` | | If `ipld-server-graphql` set to true, http url for tracing server |
+| `tracing-postgraphile-path` | `TRACING.POSTGRAPHILEPATH` | | If `ipld-server-graphql` set to true, http url for postgraphile server on top of tracing db |
+
+
 ### Testing
-`make test` will run the unit tests
-`make test` setups a clean `vulcanize_testing` db
+Follow steps in [test/README.md](./test/README.md)
 
 ## Monitoring
```
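Each CLI option in the new README table is also bound to a viper config key later in this diff (for example `database-hostname` maps to `database.hostname` in cmd/common.go, and `eth-server-http-path` to `eth.server.httpPath` in cmd/serve.go). A minimal, illustrative sketch of a config file assembled from those keys; the values and file name are placeholders, not shipped defaults:

```bash
# Illustrative only; the real key set is whatever the viper.BindPFlag calls under cmd/ define.
cat > config.toml <<'EOF'
[database]
  hostname = "localhost"
  port     = 5432
  name     = "vulcanize_public"
  user     = "vdbm"
  password = "password"

[eth.server]
  http     = true
  httpPath = "127.0.0.1:8081"
EOF
./ipld-eth-server serve --config=config.toml
```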
chain.json (new file, +16)

```json
{
  "chainId": 4,
  "homesteadBlock": 1,
  "eip150Block": 2,
  "eip150Hash": "0x9b095b36c15eaf13044373aef8ee0bd3a382a5abb92e402afa44b8249c3a90e9",
  "eip155Block": 3,
  "eip158Block": 3,
  "byzantiumBlock": 3,
  "constantinopleBlock": 3,
  "petersburgBlock": 3,
  "istanbulBlock": 3,
  "clique": {
    "period": 15,
    "epoch": 30000
  }
}
```
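This clique chain spec plausibly pairs with the new `--eth-chain-config` flag ("json chain config file location") added to the serve command later in this diff; that pairing is an assumption on my part, not stated by the diff itself. A hedged usage sketch with placeholder endpoints:

```bash
# Assumes chain.json is the kind of file --eth-chain-config expects; host:port values are placeholders.
./ipld-eth-server serve \
  --eth-chain-config=./chain.json \
  --eth-chain-id=4 \
  --eth-http-path=localhost:8545
```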
cmd/common.go (new file, +37)

```go
// Copyright © 2021 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func addDatabaseFlags(command *cobra.Command) {
	// database flags
	command.PersistentFlags().String("database-name", "vulcanize_public", "database name")
	command.PersistentFlags().Int("database-port", 5432, "database port")
	command.PersistentFlags().String("database-hostname", "localhost", "database hostname")
	command.PersistentFlags().String("database-user", "", "database user")
	command.PersistentFlags().String("database-password", "", "database password")

	// database flag bindings
	viper.BindPFlag("database.name", command.PersistentFlags().Lookup("database-name"))
	viper.BindPFlag("database.port", command.PersistentFlags().Lookup("database-port"))
	viper.BindPFlag("database.hostname", command.PersistentFlags().Lookup("database-hostname"))
	viper.BindPFlag("database.user", command.PersistentFlags().Lookup("database-user"))
	viper.BindPFlag("database.password", command.PersistentFlags().Lookup("database-password"))
}
```
cmd/root.go (50 lines changed)

```diff
@@ -19,16 +19,20 @@ package cmd
 import (
 	"fmt"
 	"os"
+	"path/filepath"
 	"strings"
 
+	"github.com/joho/godotenv"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
-	"github.com/vulcanize/ipld-eth-server/pkg/prom"
+
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/prom"
 )
 
 var (
 	cfgFile        string
+	envFile        string
 	subCommand     string
 	logWithCommand log.Entry
 )
@@ -99,11 +103,8 @@ func init() {
 	viper.AutomaticEnv()
 
 	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file location")
-	rootCmd.PersistentFlags().String("database-name", "vulcanize_public", "database name")
-	rootCmd.PersistentFlags().Int("database-port", 5432, "database port")
-	rootCmd.PersistentFlags().String("database-hostname", "localhost", "database hostname")
-	rootCmd.PersistentFlags().String("database-user", "", "database user")
-	rootCmd.PersistentFlags().String("database-password", "", "database password")
+	rootCmd.PersistentFlags().StringVar(&envFile, "env", "", "environment file location")
 	rootCmd.PersistentFlags().String("client-ipcPath", "", "location of geth.ipc file")
 	rootCmd.PersistentFlags().String("log-level", log.InfoLevel.String(), "log level (trace, debug, info, warn, error, fatal, panic)")
 	rootCmd.PersistentFlags().String("log-file", "", "file path for logging")
@@ -114,11 +115,6 @@ func init() {
 	rootCmd.PersistentFlags().String("prom-http-addr", "127.0.0.1", "http host for prometheus")
 	rootCmd.PersistentFlags().String("prom-http-port", "8090", "http port for prometheus")
 
-	viper.BindPFlag("database.name", rootCmd.PersistentFlags().Lookup("database-name"))
-	viper.BindPFlag("database.port", rootCmd.PersistentFlags().Lookup("database-port"))
-	viper.BindPFlag("database.hostname", rootCmd.PersistentFlags().Lookup("database-hostname"))
-	viper.BindPFlag("database.user", rootCmd.PersistentFlags().Lookup("database-user"))
-	viper.BindPFlag("database.password", rootCmd.PersistentFlags().Lookup("database-password"))
 	viper.BindPFlag("log.level", rootCmd.PersistentFlags().Lookup("log-level"))
 	viper.BindPFlag("log.file", rootCmd.PersistentFlags().Lookup("log-file"))
 
@@ -130,14 +126,32 @@ func init() {
 }
 
 func initConfig() {
+	if cfgFile == "" && envFile == "" {
+		log.Fatal("No configuration file specified, use --config , --env flag to provide configuration")
+	}
+
 	if cfgFile != "" {
-		viper.SetConfigFile(cfgFile)
-		if err := viper.ReadInConfig(); err == nil {
-			log.Printf("Using config file: %s", viper.ConfigFileUsed())
-		} else {
-			log.Fatal(fmt.Sprintf("Couldn't read config file: %s", err.Error()))
+		if filepath.Ext(cfgFile) != ".toml" {
+			log.Fatal("Provide .toml file for --config flag")
 		}
-	} else {
-		log.Warn("No config file passed with --config flag")
+
+		viper.SetConfigFile(cfgFile)
+		if err := viper.ReadInConfig(); err != nil {
+			log.Fatalf("Couldn't read config file: %s", err.Error())
+		}
+
+		log.Infof("Using config file: %s", viper.ConfigFileUsed())
+	}
+
+	if envFile != "" {
+		if filepath.Ext(envFile) != ".env" {
+			log.Fatal("Provide .env file for --env flag")
+		}
+
+		if err := godotenv.Load(envFile); err != nil {
+			log.Fatalf("Failed to set environment variable from env file: %s", err.Error())
+		}
+
+		log.Infof("Using env file: %s", envFile)
 	}
 }
```
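initConfig now refuses to start without either `--config` (a `.toml` read by viper) or `--env` (a `.env` loaded with godotenv), and it enforces the file extensions. The file names in this sketch are hypothetical:

```bash
# Either flag satisfies the new check in initConfig; the extension checks are strict.
./ipld-eth-server serve --config=./environments/example.toml   # must end in .toml
./ipld-eth-server serve --env=./deploy.env                     # must end in .env
```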
327
cmd/serve.go
327
cmd/serve.go
@ -16,24 +16,31 @@
|
|||||||
package cmd
|
package cmd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
"github.com/vulcanize/ipld-eth-server/pkg/graphql"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
|
"github.com/mailgun/groupcache/v2"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"github.com/spf13/viper"
|
"github.com/spf13/viper"
|
||||||
|
"github.com/vulcanize/gap-filler/pkg/mux"
|
||||||
|
|
||||||
"github.com/vulcanize/ipld-eth-indexer/pkg/eth"
|
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
|
||||||
|
"github.com/vulcanize/ipld-eth-server/v3/pkg/graphql"
|
||||||
srpc "github.com/vulcanize/ipld-eth-server/pkg/rpc"
|
srpc "github.com/vulcanize/ipld-eth-server/v3/pkg/rpc"
|
||||||
s "github.com/vulcanize/ipld-eth-server/pkg/serve"
|
s "github.com/vulcanize/ipld-eth-server/v3/pkg/serve"
|
||||||
v "github.com/vulcanize/ipld-eth-server/version"
|
v "github.com/vulcanize/ipld-eth-server/v3/version"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var ErrNoRpcEndpoints = errors.New("no rpc endpoints is available")
|
||||||
|
|
||||||
// serveCmd represents the serve command
|
// serveCmd represents the serve command
|
||||||
var serveCmd = &cobra.Command{
|
var serveCmd = &cobra.Command{
|
||||||
Use: "serve",
|
Use: "serve",
|
||||||
@ -71,12 +78,29 @@ func serve() {
|
|||||||
if err := startServers(server, serverConfig); err != nil {
|
if err := startServers(server, serverConfig); err != nil {
|
||||||
logWithCommand.Fatal(err)
|
logWithCommand.Fatal(err)
|
||||||
}
|
}
|
||||||
graphQL, err := startGraphQL(server)
|
graphQL, err := startEthGraphQL(server, serverConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logWithCommand.Fatal(err)
|
logWithCommand.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
shutdown := make(chan os.Signal)
|
err = startIpldGraphQL(serverConfig)
|
||||||
|
if err != nil {
|
||||||
|
logWithCommand.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = startGroupCacheService(serverConfig)
|
||||||
|
if err != nil {
|
||||||
|
logWithCommand.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if serverConfig.StateValidationEnabled {
|
||||||
|
go startStateTrieValidator(serverConfig, server)
|
||||||
|
logWithCommand.Info("state validator enabled")
|
||||||
|
} else {
|
||||||
|
logWithCommand.Info("state validator disabled")
|
||||||
|
}
|
||||||
|
|
||||||
|
shutdown := make(chan os.Signal, 1)
|
||||||
signal.Notify(shutdown, os.Interrupt)
|
signal.Notify(shutdown, os.Interrupt)
|
||||||
<-shutdown
|
<-shutdown
|
||||||
if graphQL != nil {
|
if graphQL != nil {
|
||||||
@ -87,27 +111,43 @@ func serve() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func startServers(server s.Server, settings *s.Config) error {
|
func startServers(server s.Server, settings *s.Config) error {
|
||||||
logWithCommand.Info("starting up IPC server")
|
if settings.IPCEnabled {
|
||||||
_, _, err := srpc.StartIPCEndpoint(settings.IPCEndpoint, server.APIs())
|
logWithCommand.Info("starting up IPC server")
|
||||||
if err != nil {
|
_, _, err := srpc.StartIPCEndpoint(settings.IPCEndpoint, server.APIs())
|
||||||
return err
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
logWithCommand.Info("IPC server is disabled")
|
||||||
}
|
}
|
||||||
logWithCommand.Info("starting up WS server")
|
|
||||||
_, _, err = srpc.StartWSEndpoint(settings.WSEndpoint, server.APIs(), []string{"vdb"}, nil, true)
|
if settings.WSEnabled {
|
||||||
if err != nil {
|
logWithCommand.Info("starting up WS server")
|
||||||
return err
|
_, _, err := srpc.StartWSEndpoint(settings.WSEndpoint, server.APIs(), []string{"vdb", "net"}, nil, true)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
logWithCommand.Info("WS server is disabled")
|
||||||
}
|
}
|
||||||
logWithCommand.Info("starting up HTTP server")
|
|
||||||
_, err = srpc.StartHTTPEndpoint(settings.HTTPEndpoint, server.APIs(), []string{"eth"}, nil, []string{"*"}, rpc.HTTPTimeouts{})
|
if settings.HTTPEnabled {
|
||||||
return err
|
logWithCommand.Info("starting up HTTP server")
|
||||||
|
_, err := srpc.StartHTTPEndpoint(settings.HTTPEndpoint, server.APIs(), []string{"vdb", "eth", "net"}, nil, []string{"*"}, rpc.HTTPTimeouts{})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
logWithCommand.Info("HTTP server is disabled")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func startGraphQL(server s.Server) (graphQLServer *graphql.Service, err error) {
|
func startEthGraphQL(server s.Server, settings *s.Config) (graphQLServer *graphql.Service, err error) {
|
||||||
viper.BindEnv("server.graphql", "SERVER_GRAPHQL")
|
if settings.EthGraphqlEnabled {
|
||||||
if viper.GetBool("server.graphql") {
|
logWithCommand.Info("starting up ETH GraphQL server")
|
||||||
logWithCommand.Info("starting up GraphQL server")
|
endPoint := settings.EthGraphqlEndpoint
|
||||||
viper.BindEnv("server.graphqlEndpoint", "SERVER_GRAPHQL_ENDPOINT")
|
|
||||||
endPoint := viper.GetString("server.graphqlEndpoint")
|
|
||||||
if endPoint != "" {
|
if endPoint != "" {
|
||||||
graphQLServer, err = graphql.New(server.Backend(), endPoint, nil, []string{"*"}, rpc.HTTPTimeouts{})
|
graphQLServer, err = graphql.New(server.Backend(), endPoint, nil, []string{"*"}, rpc.HTTPTimeouts{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -115,19 +155,185 @@ func startGraphQL(server s.Server) (graphQLServer *graphql.Service, err error) {
|
|||||||
}
|
}
|
||||||
err = graphQLServer.Start(nil)
|
err = graphQLServer.Start(nil)
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
logWithCommand.Info("ETH GraphQL server is disabled")
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func startIpldGraphQL(settings *s.Config) error {
|
||||||
|
if settings.IpldGraphqlEnabled {
|
||||||
|
logWithCommand.Info("starting up IPLD GraphQL server")
|
||||||
|
|
||||||
|
gqlIpldAddr, err := url.Parse(settings.IpldPostgraphileEndpoint)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
gqlTracingAPIAddr, err := url.Parse(settings.TracingPostgraphileEndpoint)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
ethClients, err := parseRpcAddresses(settings.EthHttpEndpoint)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var tracingClients []*rpc.Client
|
||||||
|
tracingEndpoint := viper.GetString("tracing.httpPath")
|
||||||
|
if tracingEndpoint != "" {
|
||||||
|
tracingClients, err = parseRpcAddresses(tracingEndpoint)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
router, err := mux.NewServeMux(&mux.Options{
|
||||||
|
BasePath: "/",
|
||||||
|
EnableGraphiQL: true,
|
||||||
|
Postgraphile: mux.PostgraphileOptions{
|
||||||
|
Default: gqlIpldAddr,
|
||||||
|
TracingAPI: gqlTracingAPIAddr,
|
||||||
|
},
|
||||||
|
RPC: mux.RPCOptions{
|
||||||
|
DefaultClients: ethClients,
|
||||||
|
TracingClients: tracingClients,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
go http.ListenAndServe(settings.IpldGraphqlEndpoint, router)
|
||||||
|
} else {
|
||||||
|
logWithCommand.Info("IPLD GraphQL server is disabled")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func startGroupCacheService(settings *s.Config) error {
|
||||||
|
gcc := settings.GroupCache
|
||||||
|
|
||||||
|
if gcc.Pool.Enabled {
|
||||||
|
logWithCommand.Info("starting up groupcache pool HTTTP server")
|
||||||
|
|
||||||
|
pool := groupcache.NewHTTPPoolOpts(gcc.Pool.HttpEndpoint, &groupcache.HTTPPoolOptions{})
|
||||||
|
pool.Set(gcc.Pool.PeerHttpEndpoints...)
|
||||||
|
|
||||||
|
httpURL, err := url.Parse(gcc.Pool.HttpEndpoint)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
server := http.Server{
|
||||||
|
Addr: httpURL.Host,
|
||||||
|
Handler: pool,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start a HTTP server to listen for peer requests from the groupcache
|
||||||
|
go server.ListenAndServe()
|
||||||
|
|
||||||
|
logWithCommand.Infof("groupcache pool endpoint opened for url %s", httpURL)
|
||||||
|
} else {
|
||||||
|
logWithCommand.Info("Groupcache pool is disabled")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func startStateTrieValidator(config *s.Config, server s.Server) {
|
||||||
|
validateEveryNthBlock := config.StateValidationEveryNthBlock
|
||||||
|
|
||||||
|
var lastBlockNumber uint64
|
||||||
|
backend := server.Backend()
|
||||||
|
|
||||||
|
for {
|
||||||
|
time.Sleep(5 * time.Second)
|
||||||
|
|
||||||
|
block, err := backend.CurrentBlock()
|
||||||
|
if err != nil {
|
||||||
|
log.Errorln("Error fetching current block for state trie validator")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
stateRoot := block.Root()
|
||||||
|
blockNumber := block.NumberU64()
|
||||||
|
blockHash := block.Hash()
|
||||||
|
|
||||||
|
if validateEveryNthBlock <= 0 || // Used for static replicas where block number doesn't progress.
|
||||||
|
(blockNumber > lastBlockNumber) && (blockNumber%validateEveryNthBlock == 0) {
|
||||||
|
|
||||||
|
// The validate trie call will take a long time on mainnet, e.g. a few hours.
|
||||||
|
if err = backend.ValidateTrie(stateRoot); err != nil {
|
||||||
|
log.Fatalf("Error validating trie for block number %d hash %s state root %s",
|
||||||
|
blockNumber,
|
||||||
|
blockHash,
|
||||||
|
stateRoot,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("Successfully validated trie for block number %d hash %s state root %s",
|
||||||
|
blockNumber,
|
||||||
|
blockHash,
|
||||||
|
stateRoot,
|
||||||
|
)
|
||||||
|
|
||||||
|
if validateEveryNthBlock <= 0 {
|
||||||
|
// Static replica, sleep a long-ish time (1/2 of cache expiry time) since we only need to keep the cache warm.
|
||||||
|
time.Sleep((time.Minute * time.Duration(config.GroupCache.StateDB.CacheExpiryInMins)) / 2)
|
||||||
|
}
|
||||||
|
|
||||||
|
lastBlockNumber = blockNumber
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseRpcAddresses(value string) ([]*rpc.Client, error) {
|
||||||
|
rpcAddresses := strings.Split(value, ",")
|
||||||
|
rpcClients := make([]*rpc.Client, 0, len(rpcAddresses))
|
||||||
|
for _, address := range rpcAddresses {
|
||||||
|
rpcClient, err := rpc.Dial(address)
|
||||||
|
if err != nil {
|
||||||
|
logWithCommand.Errorf("couldn't connect to %s. Error: %s", address, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
rpcClients = append(rpcClients, rpcClient)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(rpcClients) == 0 {
|
||||||
|
logWithCommand.Error(ErrNoRpcEndpoints)
|
||||||
|
return nil, ErrNoRpcEndpoints
|
||||||
|
}
|
||||||
|
|
||||||
|
return rpcClients, nil
|
||||||
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
rootCmd.AddCommand(serveCmd)
|
rootCmd.AddCommand(serveCmd)
|
||||||
|
|
||||||
|
addDatabaseFlags(serveCmd)
|
||||||
|
|
||||||
// flags for all config variables
|
// flags for all config variables
|
||||||
serveCmd.PersistentFlags().Bool("server-graphql", false, "turn on the graphql server")
|
// eth graphql and json-rpc parameters
|
||||||
serveCmd.PersistentFlags().String("server-graphql-endpoint", "", "endpoint url for graphql server")
|
serveCmd.PersistentFlags().Bool("eth-server-graphql", false, "turn on the eth graphql server")
|
||||||
serveCmd.PersistentFlags().String("server-ws-path", "", "vdb server ws path")
|
serveCmd.PersistentFlags().String("eth-server-graphql-path", "", "endpoint url for eth graphql server (host:port)")
|
||||||
serveCmd.PersistentFlags().String("server-http-path", "", "vdb server http path")
|
serveCmd.PersistentFlags().Bool("eth-server-http", true, "turn on the eth http json-rpc server")
|
||||||
serveCmd.PersistentFlags().String("server-ipc-path", "", "vdb server ipc path")
|
serveCmd.PersistentFlags().String("eth-server-http-path", "", "endpoint url for eth http json-rpc server (host:port)")
|
||||||
|
serveCmd.PersistentFlags().Bool("eth-server-ws", false, "turn on the eth websocket json-rpc server")
|
||||||
|
serveCmd.PersistentFlags().String("eth-server-ws-path", "", "endpoint url for eth websocket json-rpc server (host:port)")
|
||||||
|
serveCmd.PersistentFlags().Bool("eth-server-ipc", false, "turn on the eth ipc json-rpc server")
|
||||||
|
serveCmd.PersistentFlags().String("eth-server-ipc-path", "", "path for eth ipc json-rpc server")
|
||||||
|
|
||||||
|
// ipld and tracing graphql parameters
|
||||||
|
serveCmd.PersistentFlags().Bool("ipld-server-graphql", false, "turn on the ipld graphql server")
|
||||||
|
serveCmd.PersistentFlags().String("ipld-server-graphql-path", "", "endpoint url for ipld graphql server (host:port)")
|
||||||
|
serveCmd.PersistentFlags().String("ipld-postgraphile-path", "", "http url to postgraphile on top of ipld database")
|
||||||
|
serveCmd.PersistentFlags().String("tracing-http-path", "", "http url to tracing service")
|
||||||
|
serveCmd.PersistentFlags().String("tracing-postgraphile-path", "", "http url to postgraphile on top of tracing db")
|
||||||
|
|
||||||
serveCmd.PersistentFlags().String("eth-http-path", "", "http url for ethereum node")
|
serveCmd.PersistentFlags().String("eth-http-path", "", "http url for ethereum node")
|
||||||
serveCmd.PersistentFlags().String("eth-node-id", "", "eth node id")
|
serveCmd.PersistentFlags().String("eth-node-id", "", "eth node id")
|
||||||
@ -137,13 +343,46 @@ func init() {
|
|||||||
serveCmd.PersistentFlags().String("eth-chain-id", "1", "eth chain id")
|
serveCmd.PersistentFlags().String("eth-chain-id", "1", "eth chain id")
|
||||||
serveCmd.PersistentFlags().String("eth-default-sender", "", "default sender address")
|
serveCmd.PersistentFlags().String("eth-default-sender", "", "default sender address")
|
||||||
serveCmd.PersistentFlags().String("eth-rpc-gas-cap", "", "rpc gas cap (for eth_Call execution)")
|
serveCmd.PersistentFlags().String("eth-rpc-gas-cap", "", "rpc gas cap (for eth_Call execution)")
|
||||||
|
serveCmd.PersistentFlags().String("eth-chain-config", "", "json chain config file location")
|
||||||
|
serveCmd.PersistentFlags().Bool("eth-supports-state-diff", false, "whether the proxy ethereum client supports statediffing endpoints")
|
||||||
|
serveCmd.PersistentFlags().Bool("eth-forward-eth-calls", false, "whether to immediately forward eth_calls to proxy client")
|
||||||
|
serveCmd.PersistentFlags().Bool("eth-proxy-on-error", true, "whether to forward all failed calls to proxy client")
|
||||||
|
|
||||||
|
// groupcache flags
|
||||||
|
serveCmd.PersistentFlags().Bool("gcache-pool-enabled", false, "turn on the groupcache pool")
|
||||||
|
serveCmd.PersistentFlags().String("gcache-pool-http-path", "", "http url for groupcache node")
|
||||||
|
serveCmd.PersistentFlags().StringArray("gcache-pool-http-peers", []string{}, "http urls for groupcache peers")
|
||||||
|
serveCmd.PersistentFlags().Int("gcache-statedb-cache-size", 16, "state DB cache size in MB")
|
||||||
|
serveCmd.PersistentFlags().Int("gcache-statedb-cache-expiry", 60, "state DB cache expiry time in mins")
|
||||||
|
serveCmd.PersistentFlags().Int("gcache-statedb-log-stats-interval", 60, "state DB cache stats log interval in secs")
|
||||||
|
|
||||||
|
// state validator flags
|
||||||
|
serveCmd.PersistentFlags().Bool("validator-enabled", false, "turn on the state validator")
|
||||||
|
serveCmd.PersistentFlags().Uint("validator-every-nth-block", 1500, "only validate every Nth block")
|
||||||
|
|
||||||
// and their bindings
|
// and their bindings
|
||||||
viper.BindPFlag("server.graphql", serveCmd.PersistentFlags().Lookup("server-graphql"))
|
// eth graphql server
|
||||||
viper.BindPFlag("server.graphqlEndpoint", serveCmd.PersistentFlags().Lookup("server-graphql-endpoint"))
|
viper.BindPFlag("eth.server.graphql", serveCmd.PersistentFlags().Lookup("eth-server-graphql"))
|
||||||
viper.BindPFlag("server.wsPath", serveCmd.PersistentFlags().Lookup("server-ws-path"))
|
viper.BindPFlag("eth.server.graphqlPath", serveCmd.PersistentFlags().Lookup("eth-server-graphql-path"))
|
||||||
viper.BindPFlag("server.httpPath", serveCmd.PersistentFlags().Lookup("server-http-path"))
|
|
||||||
viper.BindPFlag("server.ipcPath", serveCmd.PersistentFlags().Lookup("server-ipc-path"))
|
// eth http json-rpc server
|
||||||
|
viper.BindPFlag("eth.server.http", serveCmd.PersistentFlags().Lookup("eth-server-http"))
|
||||||
|
viper.BindPFlag("eth.server.httpPath", serveCmd.PersistentFlags().Lookup("eth-server-http-path"))
|
||||||
|
|
||||||
|
// eth websocket json-rpc server
|
||||||
|
viper.BindPFlag("eth.server.ws", serveCmd.PersistentFlags().Lookup("eth-server-ws"))
|
||||||
|
viper.BindPFlag("eth.server.wsPath", serveCmd.PersistentFlags().Lookup("eth-server-ws-path"))
|
||||||
|
|
||||||
|
// eth ipc json-rpc server
|
||||||
|
viper.BindPFlag("eth.server.ipc", serveCmd.PersistentFlags().Lookup("eth-server-ipc"))
|
||||||
|
viper.BindPFlag("eth.server.ipcPath", serveCmd.PersistentFlags().Lookup("eth-server-ipc-path"))
|
||||||
|
|
||||||
|
// ipld and tracing graphql parameters
|
||||||
|
viper.BindPFlag("ipld.server.graphql", serveCmd.PersistentFlags().Lookup("ipld-server-graphql"))
|
||||||
|
viper.BindPFlag("ipld.server.graphqlPath", serveCmd.PersistentFlags().Lookup("ipld-server-graphql-path"))
|
||||||
|
viper.BindPFlag("ipld.postgraphilePath", serveCmd.PersistentFlags().Lookup("ipld-postgraphile-path"))
|
||||||
|
viper.BindPFlag("tracing.httpPath", serveCmd.PersistentFlags().Lookup("tracing-http-path"))
|
||||||
|
viper.BindPFlag("tracing.postgraphilePath", serveCmd.PersistentFlags().Lookup("tracing-postgraphile-path"))
|
||||||
|
|
||||||
viper.BindPFlag("ethereum.httpPath", serveCmd.PersistentFlags().Lookup("eth-http-path"))
|
viper.BindPFlag("ethereum.httpPath", serveCmd.PersistentFlags().Lookup("eth-http-path"))
|
||||||
viper.BindPFlag("ethereum.nodeID", serveCmd.PersistentFlags().Lookup("eth-node-id"))
|
viper.BindPFlag("ethereum.nodeID", serveCmd.PersistentFlags().Lookup("eth-node-id"))
|
||||||
@ -153,4 +392,20 @@ func init() {
	viper.BindPFlag("ethereum.chainID", serveCmd.PersistentFlags().Lookup("eth-chain-id"))
	viper.BindPFlag("ethereum.defaultSender", serveCmd.PersistentFlags().Lookup("eth-default-sender"))
	viper.BindPFlag("ethereum.rpcGasCap", serveCmd.PersistentFlags().Lookup("eth-rpc-gas-cap"))
+	viper.BindPFlag("ethereum.chainConfig", serveCmd.PersistentFlags().Lookup("eth-chain-config"))
+	viper.BindPFlag("ethereum.supportsStateDiff", serveCmd.PersistentFlags().Lookup("eth-supports-state-diff"))
+	viper.BindPFlag("ethereum.forwardEthCalls", serveCmd.PersistentFlags().Lookup("eth-forward-eth-calls"))
+	viper.BindPFlag("ethereum.proxyOnError", serveCmd.PersistentFlags().Lookup("eth-proxy-on-error"))
+
+	// groupcache flags
+	viper.BindPFlag("groupcache.pool.enabled", serveCmd.PersistentFlags().Lookup("gcache-pool-enabled"))
+	viper.BindPFlag("groupcache.pool.httpEndpoint", serveCmd.PersistentFlags().Lookup("gcache-pool-http-path"))
+	viper.BindPFlag("groupcache.pool.peerHttpEndpoints", serveCmd.PersistentFlags().Lookup("gcache-pool-http-peers"))
+	viper.BindPFlag("groupcache.statedb.cacheSizeInMB", serveCmd.PersistentFlags().Lookup("gcache-statedb-cache-size"))
+	viper.BindPFlag("groupcache.statedb.cacheExpiryInMins", serveCmd.PersistentFlags().Lookup("gcache-statedb-cache-expiry"))
+	viper.BindPFlag("groupcache.statedb.logStatsIntervalInSecs", serveCmd.PersistentFlags().Lookup("gcache-statedb-log-stats-interval"))
+
+	// state validator flags
+	viper.BindPFlag("validator.enabled", serveCmd.PersistentFlags().Lookup("validator-enabled"))
+	viper.BindPFlag("validator.everyNthBlock", serveCmd.PersistentFlags().Lookup("validator-every-nth-block"))
}
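For context, each `BindPFlag` call above ties a cobra CLI flag to a viper config key, so the same setting can come from the flag, the TOML config, or the environment. The following is a minimal sketch of how a consumer might read the bound keys; it is not code from this repository, and the defaults are simply copied from the flag registrations above.

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	// Defaults here mirror the flag defaults registered on serveCmd above.
	viper.SetDefault("groupcache.statedb.cacheSizeInMB", 16)
	viper.SetDefault("validator.everyNthBlock", 1500)

	// After BindPFlag, the same keys are also populated from CLI flags, and
	// from the TOML config or environment when the application wires those up.
	cacheSize := viper.GetInt("groupcache.statedb.cacheSizeInMB")
	everyNth := viper.GetUint("validator.everyNthBlock")
	fmt.Printf("state DB cache: %d MB, validate every %d blocks\n", cacheSize, everyNth)
}
```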
@ -20,7 +20,6 @@ import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/rpc"
@ -28,9 +27,9 @@ import (
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

-	"github.com/vulcanize/ipld-eth-server/pkg/client"
-	"github.com/vulcanize/ipld-eth-server/pkg/eth"
-	w "github.com/vulcanize/ipld-eth-server/pkg/serve"
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/client"
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
+	w "github.com/vulcanize/ipld-eth-server/v3/pkg/serve"
)

// subscribeCmd represents the subscribe command
@ -129,7 +128,7 @@ func subscribe() {
	}
	// This assumes leafs only
	for _, stateNode := range ethData.StateNodes {
-		var acct state.Account
+		var acct types.StateAccount
		err = rlp.DecodeBytes(stateNode.IPLD.Data, &acct)
		if err != nil {
			logWithCommand.Error(err)
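The hunk above swaps the deprecated `state.Account` type for go-ethereum's `types.StateAccount` when decoding state leaf payloads. A minimal, self-contained sketch of that decode step is below; the `leaf` variable is a placeholder for the `stateNode.IPLD.Data` bytes delivered by the Stream subscription, not real data.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

// decodeAccount decodes an RLP-encoded state leaf value into an account
// struct, mirroring what the subscribe command does per state node.
func decodeAccount(data []byte) (*types.StateAccount, error) {
	var acct types.StateAccount
	if err := rlp.DecodeBytes(data, &acct); err != nil {
		return nil, err
	}
	return &acct, nil
}

func main() {
	// Placeholder input: in the real flow this is stateNode.IPLD.Data from a
	// subscription payload, not an empty slice.
	var leaf []byte
	if acct, err := decodeAccount(leaf); err != nil {
		fmt.Println("decode failed:", err)
	} else {
		fmt.Printf("nonce=%d balance=%v\n", acct.Nonce, acct.Balance)
	}
}
```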
cmd/validate.go (new file, 87 lines)
@ -0,0 +1,87 @@
// Copyright © 2021 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"time"

	"github.com/ethereum/go-ethereum/common"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	validator "github.com/vulcanize/eth-ipfs-state-validator/v3/pkg"
	ipfsethdb "github.com/vulcanize/ipfs-ethdb/v3/postgres"

	s "github.com/vulcanize/ipld-eth-server/v3/pkg/serve"
)

const GroupName = "statedb-validate"
const CacheExpiryInMins = 8 * 60 // 8 hours
const CacheSizeInMB = 16         // 16 MB

var validateCmd = &cobra.Command{
	Use:   "validate",
	Short: "validate state",
	Long:  `This command validates the trie for the given state root`,
	Run: func(cmd *cobra.Command, args []string) {
		subCommand = cmd.CalledAs()
		logWithCommand = *log.WithField("SubCommand", subCommand)
		validate()
	},
}

func validate() {
	config, err := s.NewConfig()
	if err != nil {
		logWithCommand.Fatal(err)
	}

	stateRootStr := viper.GetString("stateRoot")
	if stateRootStr == "" {
		logWithCommand.Fatal("must provide a state root for state validation")
	}

	stateRoot := common.HexToHash(stateRootStr)
	cacheSize := viper.GetInt("cacheSize")

	ethDB := ipfsethdb.NewDatabase(config.DB, ipfsethdb.CacheConfig{
		Name:           GroupName,
		Size:           cacheSize * 1024 * 1024,
		ExpiryDuration: time.Minute * time.Duration(CacheExpiryInMins),
	})

	val := validator.NewValidator(nil, ethDB)
	if err = val.ValidateTrie(stateRoot); err != nil {
		log.Fatalln("Error validating state root")
	}

	stats := ethDB.(*ipfsethdb.Database).GetCacheStats()
	log.Debugf("groupcache stats %+v", stats)

	log.Infoln("Successfully validated state root")
}

func init() {
	rootCmd.AddCommand(validateCmd)

	addDatabaseFlags(validateCmd)

	validateCmd.PersistentFlags().String("state-root", "", "root of the state trie we wish to validate")
	viper.BindPFlag("stateRoot", validateCmd.PersistentFlags().Lookup("state-root"))

	validateCmd.PersistentFlags().Int("cache-size", CacheSizeInMB, "cache size in MB")
	viper.BindPFlag("cacheSize", validateCmd.PersistentFlags().Lookup("cache-size"))
}
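After a rebuild, this subcommand would be invoked along the lines of `ipld-eth-server validate --state-root <root> --cache-size 16`, together with the database connection flags registered by `addDatabaseFlags`. The binary name here is an assumption based on the repository name; the build target itself is not shown in this diff.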
@ -19,7 +19,7 @@ import (
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"

-	v "github.com/vulcanize/ipld-eth-server/version"
+	v "github.com/vulcanize/ipld-eth-server/v3/version"
)

// versionCmd represents the version command
@ -1,8 +0,0 @@
-- +goose Up
CREATE TABLE IF NOT EXISTS public.blocks (
  key TEXT UNIQUE NOT NULL,
  data BYTEA NOT NULL
);

-- +goose Down
DROP TABLE public.blocks;
@ -1,12 +0,0 @@
-- +goose Up
CREATE TABLE nodes (
  id SERIAL PRIMARY KEY,
  client_name VARCHAR,
  genesis_block VARCHAR(66),
  network_id VARCHAR,
  node_id VARCHAR(128),
  CONSTRAINT node_uc UNIQUE (genesis_block, network_id, node_id)
);

-- +goose Down
DROP TABLE nodes;
@ -1,5 +0,0 @@
-- +goose Up
CREATE SCHEMA eth;

-- +goose Down
DROP SCHEMA eth;
@ -1,23 +0,0 @@
-- +goose Up
CREATE TABLE eth.header_cids (
  id SERIAL PRIMARY KEY,
  block_number BIGINT NOT NULL,
  block_hash VARCHAR(66) NOT NULL,
  parent_hash VARCHAR(66) NOT NULL,
  cid TEXT NOT NULL,
  mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
  td NUMERIC NOT NULL,
  node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
  reward NUMERIC NOT NULL,
  state_root VARCHAR(66) NOT NULL,
  tx_root VARCHAR(66) NOT NULL,
  receipt_root VARCHAR(66) NOT NULL,
  uncle_root VARCHAR(66) NOT NULL,
  bloom BYTEA NOT NULL,
  timestamp NUMERIC NOT NULL,
  times_validated INTEGER NOT NULL DEFAULT 1,
  UNIQUE (block_number, block_hash)
);

-- +goose Down
DROP TABLE eth.header_cids;
@ -1,14 +0,0 @@
-- +goose Up
CREATE TABLE eth.uncle_cids (
  id SERIAL PRIMARY KEY,
  header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
  block_hash VARCHAR(66) NOT NULL,
  parent_hash VARCHAR(66) NOT NULL,
  cid TEXT NOT NULL,
  mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
  reward NUMERIC NOT NULL,
  UNIQUE (header_id, block_hash)
);

-- +goose Down
DROP TABLE eth.uncle_cids;
@ -1,17 +0,0 @@
-- +goose Up
CREATE TABLE eth.transaction_cids (
  id SERIAL PRIMARY KEY,
  header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
  tx_hash VARCHAR(66) NOT NULL,
  index INTEGER NOT NULL,
  cid TEXT NOT NULL,
  mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
  dst VARCHAR(66) NOT NULL,
  src VARCHAR(66) NOT NULL,
  deployment BOOL NOT NULL,
  tx_data BYTEA,
  UNIQUE (header_id, tx_hash)
);

-- +goose Down
DROP TABLE eth.transaction_cids;
@ -1,18 +0,0 @@
-- +goose Up
CREATE TABLE eth.receipt_cids (
  id SERIAL PRIMARY KEY,
  tx_id INTEGER NOT NULL REFERENCES eth.transaction_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
  cid TEXT NOT NULL,
  mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
  contract VARCHAR(66),
  contract_hash VARCHAR(66),
  topic0s VARCHAR(66)[],
  topic1s VARCHAR(66)[],
  topic2s VARCHAR(66)[],
  topic3s VARCHAR(66)[],
  log_contracts VARCHAR(66)[],
  UNIQUE (tx_id)
);

-- +goose Down
DROP TABLE eth.receipt_cids;
@ -1,15 +0,0 @@
-- +goose Up
CREATE TABLE eth.state_cids (
  id SERIAL PRIMARY KEY,
  header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
  state_leaf_key VARCHAR(66),
  cid TEXT NOT NULL,
  mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
  state_path BYTEA,
  node_type INTEGER NOT NULL,
  diff BOOLEAN NOT NULL DEFAULT FALSE,
  UNIQUE (header_id, state_path)
);

-- +goose Down
DROP TABLE eth.state_cids;
@ -1,15 +0,0 @@
-- +goose Up
CREATE TABLE eth.storage_cids (
  id SERIAL PRIMARY KEY,
  state_id INTEGER NOT NULL REFERENCES eth.state_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
  storage_leaf_key VARCHAR(66),
  cid TEXT NOT NULL,
  mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
  storage_path BYTEA,
  node_type INTEGER NOT NULL,
  diff BOOLEAN NOT NULL DEFAULT FALSE,
  UNIQUE (state_id, storage_path)
);

-- +goose Down
DROP TABLE eth.storage_cids;
@ -1,13 +0,0 @@
-- +goose Up
CREATE TABLE eth.state_accounts (
  id SERIAL PRIMARY KEY,
  state_id INTEGER NOT NULL REFERENCES eth.state_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
  balance NUMERIC NOT NULL,
  nonce INTEGER NOT NULL,
  code_hash BYTEA NOT NULL,
  storage_root VARCHAR(66) NOT NULL,
  UNIQUE (state_id)
);

-- +goose Down
DROP TABLE eth.state_accounts;
@ -1,6 +0,0 @@
-- +goose Up
COMMENT ON TABLE public.nodes IS E'@name NodeInfo';
COMMENT ON TABLE eth.transaction_cids IS E'@name EthTransactionCids';
COMMENT ON TABLE eth.header_cids IS E'@name EthHeaderCids';
COMMENT ON COLUMN public.nodes.node_id IS E'@name ChainNodeID';
COMMENT ON COLUMN eth.header_cids.node_id IS E'@name EthNodeID';
@ -1,21 +0,0 @@
-- +goose Up
ALTER TABLE public.nodes
  ADD COLUMN chain_id INTEGER DEFAULT 1;

ALTER TABLE public.nodes
  DROP CONSTRAINT node_uc;

ALTER TABLE public.nodes
  ADD CONSTRAINT node_uc
  UNIQUE (genesis_block, network_id, node_id, chain_id);

-- +goose Down
ALTER TABLE public.nodes
  DROP CONSTRAINT node_uc;

ALTER TABLE public.nodes
  ADD CONSTRAINT node_uc
  UNIQUE (genesis_block, network_id, node_id);

ALTER TABLE public.nodes
  DROP COLUMN chain_id;
@ -1,69 +0,0 @@
-- +goose Up
-- +goose StatementBegin
CREATE FUNCTION eth.graphql_subscription() returns TRIGGER as $$
declare
  table_name text = TG_ARGV[0];
  attribute text = TG_ARGV[1];
  id text;
begin
  execute 'select $1.' || quote_ident(attribute)
    using new
    into id;
  perform pg_notify('postgraphile:' || table_name,
    json_build_object(
      '__node__', json_build_array(
        table_name,
        id
      )
    )::text
  );
  return new;
end;
$$ language plpgsql;
-- +goose StatementEnd

CREATE TRIGGER header_cids_ai
  after INSERT ON eth.header_cids
  for each row
  execute procedure eth.graphql_subscription('header_cids', 'id');

CREATE TRIGGER receipt_cids_ai
  after INSERT ON eth.receipt_cids
  for each row
  execute procedure eth.graphql_subscription('receipt_cids', 'id');

CREATE TRIGGER state_accounts_ai
  after INSERT ON eth.state_accounts
  for each row
  execute procedure eth.graphql_subscription('state_accounts', 'id');

CREATE TRIGGER state_cids_ai
  after INSERT ON eth.state_cids
  for each row
  execute procedure eth.graphql_subscription('state_cids', 'id');

CREATE TRIGGER storage_cids_ai
  after INSERT ON eth.storage_cids
  for each row
  execute procedure eth.graphql_subscription('storage_cids', 'id');

CREATE TRIGGER transaction_cids_ai
  after INSERT ON eth.transaction_cids
  for each row
  execute procedure eth.graphql_subscription('transaction_cids', 'id');

CREATE TRIGGER uncle_cids_ai
  after INSERT ON eth.uncle_cids
  for each row
  execute procedure eth.graphql_subscription('uncle_cids', 'id');

-- +goose Down
DROP TRIGGER uncle_cids_ai ON eth.uncle_cids;
DROP TRIGGER transaction_cids_ai ON eth.transaction_cids;
DROP TRIGGER storage_cids_ai ON eth.storage_cids;
DROP TRIGGER state_cids_ai ON eth.state_cids;
DROP TRIGGER state_accounts_ai ON eth.state_accounts;
DROP TRIGGER receipt_cids_ai ON eth.receipt_cids;
DROP TRIGGER header_cids_ai ON eth.header_cids;

DROP FUNCTION eth.graphql_subscription();
@ -1,121 +0,0 @@
-- +goose Up
-- header indexes
CREATE INDEX block_number_index ON eth.header_cids USING brin (block_number);
CREATE INDEX block_hash_index ON eth.header_cids USING btree (block_hash);
CREATE INDEX header_cid_index ON eth.header_cids USING btree (cid);
CREATE INDEX header_mh_index ON eth.header_cids USING btree (mh_key);
CREATE INDEX state_root_index ON eth.header_cids USING btree (state_root);
CREATE INDEX timestamp_index ON eth.header_cids USING brin (timestamp);

-- transaction indexes
CREATE INDEX tx_header_id_index ON eth.transaction_cids USING btree (header_id);
CREATE INDEX tx_hash_index ON eth.transaction_cids USING btree (tx_hash);
CREATE INDEX tx_cid_index ON eth.transaction_cids USING btree (cid);
CREATE INDEX tx_mh_index ON eth.transaction_cids USING btree (mh_key);
CREATE INDEX tx_dst_index ON eth.transaction_cids USING btree (dst);
CREATE INDEX tx_src_index ON eth.transaction_cids USING btree (src);

-- receipt indexes
CREATE INDEX rct_tx_id_index ON eth.receipt_cids USING btree (tx_id);
CREATE INDEX rct_cid_index ON eth.receipt_cids USING btree (cid);
CREATE INDEX rct_mh_index ON eth.receipt_cids USING btree (mh_key);
CREATE INDEX rct_contract_index ON eth.receipt_cids USING btree (contract);
CREATE INDEX rct_contract_hash_index ON eth.receipt_cids USING btree (contract_hash);
CREATE INDEX rct_topic0_index ON eth.receipt_cids USING gin (topic0s);
CREATE INDEX rct_topic1_index ON eth.receipt_cids USING gin (topic1s);
CREATE INDEX rct_topic2_index ON eth.receipt_cids USING gin (topic2s);
CREATE INDEX rct_topic3_index ON eth.receipt_cids USING gin (topic3s);
CREATE INDEX rct_log_contract_index ON eth.receipt_cids USING gin (log_contracts);

-- state node indexes
CREATE INDEX state_header_id_index ON eth.state_cids USING btree (header_id);
CREATE INDEX state_leaf_key_index ON eth.state_cids USING btree (state_leaf_key);
CREATE INDEX state_cid_index ON eth.state_cids USING btree (cid);
CREATE INDEX state_mh_index ON eth.state_cids USING btree (mh_key);
CREATE INDEX state_path_index ON eth.state_cids USING btree (state_path);

-- storage node indexes
CREATE INDEX storage_state_id_index ON eth.storage_cids USING btree (state_id);
CREATE INDEX storage_leaf_key_index ON eth.storage_cids USING btree (storage_leaf_key);
CREATE INDEX storage_cid_index ON eth.storage_cids USING btree (cid);
CREATE INDEX storage_mh_index ON eth.storage_cids USING btree (mh_key);
CREATE INDEX storage_path_index ON eth.storage_cids USING btree (storage_path);

-- state accounts indexes
CREATE INDEX account_state_id_index ON eth.state_accounts USING btree (state_id);
CREATE INDEX storage_root_index ON eth.state_accounts USING btree (storage_root);

-- +goose Down
-- state account indexes
DROP INDEX eth.storage_root_index;
DROP INDEX eth.account_state_id_index;

-- storage node indexes
DROP INDEX eth.storage_path_index;
DROP INDEX eth.storage_mh_index;
DROP INDEX eth.storage_cid_index;
DROP INDEX eth.storage_leaf_key_index;
DROP INDEX eth.storage_state_id_index;

-- state node indexes
DROP INDEX eth.state_path_index;
DROP INDEX eth.state_mh_index;
DROP INDEX eth.state_cid_index;
DROP INDEX eth.state_leaf_key_index;
DROP INDEX eth.state_header_id_index;

-- receipt indexes
DROP INDEX eth.rct_log_contract_index;
DROP INDEX eth.rct_topic3_index;
DROP INDEX eth.rct_topic2_index;
DROP INDEX eth.rct_topic1_index;
DROP INDEX eth.rct_topic0_index;
DROP INDEX eth.rct_contract_hash_index;
DROP INDEX eth.rct_contract_index;
DROP INDEX eth.rct_mh_index;
DROP INDEX eth.rct_cid_index;
DROP INDEX eth.rct_tx_id_index;

-- transaction indexes
DROP INDEX eth.tx_src_index;
DROP INDEX eth.tx_dst_index;
DROP INDEX eth.tx_mh_index;
DROP INDEX eth.tx_cid_index;
DROP INDEX eth.tx_hash_index;
DROP INDEX eth.tx_header_id_index;

-- header indexes
DROP INDEX eth.timestamp_index;
DROP INDEX eth.state_root_index;
DROP INDEX eth.header_mh_index;
DROP INDEX eth.header_cid_index;
DROP INDEX eth.block_hash_index;
DROP INDEX eth.block_number_index;
@ -1,48 +0,0 @@
-- +goose Up
-- +goose StatementBegin
-- returns the number of child headers that reference backwards to the header with the provided hash
CREATE OR REPLACE FUNCTION header_weight(hash VARCHAR(66)) RETURNS BIGINT
AS $$
  WITH RECURSIVE validator AS (
    SELECT block_hash, parent_hash, block_number
    FROM eth.header_cids
    WHERE block_hash = hash
    UNION
    SELECT eth.header_cids.block_hash, eth.header_cids.parent_hash, eth.header_cids.block_number
    FROM eth.header_cids
    INNER JOIN validator
      ON eth.header_cids.parent_hash = validator.block_hash
      AND eth.header_cids.block_number = validator.block_number + 1
  )
  SELECT COUNT(*) FROM validator;
$$ LANGUAGE SQL;
-- +goose StatementEnd

-- +goose StatementBegin
-- returns the id for the header at the provided height which is heaviest
CREATE OR REPLACE FUNCTION canonical_header(height BIGINT) RETURNS INT AS
$BODY$
DECLARE
  current_weight INT;
  heaviest_weight INT DEFAULT 0;
  heaviest_id INT;
  r eth.header_cids%ROWTYPE;
BEGIN
  FOR r IN SELECT * FROM eth.header_cids
    WHERE block_number = height
  LOOP
    SELECT INTO current_weight * FROM header_weight(r.block_hash);
    IF current_weight > heaviest_weight THEN
      heaviest_weight := current_weight;
      heaviest_id := r.id;
    END IF;
  END LOOP;
  RETURN heaviest_id;
END
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd

-- +goose Down
DROP FUNCTION header_weight;
DROP FUNCTION canonical_header;
@ -1,7 +0,0 @@
-- +goose Up
ALTER TABLE eth.transaction_cids
  DROP COLUMN deployment;

-- +goose Down
ALTER TABLE eth.transaction_cids
  ADD COLUMN deployment BOOL NOT NULL DEFAULT FALSE;
@ -1,25 +0,0 @@
-- +goose Up
ALTER TABLE eth.storage_cids
  ALTER COLUMN state_id TYPE BIGINT;

ALTER TABLE eth.state_accounts
  ALTER COLUMN state_id TYPE BIGINT;

ALTER TABLE eth.state_cids
  ALTER COLUMN id TYPE BIGINT;

ALTER TABLE eth.storage_cids
  ALTER COLUMN id TYPE BIGINT;

-- +goose Down
ALTER TABLE eth.storage_cids
  ALTER COLUMN id TYPE INTEGER;

ALTER TABLE eth.state_cids
  ALTER COLUMN id TYPE INTEGER;

ALTER TABLE eth.state_accounts
  ALTER COLUMN state_id TYPE INTEGER;

ALTER TABLE eth.storage_cids
  ALTER COLUMN state_id TYPE INTEGER;
@ -1,39 +0,0 @@
-- +goose Up
-- +goose StatementBegin
-- returns if a storage node at the provided path was removed in the range > the provided height and <= the provided block hash
CREATE OR REPLACE FUNCTION was_storage_removed(path BYTEA, height BIGINT, hash VARCHAR(66)) RETURNS BOOLEAN
AS $$
  SELECT exists(SELECT 1
    FROM eth.storage_cids
    INNER JOIN eth.state_cids ON (storage_cids.state_id = state_cids.id)
    INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
    WHERE storage_path = path
    AND block_number > height
    AND block_number <= (SELECT block_number
                         FROM eth.header_cids
                         WHERE block_hash = hash)
    AND storage_cids.node_type = 3
    LIMIT 1);
$$ LANGUAGE SQL;
-- +goose StatementEnd

-- +goose StatementBegin
-- returns if a state node at the provided path was removed in the range > the provided height and <= the provided block hash
CREATE OR REPLACE FUNCTION was_state_removed(path BYTEA, height BIGINT, hash VARCHAR(66)) RETURNS BOOLEAN
AS $$
  SELECT exists(SELECT 1
    FROM eth.state_cids
    INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
    WHERE state_path = path
    AND block_number > height
    AND block_number <= (SELECT block_number
                         FROM eth.header_cids
                         WHERE block_hash = hash)
    AND state_cids.node_type = 3
    LIMIT 1);
$$ LANGUAGE SQL;
-- +goose StatementEnd

-- +goose Down
DROP FUNCTION was_storage_removed;
DROP FUNCTION was_state_removed;
@ -1,121 +0,0 @@
-- +goose Up
-- +goose StatementBegin
CREATE TYPE child_result AS (
  has_child BOOLEAN,
  children eth.header_cids[]
);

CREATE OR REPLACE FUNCTION has_child(hash VARCHAR(66), height BIGINT) RETURNS child_result AS
$BODY$
DECLARE
  child_height INT;
  temp_child eth.header_cids;
  new_child_result child_result;
BEGIN
  child_height = height + 1;
  -- short circuit if there are no children
  SELECT exists(SELECT 1
                FROM eth.header_cids
                WHERE parent_hash = hash
                AND block_number = child_height
                LIMIT 1)
  INTO new_child_result.has_child;
  -- collect all the children for this header
  IF new_child_result.has_child THEN
    FOR temp_child IN
      SELECT * FROM eth.header_cids WHERE parent_hash = hash AND block_number = child_height
    LOOP
      new_child_result.children = array_append(new_child_result.children, temp_child);
    END LOOP;
  END IF;
  RETURN new_child_result;
END
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd

-- +goose StatementBegin
CREATE OR REPLACE FUNCTION canonical_header_from_array(headers eth.header_cids[]) RETURNS eth.header_cids AS
$BODY$
DECLARE
  canonical_header eth.header_cids;
  canonical_child eth.header_cids;
  header eth.header_cids;
  current_child_result child_result;
  child_headers eth.header_cids[];
  current_header_with_child eth.header_cids;
  has_children_count INT DEFAULT 0;
BEGIN
  -- for each header in the provided set
  FOREACH header IN ARRAY headers
  LOOP
    -- check if it has any children
    current_child_result = has_child(header.block_hash, header.block_number);
    IF current_child_result.has_child THEN
      -- if it does, take note
      has_children_count = has_children_count + 1;
      current_header_with_child = header;
      -- and add the children to the growing set of child headers
      child_headers = array_cat(child_headers, current_child_result.children);
    END IF;
  END LOOP;
  -- if none of the headers had children, none is more canonical than the other
  IF has_children_count = 0 THEN
    -- return the first one selected
    SELECT * INTO canonical_header FROM unnest(headers) LIMIT 1;
  -- if only one header had children, it can be considered the heaviest/canonical header of the set
  ELSIF has_children_count = 1 THEN
    -- return the only header with a child
    canonical_header = current_header_with_child;
  -- if there are multiple headers with children
  ELSE
    -- find the canonical header from the child set
    canonical_child = canonical_header_from_array(child_headers);
    -- the header that is parent to this header, is the canonical header at this level
    SELECT * INTO canonical_header FROM unnest(headers)
    WHERE block_hash = canonical_child.parent_hash;
  END IF;
  RETURN canonical_header;
END
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd

-- +goose StatementBegin
CREATE OR REPLACE FUNCTION canonical_header_id(height BIGINT) RETURNS INTEGER AS
$BODY$
DECLARE
  canonical_header eth.header_cids;
  headers eth.header_cids[];
  header_count INT;
  temp_header eth.header_cids;
BEGIN
  -- collect all headers at this height
  FOR temp_header IN
    SELECT * FROM eth.header_cids WHERE block_number = height
  LOOP
    headers = array_append(headers, temp_header);
  END LOOP;
  -- count the number of headers collected
  header_count = array_length(headers, 1);
  -- if we have less than 1 header, return NULL
  IF header_count IS NULL OR header_count < 1 THEN
    RETURN NULL;
  -- if we have one header, return its id
  ELSIF header_count = 1 THEN
    RETURN headers[1].id;
  -- if we have multiple headers we need to determine which one is canonical
  ELSE
    canonical_header = canonical_header_from_array(headers);
    RETURN canonical_header.id;
  END IF;
END;
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd

-- +goose Down
DROP FUNCTION canonical_header_id;
DROP FUNCTION canonical_header_from_array;
DROP FUNCTION has_child;
DROP TYPE child_result;
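These canonical-header helpers are among the migrations removed by this change (the schema now ships separately), but for reference, the sketch below shows how a Go caller could have invoked `canonical_header_id` against a database that still defines it. The connection string is an assumption taken from the docker-compose defaults elsewhere in this diff, not a documented endpoint.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // Postgres driver; already a dependency of this project
)

func main() {
	// Assumed DSN matching the compose file's exposed Postgres port and credentials.
	db, err := sql.Open("postgres", "postgres://vdbm:password@localhost:8077/vulcanize_testing?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// canonical_header_id(height) returns the id of the heaviest header stored
	// at the given block height, or NULL when no header exists at that height.
	var id sql.NullInt64
	if err := db.QueryRow(`SELECT canonical_header_id($1)`, 1000000).Scan(&id); err != nil {
		log.Fatal(err)
	}
	if id.Valid {
		fmt.Println("canonical header id:", id.Int64)
	} else {
		fmt.Println("no header found at that height")
	}
}
```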
@ -1,13 +0,0 @@
-- +goose Up
ALTER TABLE eth.receipt_cids
  ADD COLUMN post_state VARCHAR(66);

ALTER TABLE eth.receipt_cids
  ADD COLUMN post_status INTEGER;

-- +goose Down
ALTER TABLE eth.receipt_cids
  DROP COLUMN post_status;

ALTER TABLE eth.receipt_cids
  DROP COLUMN post_state;
db/schema.sql (1252 lines): file diff suppressed because it is too large.

docker-compose.test.yml (new file, 12 lines)
@ -0,0 +1,12 @@
version: '3.2'

services:
  contract:
    build:
      context: ./test/contract
      args:
        ETH_ADDR: "http://go-ethereum:8545"
    environment:
      ETH_ADDR: "http://go-ethereum:8545"
    ports:
      - "127.0.0.1:3000:3000"
@ -1,68 +1,54 @@
version: '3.2'

services:
-  dapptools:
-    restart: unless-stopped
-    image: vulcanize/dapptools:v0.29.0-statediff-0.0.2
-    ports:
-      - "127.0.0.1:8545:8545"
-      - "127.0.0.1:8546:8546"
-
-  db:
+  ipld-eth-db:
     restart: always
-    image: postgres:10.12-alpine
+    image: vulcanize/ipld-eth-db:v3.2.0
     environment:
       POSTGRES_USER: "vdbm"
-      POSTGRES_DB: "vulcanize_public"
+      POSTGRES_DB: "vulcanize_testing"
       POSTGRES_PASSWORD: "password"
     volumes:
       - vdb_db_eth_server:/var/lib/postgresql/data
     ports:
       - "127.0.0.1:8077:5432"
+    command: ["postgres", "-c", "log_statement=all"]

-  eth-indexer:
-    restart: unless-stopped
-    depends_on:
-      - db
-      - dapptools
-    image: vulcanize/ipld-eth-indexer:v0.3.0-alpha
-    environment:
-      DATABASE_NAME: vulcanize_public
-      DATABASE_HOSTNAME: db
-      DATABASE_PORT: 5432
-      DATABASE_USER: vdbm
-      DATABASE_PASSWORD: password
-      ETH_WS_PATH: "dapptools:8546"
-      ETH_HTTP_PATH: "dapptools:8545"
-      ETH_CHAIN_ID: 4
-      ETH_NETWORK_ID: 4
-      VDB_COMMAND: sync
-
   eth-server:
+    restart: unless-stopped
     depends_on:
-      - db
+      - ipld-eth-db
     build:
       context: ./
       cache_from:
        - alpine:latest
        - golang:1.13-alpine
     environment:
+      IPLD_SERVER_GRAPHQL: "true"
+      IPLD_POSTGRAPHILEPATH: http://graphql:5000
+      ETH_SERVER_HTTPPATH: 0.0.0.0:8081
       VDB_COMMAND: "serve"
-      DATABASE_NAME: "vulcanize_public"
-      DATABASE_HOSTNAME: "db"
+      ETH_CHAIN_CONFIG: "/tmp/chain.json"
+      DATABASE_NAME: "vulcanize_testing"
+      DATABASE_HOSTNAME: "ipld-eth-db"
       DATABASE_PORT: 5432
       DATABASE_USER: "vdbm"
       DATABASE_PASSWORD: "password"
-      SERVER_WS_PATH: "0.0.0.0:8081"
-      SERVER_HTTP_PATH: "0.0.0.0:8082"
+      ETH_CHAIN_ID: 4
+      ETH_FORWARD_ETH_CALLS: $ETH_FORWARD_ETH_CALLS
+      ETH_PROXY_ON_ERROR: $ETH_PROXY_ON_ERROR
+      ETH_HTTP_PATH: $ETH_HTTP_PATH
+    volumes:
+      - type: bind
+        source: ./chain.json
+        target: /tmp/chain.json
     ports:
-      - "127.0.0.1:8080:8080"
       - "127.0.0.1:8081:8081"

   graphql:
     restart: unless-stopped
     depends_on:
-      - db
+      - ipld-eth-db
     image: vulcanize/postgraphile:v1.0.1
     environment:
       - PG_HOST=db
@ -12,7 +12,7 @@ We can expose a number of different APIs for remote access to ipld-eth-server data
ipld-eth-server stores all processed data in Postgres using PG-IPFS; this includes all of the IPLD objects.
[Postgraphile](https://www.graphile.org/postgraphile/) can be used to expose GraphQL endpoints for the Postgres tables.

e.g.

`postgraphile --plugins @graphile/pg-pubsub --subscriptions --simple-subscriptions -c postgres://localhost:5432/vulcanize_public?sslmode=disable -s public,btc,eth -a -j`

@ -33,16 +33,16 @@ by ipld-eth-server to filter and return a requested subset of chain data to the
An example of how to subscribe to a real-time Ethereum data feed from ipld-eth-server using the `Stream` RPC method is provided below

```go
package main

import (
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/spf13/viper"

-	"github.com/vulcanize/ipld-eth-server/pkg/client"
-	"github.com/vulcanize/ipld-eth-server/pkg/eth"
-	"github.com/vulcanize/ipld-eth-server/pkg/watch"
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/client"
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/watch"
)

config, _ := eth.NewEthSubscriptionConfig()

@ -153,16 +153,16 @@ the addresses in the `addresses` fields are pre-hashed ETH addresses.
An example of how to subscribe to a real-time Bitcoin data feed from ipld-eth-server using the `Stream` RPC method is provided below

```go
package main

import (
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/spf13/viper"

-	"github.com/vulcanize/ipld-eth-server/pkg/btc"
-	"github.com/vulcanize/ipld-eth-server/pkg/client"
-	"github.com/vulcanize/ipld-eth-server/pkg/watch"
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/btc"
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/client"
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/watch"
)

config, _ := btc.NewBtcSubscriptionConfig()
@ -1,20 +1,4 @@
#!/bin/sh
-# Runs the db migrations and starts the watcher services
-
-# Construct the connection string for postgres
-VDB_PG_CONNECT=postgresql://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOSTNAME:$DATABASE_PORT/$DATABASE_NAME?sslmode=disable
-
-# Run the DB migrations
-echo "Connecting with: $VDB_PG_CONNECT"
-echo "Running database migrations"
-./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up
-rv=$?
-
-if [ $rv != 0 ]; then
-  echo "Could not run migrations. Are the database details correct?"
-  exit 1
-fi
-
echo "Beginning the ipld-eth-server process"
@ -16,12 +16,15 @@
  graphqlEndpoint = "127.0.0.1:8083" # $SERVER_GRAPHQL_ENDPOINT

[ethereum]
+  chainConfig = "./chain.json" # ETH_CHAIN_CONFIG
  chainID = "1" # $ETH_CHAIN_ID
  defaultSender = "" # $ETH_DEFAULT_SENDER_ADDR
  rpcGasCap = "1000000000000" # $ETH_RPC_GAS_CAP
  httpPath = "127.0.0.1:8545" # $ETH_HTTP_PATH
  supportsStateDiff = true # $ETH_SUPPORTS_STATEDIFF
+  forwardEthCalls = false # $ETH_FORWARD_ETH_CALLS
+  proxyOnError = true # $ETH_PROXY_ON_ERROR
  nodeID = "arch1" # $ETH_NODE_ID
  clientName = "Geth" # $ETH_CLIENT_NAME
  genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" # $ETH_GENESIS_BLOCK
  networkID = "1" # $ETH_NETWORK_ID
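The new `[ethereum]` keys above (`chainConfig`, `forwardEthCalls`, `proxyOnError`) are read through viper like the rest of the config. Below is a minimal sketch of loading such a TOML file and reading these keys; it is not the repository's own config loader, and the file path is an assumption.

```go
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	// Assumed path to a config file shaped like the example above.
	viper.SetConfigFile("config.toml")
	if err := viper.ReadInConfig(); err != nil {
		panic(err)
	}

	// Keys mirror the TOML section and field names shown in the diff.
	fmt.Println("chain config:", viper.GetString("ethereum.chainConfig"))
	fmt.Println("forward eth_call:", viper.GetBool("ethereum.forwardEthCalls"))
	fmt.Println("proxy on error:", viper.GetBool("ethereum.proxyOnError"))
}
```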
|
300
go.mod
300
go.mod
@ -1,29 +1,291 @@
|
|||||||
module github.com/vulcanize/ipld-eth-server
|
module github.com/vulcanize/ipld-eth-server/v3
|
||||||
|
|
||||||
go 1.13
|
go 1.18
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/ethereum/go-ethereum v1.9.25
|
github.com/ethereum/go-ethereum v1.10.18
|
||||||
github.com/graph-gophers/graphql-go v0.0.0-20201003130358-c5bdf3b1108e
|
github.com/graph-gophers/graphql-go v1.3.0
|
||||||
github.com/ipfs/go-block-format v0.0.2
|
github.com/ipfs/go-block-format v0.0.3
|
||||||
github.com/ipfs/go-cid v0.0.7
|
github.com/ipfs/go-cid v0.0.7
|
||||||
github.com/ipfs/go-ipfs-blockstore v1.0.1
|
github.com/ipfs/go-ipfs-blockstore v1.0.1
|
||||||
github.com/ipfs/go-ipfs-ds-help v1.0.0
|
github.com/ipfs/go-ipfs-ds-help v1.0.0
|
||||||
github.com/ipfs/go-ipld-format v0.2.0
|
github.com/ipfs/go-ipld-format v0.2.0
|
||||||
github.com/jmoiron/sqlx v1.2.0
|
github.com/jinzhu/now v1.1.5 // indirect
|
||||||
github.com/lib/pq v1.8.0
|
github.com/jmoiron/sqlx v1.3.5
|
||||||
github.com/multiformats/go-multihash v0.0.14
|
github.com/joho/godotenv v1.4.0
|
||||||
github.com/onsi/ginkgo v1.15.0
|
github.com/lib/pq v1.10.5
|
||||||
github.com/onsi/gomega v1.10.1
|
github.com/machinebox/graphql v0.2.2
|
||||||
github.com/prometheus/client_golang v1.5.1
|
github.com/mailgun/groupcache/v2 v2.3.0
|
||||||
github.com/sirupsen/logrus v1.6.0
|
github.com/multiformats/go-multihash v0.1.0
|
||||||
github.com/spf13/cobra v1.0.0
|
github.com/onsi/ginkgo v1.16.5
|
||||||
github.com/spf13/viper v1.7.0
|
github.com/onsi/gomega v1.19.0
|
||||||
github.com/vulcanize/ipld-eth-indexer v0.7.1-alpha
|
github.com/prometheus/client_golang v1.11.0
|
||||||
github.com/vulcanize/ipfs-ethdb v0.0.2-alpha
|
github.com/sirupsen/logrus v1.8.1
|
||||||
golang.org/x/sys v0.0.0-20210218155724-8ebf48af031b // indirect
|
github.com/spf13/cobra v1.4.0
|
||||||
|
github.com/spf13/viper v1.11.0
|
||||||
|
github.com/thoas/go-funk v0.9.2 // indirect
|
||||||
|
github.com/vulcanize/eth-ipfs-state-validator/v3 v3.0.2
|
||||||
|
github.com/vulcanize/gap-filler v0.3.1
|
||||||
|
github.com/vulcanize/ipfs-ethdb/v3 v3.0.3
|
||||||
|
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
|
||||||
|
gorm.io/driver/postgres v1.3.7
|
||||||
|
gorm.io/gorm v1.23.5
|
||||||
)
|
)
|
||||||
|
|
||||||
replace github.com/ethereum/go-ethereum v1.9.25 => github.com/vulcanize/go-ethereum v1.9.25-statediff-0.0.15
|
require (
|
||||||
|
bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc // indirect
|
||||||
|
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
|
||||||
|
github.com/Stebalien/go-bitfield v0.0.1 // indirect
|
||||||
|
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
|
||||||
|
github.com/benbjohnson/clock v1.1.0 // indirect
|
||||||
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
|
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||||
|
github.com/btcsuite/btcd v0.22.1 // indirect
|
||||||
|
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
|
||||||
|
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
|
||||||
|
github.com/cenkalti/backoff/v4 v4.1.1 // indirect
|
||||||
|
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
||||||
|
github.com/cheekybits/genny v1.0.0 // indirect
|
||||||
|
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect
|
||||||
|
github.com/cskr/pubsub v1.0.2 // indirect
|
||||||
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
|
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
|
||||||
|
github.com/deckarep/golang-set v1.8.0 // indirect
|
||||||
|
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
|
||||||
|
github.com/deepmap/oapi-codegen v1.8.2 // indirect
|
||||||
|
github.com/edsrzf/mmap-go v1.0.0 // indirect
|
||||||
|
github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 // indirect
|
||||||
|
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
|
||||||
|
github.com/flynn/noise v1.0.0 // indirect
|
||||||
|
github.com/francoispqt/gojay v1.2.13 // indirect
|
||||||
|
github.com/friendsofgo/graphiql v0.2.2 // indirect
|
||||||
|
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
||||||
|
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
|
||||||
|
github.com/georgysavva/scany v0.2.9 // indirect
|
||||||
|
github.com/go-ole/go-ole v1.2.1 // indirect
|
||||||
|
github.com/go-stack/stack v1.8.0 // indirect
|
||||||
|
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
|
||||||
|
github.com/gogo/protobuf v1.3.2 // indirect
|
||||||
|
github.com/golang-jwt/jwt/v4 v4.3.0 // indirect
|
||||||
|
github.com/golang/protobuf v1.5.2 // indirect
|
||||||
|
github.com/golang/snappy v0.0.4 // indirect
|
||||||
|
github.com/google/gopacket v1.1.19 // indirect
|
||||||
|
github.com/google/uuid v1.3.0 // indirect
|
||||||
|
github.com/gorilla/websocket v1.4.2 // indirect
|
||||||
|
github.com/graphql-go/graphql v0.7.9 // indirect
|
||||||
|
github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e // indirect
|
||||||
|
github.com/hashicorp/errwrap v1.0.0 // indirect
|
||||||
|
github.com/hashicorp/go-bexpr v0.1.10 // indirect
|
||||||
|
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||||
|
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
|
||||||
|
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||||
|
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
|
||||||
|
github.com/holiman/uint256 v1.2.0 // indirect
|
||||||
|
github.com/huin/goupnp v1.0.3 // indirect
|
||||||
|
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||||
|
github.com/influxdata/influxdb v1.8.3 // indirect
|
||||||
|
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
|
||||||
|
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
|
||||||
|
github.com/ipfs/bbloom v0.0.4 // indirect
|
||||||
|
github.com/ipfs/go-bitswap v0.4.0 // indirect
|
||||||
|
github.com/ipfs/go-blockservice v0.1.7 // indirect
|
||||||
|
github.com/ipfs/go-cidutil v0.0.2 // indirect
|
||||||
|
github.com/ipfs/go-datastore v0.4.6 // indirect
|
||||||
|
github.com/ipfs/go-ds-measure v0.1.0 // indirect
|
||||||
|
github.com/ipfs/go-fetcher v1.5.0 // indirect
|
||||||
|
github.com/ipfs/go-filestore v1.0.0 // indirect
|
||||||
|
github.com/ipfs/go-fs-lock v0.0.7 // indirect
|
||||||
|
github.com/ipfs/go-graphsync v0.8.0 // indirect
|
||||||
|
github.com/ipfs/go-ipfs v0.10.0 // indirect
|
||||||
|
github.com/ipfs/go-ipfs-chunker v0.0.5 // indirect
|
||||||
|
github.com/ipfs/go-ipfs-config v0.16.0 // indirect
|
||||||
|
github.com/ipfs/go-ipfs-delay v0.0.1 // indirect
|
||||||
|
github.com/ipfs/go-ipfs-exchange-interface v0.0.1 // indirect
|
||||||
|
github.com/ipfs/go-ipfs-exchange-offline v0.0.1 // indirect
|
||||||
|
github.com/ipfs/go-ipfs-files v0.0.8 // indirect
|
||||||
|
github.com/ipfs/go-ipfs-keystore v0.0.2 // indirect
|
||||||
|
github.com/ipfs/go-ipfs-pinner v0.1.2 // indirect
|
||||||
|
github.com/ipfs/go-ipfs-posinfo v0.0.1 // indirect
|
||||||
|
github.com/ipfs/go-ipfs-pq v0.0.2 // indirect
|
||||||
|
github.com/ipfs/go-ipfs-provider v0.6.1 // indirect
|
||||||
|
github.com/ipfs/go-ipfs-routing v0.1.0 // indirect
|
||||||
|
github.com/ipfs/go-ipfs-util v0.0.2 // indirect
|
||||||
|
github.com/ipfs/go-ipld-cbor v0.0.5 // indirect
|
||||||
|
github.com/ipfs/go-ipld-legacy v0.1.0 // indirect
|
||||||
|
github.com/ipfs/go-ipns v0.1.2 // indirect
|
||||||
|
github.com/ipfs/go-log v1.0.5 // indirect
|
||||||
|
github.com/ipfs/go-log/v2 v2.3.0 // indirect
|
||||||
|
github.com/ipfs/go-merkledag v0.4.0 // indirect
|
||||||
|
github.com/ipfs/go-metrics-interface v0.0.1 // indirect
|
||||||
|
github.com/ipfs/go-mfs v0.1.2 // indirect
|
||||||
|
github.com/ipfs/go-namesys v0.3.1 // indirect
|
||||||
|
github.com/ipfs/go-path v0.1.2 // indirect
|
||||||
|
github.com/ipfs/go-peertaskqueue v0.4.0 // indirect
|
||||||
|
github.com/ipfs/go-unixfs v0.2.5 // indirect
|
||||||
|
github.com/ipfs/go-unixfsnode v1.1.3 // indirect
|
||||||
|
github.com/ipfs/go-verifcid v0.0.1 // indirect
|
||||||
|
github.com/ipfs/interface-go-ipfs-core v0.5.1 // indirect
|
||||||
|
github.com/ipld/go-codec-dagpb v1.3.0 // indirect
|
||||||
|
github.com/ipld/go-ipld-prime v0.12.2 // indirect
|
||||||
|
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
|
||||||
|
github.com/jackc/pgconn v1.12.1 // indirect
|
||||||
|
github.com/jackc/pgio v1.0.0 // indirect
|
||||||
|
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||||
|
github.com/jackc/pgproto3/v2 v2.3.0 // indirect
|
||||||
|
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
|
||||||
|
github.com/jackc/pgtype v1.11.0 // indirect
|
||||||
|
github.com/jackc/pgx/v4 v4.16.1 // indirect
|
||||||
|
github.com/jackc/puddle v1.2.1 // indirect
|
||||||
|
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||||
|
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
|
||||||
|
github.com/jbenet/goprocess v0.1.4 // indirect
|
||||||
|
github.com/jinzhu/copier v0.2.4 // indirect
|
||||||
|
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||||
|
github.com/klauspost/compress v1.11.7 // indirect
|
||||||
|
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
|
||||||
|
github.com/koron/go-ssdp v0.0.2 // indirect
|
||||||
|
github.com/libp2p/go-addr-util v0.1.0 // indirect
|
||||||
|
github.com/libp2p/go-buffer-pool v0.0.2 // indirect
|
||||||
|
github.com/libp2p/go-cidranger v1.1.0 // indirect
|
||||||
|
github.com/libp2p/go-conn-security-multistream v0.2.1 // indirect
|
||||||
|
github.com/libp2p/go-doh-resolver v0.3.1 // indirect
|
||||||
|
github.com/libp2p/go-eventbus v0.2.1 // indirect
|
||||||
|
github.com/libp2p/go-flow-metrics v0.0.3 // indirect
|
||||||
|
github.com/libp2p/go-libp2p v0.15.0 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-autonat v0.4.2 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-blankhost v0.2.0 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-circuit v0.4.0 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-connmgr v0.2.4 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-core v0.9.0 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-discovery v0.5.1 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-kad-dht v0.13.1 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-kbucket v0.4.7 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-loggables v0.1.0 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-mplex v0.4.1 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-nat v0.0.6 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-noise v0.2.2 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-peerstore v0.2.8 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-pnet v0.2.0 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-pubsub v0.5.4 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-pubsub-router v0.4.0 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-quic-transport v0.12.0 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-record v0.1.3 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-routing-helpers v0.2.3 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-swarm v0.5.3 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-tls v0.2.0 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-transport-upgrader v0.4.6 // indirect
|
||||||
|
github.com/libp2p/go-libp2p-xor v0.0.0-20210714161855-5c005aca55db // indirect
|
||||||
|
github.com/libp2p/go-libp2p-yamux v0.5.4 // indirect
|
||||||
|
github.com/libp2p/go-maddr-filter v0.1.0 // indirect
|
||||||
|
github.com/libp2p/go-mplex v0.3.0 // indirect
|
||||||
|
github.com/libp2p/go-msgio v0.0.6 // indirect
|
||||||
|
github.com/libp2p/go-nat v0.0.5 // indirect
|
||||||
|
github.com/libp2p/go-netroute v0.1.6 // indirect
|
||||||
|
github.com/libp2p/go-openssl v0.0.7 // indirect
|
||||||
|
github.com/libp2p/go-reuseport v0.0.2 // indirect
|
||||||
|
github.com/libp2p/go-reuseport-transport v0.0.5 // indirect
|
||||||
|
github.com/libp2p/go-sockaddr v0.1.1 // indirect
|
||||||
|
github.com/libp2p/go-stream-muxer-multistream v0.3.0 // indirect
|
||||||
|
github.com/libp2p/go-tcp-transport v0.2.8 // indirect
|
||||||
|
github.com/libp2p/go-ws-transport v0.5.0 // indirect
|
||||||
|
github.com/libp2p/go-yamux/v2 v2.2.0 // indirect
|
||||||
|
github.com/libp2p/zeroconf/v2 v2.0.0 // indirect
|
||||||
|
github.com/lucas-clemente/quic-go v0.26.0 // indirect
|
||||||
|
github.com/magiconair/properties v1.8.6 // indirect
|
||||||
|
github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
|
||||||
|
github.com/marten-seemann/qtls-go1-17 v0.1.1 // indirect
|
||||||
|
github.com/marten-seemann/qtls-go1-18 v0.1.1 // indirect
|
||||||
|
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
|
||||||
|
github.com/matryer/is v1.4.0 // indirect
|
||||||
|
github.com/mattn/go-colorable v0.1.12 // indirect
|
||||||
|
github.com/mattn/go-isatty v0.0.14 // indirect
|
||||||
|
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||||
|
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||||
|
github.com/miekg/dns v1.1.43 // indirect
|
||||||
|
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
|
||||||
|
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
|
||||||
|
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
|
||||||
|
github.com/minio/sha256-simd v1.0.0 // indirect
|
||||||
|
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||||
|
github.com/mitchellh/mapstructure v1.4.3 // indirect
|
||||||
|
github.com/mitchellh/pointerstructure v1.2.0 // indirect
|
||||||
|
github.com/mr-tron/base58 v1.2.0 // indirect
|
||||||
|
github.com/multiformats/go-base32 v0.0.3 // indirect
|
||||||
|
github.com/multiformats/go-base36 v0.1.0 // indirect
|
||||||
|
github.com/multiformats/go-multiaddr v0.4.0 // indirect
|
||||||
|
github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
|
||||||
|
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
|
||||||
|
github.com/multiformats/go-multibase v0.0.3 // indirect
|
||||||
|
github.com/multiformats/go-multicodec v0.3.0 // indirect
|
||||||
|
github.com/multiformats/go-multistream v0.2.2 // indirect
|
||||||
|
github.com/multiformats/go-varint v0.0.6 // indirect
|
||||||
|
github.com/nxadm/tail v1.4.8 // indirect
|
||||||
|
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||||
|
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||||
|
github.com/pelletier/go-toml v1.9.4 // indirect
|
||||||
|
github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect
|
||||||
|
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
|
||||||
|
github.com/pganalyze/pg_query_go/v2 v2.1.0 // indirect
|
||||||
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
|
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect
|
||||||
|
github.com/prometheus/client_model v0.2.0 // indirect
|
||||||
|
github.com/prometheus/common v0.30.0 // indirect
|
||||||
|
github.com/prometheus/procfs v0.7.3 // indirect
|
||||||
|
github.com/prometheus/tsdb v0.7.1 // indirect
|
||||||
|
github.com/rjeczalik/notify v0.9.1 // indirect
|
||||||
|
github.com/rs/cors v1.7.0 // indirect
|
||||||
|
github.com/segmentio/fasthash v1.0.3 // indirect
|
||||||
|
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect
|
||||||
|
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
|
||||||
|
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||||
|
github.com/spf13/afero v1.8.2 // indirect
|
||||||
|
github.com/spf13/cast v1.4.1 // indirect
|
||||||
|
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||||
|
github.com/spf13/pflag v1.0.5 // indirect
|
||||||
|
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
|
||||||
|
github.com/stretchr/objx v0.2.0 // indirect
|
||||||
|
github.com/stretchr/testify v1.7.1 // indirect
|
||||||
|
github.com/subosito/gotenv v1.2.0 // indirect
|
||||||
|
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
||||||
|
github.com/tklauser/go-sysconf v0.3.5 // indirect
|
||||||
|
github.com/tklauser/numcpus v0.2.2 // indirect
|
||||||
|
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
|
||||||
|
github.com/valyala/fastjson v1.6.3 // indirect
|
||||||
|
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect
|
||||||
|
github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2 // indirect
|
||||||
|
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect
|
||||||
|
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
|
||||||
|
github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9 // indirect
|
||||||
|
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 // indirect
|
||||||
|
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect
|
||||||
|
go.opencensus.io v0.23.0 // indirect
|
||||||
|
go.opentelemetry.io/otel v0.20.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/metric v0.20.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/trace v0.20.0 // indirect
|
||||||
|
go.uber.org/atomic v1.9.0 // indirect
|
||||||
|
go.uber.org/dig v1.10.0 // indirect
|
||||||
|
go.uber.org/fx v1.13.1 // indirect
|
||||||
|
go.uber.org/multierr v1.7.0 // indirect
|
||||||
|
go.uber.org/zap v1.19.0 // indirect
|
||||||
|
go4.org v0.0.0-20200411211856-f5505b9728dd // indirect
|
||||||
|
golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 // indirect
|
||||||
|
golang.org/x/net v0.0.0-20220412020605-290c469a71a5 // indirect
|
||||||
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
|
||||||
|
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
|
||||||
|
golang.org/x/text v0.3.7 // indirect
|
||||||
|
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
|
||||||
|
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023 // indirect
|
||||||
|
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
|
||||||
|
google.golang.org/protobuf v1.28.0 // indirect
|
||||||
|
gopkg.in/ini.v1 v1.66.4 // indirect
|
||||||
|
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||||
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||||
|
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
|
||||||
|
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||||
|
lukechampine.com/blake3 v1.1.6 // indirect
|
||||||
|
)
replace github.com/vulcanize/ipfs-ethdb v0.0.2-alpha => github.com/vulcanize/pg-ipfs-ethdb v0.0.2-alpha
replace github.com/ethereum/go-ethereum v1.10.18 => github.com/vulcanize/go-ethereum v1.10.18-statediff-3.2.2
2  main.go
@@ -18,7 +18,7 @@ package main
 import (
     "github.com/sirupsen/logrus"

-    "github.com/vulcanize/ipld-eth-server/cmd"
+    "github.com/vulcanize/ipld-eth-server/v3/cmd"
 )

 func main() {
BIN  pkg/.DS_Store  vendored  Normal file
Binary file not shown.
@@ -20,11 +20,10 @@ package client
 import (
     "context"

-    "github.com/vulcanize/ipld-eth-server/pkg/eth"
-
     "github.com/ethereum/go-ethereum/rpc"

-    "github.com/vulcanize/ipld-eth-server/pkg/serve"
+    "github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
+    "github.com/vulcanize/ipld-eth-server/v3/pkg/serve"
 )

 // Client is used to subscribe to the ipld-eth-server ipld data stream
437  pkg/eth/api.go
@@ -18,28 +18,31 @@ package eth

 import (
     "context"
+    "database/sql"
     "encoding/json"
     "errors"
     "fmt"
-    "math"
+    "io"
     "math/big"
+    "strconv"
     "time"

-    "github.com/ethereum/go-ethereum"
+    "github.com/ethereum/go-ethereum/accounts/abi"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/common/hexutil"
+    "github.com/ethereum/go-ethereum/common/math"
     "github.com/ethereum/go-ethereum/core"
+    "github.com/ethereum/go-ethereum/core/state"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/crypto"
+    "github.com/ethereum/go-ethereum/eth/filters"
     "github.com/ethereum/go-ethereum/ethclient"
-    "github.com/ethereum/go-ethereum/params"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/rpc"
     "github.com/ethereum/go-ethereum/statediff"
     "github.com/sirupsen/logrus"

-    "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
-    "github.com/vulcanize/ipld-eth-server/pkg/shared"
+    "github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
 )

 // APIName is the namespace for the watcher's eth api
@@ -48,18 +51,27 @@ const APIName = "eth"
 // APIVersion is the version of the watcher's eth api
 const APIVersion = "0.0.1"

+// PublicEthAPI is the eth namespace API
 type PublicEthAPI struct {
     // Local db backend
     B *Backend

     // Proxy node for forwarding cache misses
-    supportsStateDiff bool // Whether or not the remote node supports the statediff_writeStateDiffAt endpoint, if it does we can fill the local cache when we hit a miss
+    supportsStateDiff bool // Whether the remote node supports the statediff_writeStateDiffAt endpoint, if it does we can fill the local cache when we hit a miss
     rpc               *rpc.Client
     ethClient         *ethclient.Client
+    forwardEthCalls   bool // if true, forward eth_call calls directly to the configured proxy node
+    proxyOnError      bool // turn on regular proxy fall-through on errors; needed to test difference between direct and indirect fall-through
 }

 // NewPublicEthAPI creates a new PublicEthAPI with the provided underlying Backend
-func NewPublicEthAPI(b *Backend, client *rpc.Client, supportsStateDiff bool) *PublicEthAPI {
+func NewPublicEthAPI(b *Backend, client *rpc.Client, supportsStateDiff, forwardEthCalls, proxyOnError bool) (*PublicEthAPI, error) {
+    if forwardEthCalls && client == nil {
+        return nil, errors.New("ipld-eth-server is configured to forward eth_calls to proxy node but no proxy node is configured")
+    }
+    if proxyOnError && client == nil {
+        return nil, errors.New("ipld-eth-server is configured to forward all calls to proxy node on errors but no proxy node is configured")
+    }
     var ethClient *ethclient.Client
     if client != nil {
         ethClient = ethclient.NewClient(client)
@@ -69,7 +81,9 @@ func NewPublicEthAPI(b *Backend, client *rpc.Client, supportsStateDiff bool) *Pu
         supportsStateDiff: supportsStateDiff,
         rpc:               client,
         ethClient:         ethClient,
-    }
+        forwardEthCalls:   forwardEthCalls,
+        proxyOnError:      proxyOnError,
+    }, nil
 }

 /*
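As a point of reference, here is a minimal sketch of how a caller might wire up the new constructor. Only the NewPublicEthAPI signature and the flag names come from the hunk above; the endpoint URL and the nil backend are placeholders, and a real caller would build the backend via eth.NewEthBackend as the test setup later in this diff does.

package main

import (
    "log"

    "github.com/ethereum/go-ethereum/rpc"

    "github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
)

func main() {
    // Hypothetical statediffing geth endpoint acting as the proxy node.
    proxy, err := rpc.Dial("http://localhost:8545")
    if err != nil {
        log.Fatal(err)
    }

    // Left nil purely to keep the sketch short; normally built elsewhere.
    var backend *eth.Backend

    // supportsStateDiff: proxy can backfill via statediff_writeStateDiffAt on cache misses.
    // forwardEthCalls:   false keeps eth_call served locally.
    // proxyOnError:      true falls through to the proxy whenever the local read fails.
    api, err := eth.NewPublicEthAPI(backend, proxy, true, false, true)
    if err != nil {
        log.Fatal(err) // returned when forwarding is requested without a proxy client
    }
    _ = api
}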
@@ -86,12 +100,13 @@ func (pea *PublicEthAPI) GetHeaderByNumber(ctx context.Context, number rpc.Block
     if header != nil && err == nil {
         return pea.rpcMarshalHeader(header)
     }
-    if pea.ethClient != nil {
+
+    if pea.proxyOnError {
         if header, err := pea.ethClient.HeaderByNumber(ctx, big.NewInt(number.Int64())); header != nil && err == nil {
             go pea.writeStateDiffAt(number.Int64())
             return pea.rpcMarshalHeader(header)
         }
     }

     return nil, err
 }

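The supportsStateDiff comment above describes the backfill half of this read-through pattern: when the local database misses and the proxy answers, the server asynchronously asks that same proxy to index the missing block. A rough, self-contained sketch of such a call follows; only the statediff_writeStateDiffAt method name comes from the diff, while the empty params payload, the timeout, and the endpoint URL are assumptions, and the project's real logic lives in writeStateDiffAt later in this file.

package main

import (
    "context"
    "encoding/json"
    "time"

    "github.com/ethereum/go-ethereum/rpc"
    "github.com/ethereum/go-ethereum/statediff"
    "github.com/sirupsen/logrus"
)

// backfillStateDiffAt is a hypothetical stand-in for writeStateDiffAt: it asks the
// proxy node to index the block that the local database was missing.
func backfillStateDiffAt(client *rpc.Client, height int64) {
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    var data json.RawMessage     // the response payload is not inspected here
    params := statediff.Params{} // flag selection omitted; an assumption, not the server's real config
    if err := client.CallContext(ctx, &data, "statediff_writeStateDiffAt", height, params); err != nil {
        logrus.Errorf("statediff_writeStateDiffAt(%d) failed: %s", height, err.Error())
    }
}

func main() {
    client, err := rpc.Dial("http://localhost:8545") // hypothetical proxy endpoint
    if err != nil {
        logrus.Fatal(err)
    }
    // In the API above this runs as `go pea.writeStateDiffAt(...)`, i.e. fire-and-forget.
    backfillStateDiffAt(client, 1_000_000)
}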
@@ -103,7 +118,8 @@ func (pea *PublicEthAPI) GetHeaderByHash(ctx context.Context, hash common.Hash)
             return res
         }
     }
-    if pea.ethClient != nil {
+
+    if pea.proxyOnError {
         if header, err := pea.ethClient.HeaderByHash(ctx, hash); header != nil && err == nil {
             go pea.writeStateDiffFor(hash)
             if res, err := pea.rpcMarshalHeader(header); err != nil {
@@ -111,6 +127,7 @@ func (pea *PublicEthAPI) GetHeaderByHash(ctx context.Context, hash common.Hash)
             }
         }
     }
+
     return nil
 }

@@ -121,7 +138,9 @@ func (pea *PublicEthAPI) rpcMarshalHeader(header *types.Header) (map[string]inte
     if err != nil {
         return nil, err
     }
+
     fields["totalDifficulty"] = (*hexutil.Big)(td)
+
     return fields, nil
 }

@@ -141,12 +160,14 @@ func (pea *PublicEthAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockN
     if block != nil && err == nil {
         return pea.rpcMarshalBlock(block, true, fullTx)
     }
-    if pea.ethClient != nil {
+
+    if pea.proxyOnError {
         if block, err := pea.ethClient.BlockByNumber(ctx, big.NewInt(number.Int64())); block != nil && err == nil {
             go pea.writeStateDiffAt(number.Int64())
             return pea.rpcMarshalBlock(block, true, fullTx)
         }
     }

     return nil, err
 }

@@ -157,15 +178,35 @@ func (pea *PublicEthAPI) GetBlockByHash(ctx context.Context, hash common.Hash, f
     if block != nil && err == nil {
         return pea.rpcMarshalBlock(block, true, fullTx)
     }
-    if pea.ethClient != nil {
+
+    if pea.proxyOnError {
         if block, err := pea.ethClient.BlockByHash(ctx, hash); block != nil && err == nil {
             go pea.writeStateDiffFor(hash)
             return pea.rpcMarshalBlock(block, true, fullTx)
         }
     }

     return nil, err
 }

+// ChainId is the EIP-155 replay-protection chain id for the current ethereum chain config.
+func (pea *PublicEthAPI) ChainId() (*hexutil.Big, error) {
+    block, err := pea.B.CurrentBlock()
+    if err != nil {
+        if pea.proxyOnError {
+            if id, err := pea.ethClient.ChainID(context.Background()); err == nil {
+                return (*hexutil.Big)(id), nil
+            }
+        }
+        return nil, err
+    }
+
+    if config := pea.B.Config.ChainConfig; config.IsEIP155(block.Number()) {
+        return (*hexutil.Big)(config.ChainID), nil
+    }
+    return nil, fmt.Errorf("chain not synced beyond EIP-155 replay-protection fork block")
+}
+
 /*

 Uncles
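For context on why the new ChainId endpoint matters to clients: the id it returns is folded into every EIP-155 signature, so a wallet typically fetches it once and derives a signer from it. A small illustrative sketch against a hypothetical endpoint (the URL is a placeholder, everything else is standard go-ethereum client API):

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/ethclient"
)

func main() {
    // Hypothetical ipld-eth-server (or geth) JSON-RPC endpoint.
    client, err := ethclient.Dial("http://localhost:8081")
    if err != nil {
        log.Fatal(err)
    }

    // eth_chainId backs replay protection: a transaction signed for one chain id
    // is invalid on any chain with a different id.
    chainID, err := client.ChainID(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    signer := types.LatestSignerForChainID(chainID)
    fmt.Println("chain id:", chainID, "signer:", signer)
}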
@ -185,12 +226,14 @@ func (pea *PublicEthAPI) GetUncleByBlockNumberAndIndex(ctx context.Context, bloc
|
|||||||
block = types.NewBlockWithHeader(uncles[index])
|
block = types.NewBlockWithHeader(uncles[index])
|
||||||
return pea.rpcMarshalBlock(block, false, false)
|
return pea.rpcMarshalBlock(block, false, false)
|
||||||
}
|
}
|
||||||
if pea.rpc != nil {
|
|
||||||
|
if pea.proxyOnError {
|
||||||
if uncle, uncleHashes, err := getBlockAndUncleHashes(pea.rpc, ctx, "eth_getUncleByBlockNumberAndIndex", blockNr, index); uncle != nil && err == nil {
|
if uncle, uncleHashes, err := getBlockAndUncleHashes(pea.rpc, ctx, "eth_getUncleByBlockNumberAndIndex", blockNr, index); uncle != nil && err == nil {
|
||||||
go pea.writeStateDiffAt(blockNr.Int64())
|
go pea.writeStateDiffAt(blockNr.Int64())
|
||||||
return pea.rpcMarshalBlockWithUncleHashes(uncle, uncleHashes, false, false)
|
return pea.rpcMarshalBlockWithUncleHashes(uncle, uncleHashes, false, false)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -207,12 +250,14 @@ func (pea *PublicEthAPI) GetUncleByBlockHashAndIndex(ctx context.Context, blockH
|
|||||||
block = types.NewBlockWithHeader(uncles[index])
|
block = types.NewBlockWithHeader(uncles[index])
|
||||||
return pea.rpcMarshalBlock(block, false, false)
|
return pea.rpcMarshalBlock(block, false, false)
|
||||||
}
|
}
|
||||||
if pea.rpc != nil {
|
|
||||||
|
if pea.proxyOnError {
|
||||||
if uncle, uncleHashes, err := getBlockAndUncleHashes(pea.rpc, ctx, "eth_getUncleByBlockHashAndIndex", blockHash, index); uncle != nil && err == nil {
|
if uncle, uncleHashes, err := getBlockAndUncleHashes(pea.rpc, ctx, "eth_getUncleByBlockHashAndIndex", blockHash, index); uncle != nil && err == nil {
|
||||||
go pea.writeStateDiffFor(blockHash)
|
go pea.writeStateDiffFor(blockHash)
|
||||||
return pea.rpcMarshalBlockWithUncleHashes(uncle, uncleHashes, false, false)
|
return pea.rpcMarshalBlockWithUncleHashes(uncle, uncleHashes, false, false)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -222,13 +267,15 @@ func (pea *PublicEthAPI) GetUncleCountByBlockNumber(ctx context.Context, blockNr
|
|||||||
n := hexutil.Uint(len(block.Uncles()))
|
n := hexutil.Uint(len(block.Uncles()))
|
||||||
return &n
|
return &n
|
||||||
}
|
}
|
||||||
if pea.rpc != nil {
|
|
||||||
|
if pea.proxyOnError {
|
||||||
var num *hexutil.Uint
|
var num *hexutil.Uint
|
||||||
if err := pea.rpc.CallContext(ctx, &num, "eth_getUncleCountByBlockNumber", blockNr); num != nil && err == nil {
|
if err := pea.rpc.CallContext(ctx, &num, "eth_getUncleCountByBlockNumber", blockNr); num != nil && err == nil {
|
||||||
go pea.writeStateDiffAt(blockNr.Int64())
|
go pea.writeStateDiffAt(blockNr.Int64())
|
||||||
return num
|
return num
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -238,13 +285,15 @@ func (pea *PublicEthAPI) GetUncleCountByBlockHash(ctx context.Context, blockHash
|
|||||||
n := hexutil.Uint(len(block.Uncles()))
|
n := hexutil.Uint(len(block.Uncles()))
|
||||||
return &n
|
return &n
|
||||||
}
|
}
|
||||||
if pea.rpc != nil {
|
|
||||||
|
if pea.proxyOnError {
|
||||||
var num *hexutil.Uint
|
var num *hexutil.Uint
|
||||||
if err := pea.rpc.CallContext(ctx, &num, "eth_getUncleCountByBlockHash", blockHash); num != nil && err == nil {
|
if err := pea.rpc.CallContext(ctx, &num, "eth_getUncleCountByBlockHash", blockHash); num != nil && err == nil {
|
||||||
go pea.writeStateDiffFor(blockHash)
|
go pea.writeStateDiffFor(blockHash)
|
||||||
return num
|
return num
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -260,13 +309,15 @@ func (pea *PublicEthAPI) GetTransactionCount(ctx context.Context, address common
|
|||||||
if count != nil && err == nil {
|
if count != nil && err == nil {
|
||||||
return count, nil
|
return count, nil
|
||||||
}
|
}
|
||||||
if pea.rpc != nil {
|
|
||||||
|
if pea.proxyOnError {
|
||||||
var num *hexutil.Uint64
|
var num *hexutil.Uint64
|
||||||
if err := pea.rpc.CallContext(ctx, &num, "eth_getTransactionCount", address, blockNrOrHash); num != nil && err == nil {
|
if err := pea.rpc.CallContext(ctx, &num, "eth_getTransactionCount", address, blockNrOrHash); num != nil && err == nil {
|
||||||
go pea.writeStateDiffAtOrFor(blockNrOrHash)
|
go pea.writeStateDiffAtOrFor(blockNrOrHash)
|
||||||
return num, nil
|
return num, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -275,6 +326,7 @@ func (pea *PublicEthAPI) localGetTransactionCount(ctx context.Context, address c
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
nonce := hexutil.Uint64(account.Nonce)
|
nonce := hexutil.Uint64(account.Nonce)
|
||||||
return &nonce, nil
|
return &nonce, nil
|
||||||
}
|
}
|
||||||
@ -285,13 +337,15 @@ func (pea *PublicEthAPI) GetBlockTransactionCountByNumber(ctx context.Context, b
|
|||||||
n := hexutil.Uint(len(block.Transactions()))
|
n := hexutil.Uint(len(block.Transactions()))
|
||||||
return &n
|
return &n
|
||||||
}
|
}
|
||||||
if pea.rpc != nil {
|
|
||||||
|
if pea.proxyOnError {
|
||||||
var num *hexutil.Uint
|
var num *hexutil.Uint
|
||||||
if err := pea.rpc.CallContext(ctx, &num, "eth_getBlockTransactionCountByNumber", blockNr); num != nil && err == nil {
|
if err := pea.rpc.CallContext(ctx, &num, "eth_getBlockTransactionCountByNumber", blockNr); num != nil && err == nil {
|
||||||
go pea.writeStateDiffAt(blockNr.Int64())
|
go pea.writeStateDiffAt(blockNr.Int64())
|
||||||
return num
|
return num
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -301,13 +355,15 @@ func (pea *PublicEthAPI) GetBlockTransactionCountByHash(ctx context.Context, blo
|
|||||||
n := hexutil.Uint(len(block.Transactions()))
|
n := hexutil.Uint(len(block.Transactions()))
|
||||||
return &n
|
return &n
|
||||||
}
|
}
|
||||||
if pea.rpc != nil {
|
|
||||||
|
if pea.proxyOnError {
|
||||||
var num *hexutil.Uint
|
var num *hexutil.Uint
|
||||||
if err := pea.rpc.CallContext(ctx, &num, "eth_getBlockTransactionCountByHash", blockHash); num != nil && err == nil {
|
if err := pea.rpc.CallContext(ctx, &num, "eth_getBlockTransactionCountByHash", blockHash); num != nil && err == nil {
|
||||||
go pea.writeStateDiffFor(blockHash)
|
go pea.writeStateDiffFor(blockHash)
|
||||||
return num
|
return num
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -316,13 +372,15 @@ func (pea *PublicEthAPI) GetTransactionByBlockNumberAndIndex(ctx context.Context
|
|||||||
if block, _ := pea.B.BlockByNumber(ctx, blockNr); block != nil {
|
if block, _ := pea.B.BlockByNumber(ctx, blockNr); block != nil {
|
||||||
return newRPCTransactionFromBlockIndex(block, uint64(index))
|
return newRPCTransactionFromBlockIndex(block, uint64(index))
|
||||||
}
|
}
|
||||||
if pea.rpc != nil {
|
|
||||||
|
if pea.proxyOnError {
|
||||||
var tx *RPCTransaction
|
var tx *RPCTransaction
|
||||||
if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByBlockNumberAndIndex", blockNr, index); tx != nil && err == nil {
|
if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByBlockNumberAndIndex", blockNr, index); tx != nil && err == nil {
|
||||||
go pea.writeStateDiffAt(blockNr.Int64())
|
go pea.writeStateDiffAt(blockNr.Int64())
|
||||||
return tx
|
return tx
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -331,13 +389,15 @@ func (pea *PublicEthAPI) GetTransactionByBlockHashAndIndex(ctx context.Context,
|
|||||||
if block, _ := pea.B.BlockByHash(ctx, blockHash); block != nil {
|
if block, _ := pea.B.BlockByHash(ctx, blockHash); block != nil {
|
||||||
return newRPCTransactionFromBlockIndex(block, uint64(index))
|
return newRPCTransactionFromBlockIndex(block, uint64(index))
|
||||||
}
|
}
|
||||||
if pea.rpc != nil {
|
|
||||||
|
if pea.proxyOnError {
|
||||||
var tx *RPCTransaction
|
var tx *RPCTransaction
|
||||||
if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByBlockHashAndIndex", blockHash, index); tx != nil && err == nil {
|
if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByBlockHashAndIndex", blockHash, index); tx != nil && err == nil {
|
||||||
go pea.writeStateDiffFor(blockHash)
|
go pea.writeStateDiffFor(blockHash)
|
||||||
return tx
|
return tx
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -346,7 +406,7 @@ func (pea *PublicEthAPI) GetRawTransactionByBlockNumberAndIndex(ctx context.Cont
|
|||||||
if block, _ := pea.B.BlockByNumber(ctx, blockNr); block != nil {
|
if block, _ := pea.B.BlockByNumber(ctx, blockNr); block != nil {
|
||||||
return newRPCRawTransactionFromBlockIndex(block, uint64(index))
|
return newRPCRawTransactionFromBlockIndex(block, uint64(index))
|
||||||
}
|
}
|
||||||
if pea.rpc != nil {
|
if pea.proxyOnError {
|
||||||
var tx hexutil.Bytes
|
var tx hexutil.Bytes
|
||||||
if err := pea.rpc.CallContext(ctx, &tx, "eth_getRawTransactionByBlockNumberAndIndex", blockNr, index); tx != nil && err == nil {
|
if err := pea.rpc.CallContext(ctx, &tx, "eth_getRawTransactionByBlockNumberAndIndex", blockNr, index); tx != nil && err == nil {
|
||||||
go pea.writeStateDiffAt(blockNr.Int64())
|
go pea.writeStateDiffAt(blockNr.Int64())
|
||||||
@ -361,7 +421,7 @@ func (pea *PublicEthAPI) GetRawTransactionByBlockHashAndIndex(ctx context.Contex
|
|||||||
if block, _ := pea.B.BlockByHash(ctx, blockHash); block != nil {
|
if block, _ := pea.B.BlockByHash(ctx, blockHash); block != nil {
|
||||||
return newRPCRawTransactionFromBlockIndex(block, uint64(index))
|
return newRPCRawTransactionFromBlockIndex(block, uint64(index))
|
||||||
}
|
}
|
||||||
if pea.rpc != nil {
|
if pea.proxyOnError {
|
||||||
var tx hexutil.Bytes
|
var tx hexutil.Bytes
|
||||||
if err := pea.rpc.CallContext(ctx, &tx, "eth_getRawTransactionByBlockHashAndIndex", blockHash, index); tx != nil && err == nil {
|
if err := pea.rpc.CallContext(ctx, &tx, "eth_getRawTransactionByBlockHashAndIndex", blockHash, index); tx != nil && err == nil {
|
||||||
go pea.writeStateDiffFor(blockHash)
|
go pea.writeStateDiffFor(blockHash)
|
||||||
@ -376,9 +436,14 @@ func (pea *PublicEthAPI) GetRawTransactionByBlockHashAndIndex(ctx context.Contex
|
|||||||
func (pea *PublicEthAPI) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) {
|
func (pea *PublicEthAPI) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) {
|
||||||
tx, blockHash, blockNumber, index, err := pea.B.GetTransaction(ctx, hash)
|
tx, blockHash, blockNumber, index, err := pea.B.GetTransaction(ctx, hash)
|
||||||
if tx != nil && err == nil {
|
if tx != nil && err == nil {
|
||||||
return NewRPCTransaction(tx, blockHash, blockNumber, index), nil
|
header, err := pea.B.HeaderByHash(ctx, blockHash)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewRPCTransaction(tx, blockHash, blockNumber, index, header.BaseFee), nil
|
||||||
}
|
}
|
||||||
if pea.rpc != nil {
|
if pea.proxyOnError {
|
||||||
var tx *RPCTransaction
|
var tx *RPCTransaction
|
||||||
if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByHash", hash); tx != nil && err == nil {
|
if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByHash", hash); tx != nil && err == nil {
|
||||||
go pea.writeStateDiffFor(hash)
|
go pea.writeStateDiffFor(hash)
|
||||||
@ -395,7 +460,7 @@ func (pea *PublicEthAPI) GetRawTransactionByHash(ctx context.Context, hash commo
|
|||||||
if tx != nil && err == nil {
|
if tx != nil && err == nil {
|
||||||
return rlp.EncodeToBytes(tx)
|
return rlp.EncodeToBytes(tx)
|
||||||
}
|
}
|
||||||
if pea.rpc != nil {
|
if pea.proxyOnError {
|
||||||
var tx hexutil.Bytes
|
var tx hexutil.Bytes
|
||||||
if err := pea.rpc.CallContext(ctx, &tx, "eth_getRawTransactionByHash", hash); tx != nil && err == nil {
|
if err := pea.rpc.CallContext(ctx, &tx, "eth_getRawTransactionByHash", hash); tx != nil && err == nil {
|
||||||
go pea.writeStateDiffFor(hash)
|
go pea.writeStateDiffFor(hash)
|
||||||
@ -417,7 +482,7 @@ func (pea *PublicEthAPI) GetTransactionReceipt(ctx context.Context, hash common.
|
|||||||
if receipt != nil && err == nil {
|
if receipt != nil && err == nil {
|
||||||
return receipt, nil
|
return receipt, nil
|
||||||
}
|
}
|
||||||
if pea.rpc != nil {
|
if pea.proxyOnError {
|
||||||
if receipt := pea.remoteGetTransactionReceipt(ctx, hash); receipt != nil {
|
if receipt := pea.remoteGetTransactionReceipt(ctx, hash); receipt != nil {
|
||||||
go pea.writeStateDiffFor(hash)
|
go pea.writeStateDiffFor(hash)
|
||||||
return receipt, nil
|
return receipt, nil
|
||||||
@ -439,6 +504,14 @@ func (pea *PublicEthAPI) localGetTransactionReceipt(ctx context.Context, hash co
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
block, err := pea.B.BlockByHash(ctx, blockHash)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
err = receipts.DeriveFields(pea.B.Config.ChainConfig, blockHash, blockNumber, block.Transactions())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
if len(receipts) <= int(index) {
|
if len(receipts) <= int(index) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
@@ -505,30 +578,30 @@ func (pea *PublicEthAPI) remoteGetTransactionReceipt(ctx context.Context, hash c
 // GetLogs returns logs matching the given argument that are stored within the state.
 //
 // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs
-func (pea *PublicEthAPI) GetLogs(ctx context.Context, crit ethereum.FilterQuery) ([]*types.Log, error) {
-    logs, err := pea.localGetLogs(ctx, crit)
-    if err != nil && pea.rpc != nil {
-        if arg, err := toFilterArg(crit); err == nil {
-            var res []*types.Log
-            if err := pea.rpc.CallContext(ctx, &res, "eth_getLogs", arg); err == nil {
-                go pea.writeStateDiffWithCriteria(crit)
-                return res, nil
-            }
-        }
+func (pea *PublicEthAPI) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([]*types.Log, error) {
+    logs, err := pea.localGetLogs(crit)
+    if err != nil && pea.proxyOnError {
+        var res []*types.Log
+        if err := pea.rpc.CallContext(ctx, &res, "eth_getLogs", crit); err == nil {
+            go pea.writeStateDiffWithCriteria(crit)
+            return res, nil
+        }
     }
     return logs, err
 }

-func (pea *PublicEthAPI) localGetLogs(ctx context.Context, crit ethereum.FilterQuery) ([]*types.Log, error) {
+func (pea *PublicEthAPI) localGetLogs(crit filters.FilterCriteria) ([]*types.Log, error) {
     // TODO: this can be optimized away from using the old cid retriever and ipld fetcher interfaces
     // Convert FilterQuery into ReceiptFilter
     addrStrs := make([]string, len(crit.Addresses))
     for i, addr := range crit.Addresses {
         addrStrs[i] = addr.String()
     }
-    topicStrSets := make([][]string, 4)
+
+    topicStrSets := make([][]string, len(crit.Topics))
     for i, topicSet := range crit.Topics {
         if i > 3 {
+            topicStrSets = topicStrSets[:4]
             // don't allow more than 4 topics
             break
         }
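For readers unfamiliar with the filters package, a minimal sketch of building the FilterCriteria value that the rewritten GetLogs now accepts. The address and block range are illustrative; the topic is the standard ERC-20 Transfer event signature, and, per localGetLogs above, topic positions beyond the fourth are dropped.

package main

import (
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/eth/filters"
)

func main() {
    // keccak256("Transfer(address,address,uint256)")
    transferSig := common.HexToHash("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef")

    crit := filters.FilterCriteria{
        FromBlock: big.NewInt(1_000_000),
        ToBlock:   big.NewInt(1_000_100),
        Addresses: []common.Address{common.HexToAddress("0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f")},
        Topics:    [][]common.Hash{{transferSig}}, // position 0 only; extra positions are ignored past 4
    }
    _ = crit // would be passed to GetLogs / eth_getLogs
}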
@ -557,21 +630,16 @@ func (pea *PublicEthAPI) localGetLogs(ctx context.Context, crit ethereum.FilterQ
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// If we have a blockhash to filter on, fire off single retrieval query
|
// If we have a blockHash to filter on, fire off single retrieval query
|
||||||
if crit.BlockHash != nil {
|
if crit.BlockHash != nil {
|
||||||
rctCIDs, err := pea.B.Retriever.RetrieveRctCIDs(tx, filter, 0, crit.BlockHash, nil)
|
filteredLogs, err := pea.B.Retriever.RetrieveFilteredLog(tx, filter, 0, crit.BlockHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
rctIPLDs, err := pea.B.Fetcher.FetchRcts(tx, rctCIDs)
|
|
||||||
if err != nil {
|
return decomposeLogs(filteredLogs)
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := tx.Commit(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return extractLogsOfInterest(rctIPLDs, filter.Topics)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Otherwise, create block range from criteria
|
// Otherwise, create block range from criteria
|
||||||
// nil values are filled in; to request a single block have both ToBlock and FromBlock equal that number
|
// nil values are filled in; to request a single block have both ToBlock and FromBlock equal that number
|
||||||
startingBlock := crit.FromBlock
|
startingBlock := crit.FromBlock
|
||||||
@ -579,6 +647,7 @@ func (pea *PublicEthAPI) localGetLogs(ctx context.Context, crit ethereum.FilterQ
|
|||||||
if startingBlock == nil {
|
if startingBlock == nil {
|
||||||
startingBlock = common.Big0
|
startingBlock = common.Big0
|
||||||
}
|
}
|
||||||
|
|
||||||
if endingBlock == nil {
|
if endingBlock == nil {
|
||||||
endingBlockInt, err := pea.B.Retriever.RetrieveLastBlockNumber()
|
endingBlockInt, err := pea.B.Retriever.RetrieveLastBlockNumber()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -586,24 +655,28 @@ func (pea *PublicEthAPI) localGetLogs(ctx context.Context, crit ethereum.FilterQ
|
|||||||
}
|
}
|
||||||
endingBlock = big.NewInt(endingBlockInt)
|
endingBlock = big.NewInt(endingBlockInt)
|
||||||
}
|
}
|
||||||
|
|
||||||
start := startingBlock.Int64()
|
start := startingBlock.Int64()
|
||||||
end := endingBlock.Int64()
|
end := endingBlock.Int64()
|
||||||
allRctCIDs := make([]eth.ReceiptModel, 0)
|
var logs []*types.Log
|
||||||
for i := start; i <= end; i++ {
|
for i := start; i <= end; i++ {
|
||||||
rctCIDs, err := pea.B.Retriever.RetrieveRctCIDs(tx, filter, i, nil, nil)
|
filteredLogs, err := pea.B.Retriever.RetrieveFilteredLog(tx, filter, i, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
allRctCIDs = append(allRctCIDs, rctCIDs...)
|
|
||||||
}
|
logCIDs, err := decomposeLogs(filteredLogs)
|
||||||
rctIPLDs, err := pea.B.Fetcher.FetchRcts(tx, allRctCIDs)
|
if err != nil {
|
||||||
if err != nil {
|
return nil, err
|
||||||
return nil, err
|
}
|
||||||
|
|
||||||
|
logs = append(logs, logCIDs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := tx.Commit(); err != nil {
|
if err := tx.Commit(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
logs, err := extractLogsOfInterest(rctIPLDs, filter.Topics)
|
|
||||||
return logs, err // need to return err variable so that we return the err = tx.Commit() assignment in the defer
|
return logs, err // need to return err variable so that we return the err = tx.Commit() assignment in the defer
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -621,13 +694,17 @@ func (pea *PublicEthAPI) GetBalance(ctx context.Context, address common.Address,
     if bal != nil && err == nil {
         return bal, nil
     }
-    if pea.rpc != nil {
+    if pea.proxyOnError {
         var res *hexutil.Big
         if err := pea.rpc.CallContext(ctx, &res, "eth_getBalance", address, blockNrOrHash); res != nil && err == nil {
             go pea.writeStateDiffAtOrFor(blockNrOrHash)
             return res, nil
         }
     }
+    if err == sql.ErrNoRows {
+        return (*hexutil.Big)(big.NewInt(0)), nil
+    }
+
     return nil, err
 }

@@ -645,15 +722,28 @@ func (pea *PublicEthAPI) localGetBalance(ctx context.Context, address common.Add
 func (pea *PublicEthAPI) GetStorageAt(ctx context.Context, address common.Address, key string, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) {
     storageVal, err := pea.B.GetStorageByNumberOrHash(ctx, address, common.HexToHash(key), blockNrOrHash)
     if storageVal != nil && err == nil {
-        return storageVal, nil
+        var value common.Hash
+        _, content, _, err := rlp.Split(storageVal)
+        if err == io.ErrUnexpectedEOF {
+            return hexutil.Bytes{}, nil
+        }
+        if err != nil {
+            return nil, err
+        }
+        value.SetBytes(content)
+
+        return value[:], nil
     }
-    if pea.rpc != nil {
+    if pea.proxyOnError {
         var res hexutil.Bytes
         if err := pea.rpc.CallContext(ctx, &res, "eth_getStorageAt", address, key, blockNrOrHash); res != nil && err == nil {
             go pea.writeStateDiffAtOrFor(blockNrOrHash)
             return res, nil
         }
     }
+    if err == sql.ErrNoRows {
+        return make([]byte, 32), nil
+    }
     return nil, err
 }

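A small, self-contained sketch of what the rlp.Split step above does to a raw storage leaf: the trie stores the RLP encoding of the left-trimmed storage word, so the handler strips the RLP prefix and re-pads the content to a full 32-byte value before returning it. The sample bytes below are illustrative only.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/rlp"
)

func main() {
    // Hypothetical storage value: the word 0x2a stored in some contract slot.
    // Trie leaves hold the RLP encoding of the left-trimmed big-endian value.
    enc, _ := rlp.EncodeToBytes([]byte{0x2a})

    // Same decoding step as GetStorageAt: strip the RLP string prefix...
    _, content, _, err := rlp.Split(enc)
    if err != nil {
        panic(err)
    }

    // ...then left-pad back to a 32-byte word for the JSON-RPC response.
    var value common.Hash
    value.SetBytes(content)
    fmt.Printf("0x%x\n", value[:]) // 0x000...02a
}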
@@ -663,13 +753,17 @@ func (pea *PublicEthAPI) GetCode(ctx context.Context, address common.Address, bl
     if code != nil && err == nil {
         return code, nil
     }
-    if pea.rpc != nil {
+    if pea.proxyOnError {
         var res hexutil.Bytes
         if err := pea.rpc.CallContext(ctx, &res, "eth_getCode", address, blockNrOrHash); res != nil && err == nil {
             go pea.writeStateDiffAtOrFor(blockNrOrHash)
             return res, nil
         }
     }
+    if err == sql.ErrNoRows {
+        return code, nil
+    }
+
     return nil, err
 }

@ -679,7 +773,7 @@ func (pea *PublicEthAPI) GetProof(ctx context.Context, address common.Address, s
|
|||||||
if proof != nil && err == nil {
|
if proof != nil && err == nil {
|
||||||
return proof, nil
|
return proof, nil
|
||||||
}
|
}
|
||||||
if pea.rpc != nil {
|
if pea.proxyOnError {
|
||||||
var res *AccountResult
|
var res *AccountResult
|
||||||
if err := pea.rpc.CallContext(ctx, &res, "eth_getProof", address, storageKeys, blockNrOrHash); res != nil && err == nil {
|
if err := pea.rpc.CallContext(ctx, &res, "eth_getProof", address, storageKeys, blockNrOrHash); res != nil && err == nil {
|
||||||
go pea.writeStateDiffAtOrFor(blockNrOrHash)
|
go pea.writeStateDiffAtOrFor(blockNrOrHash)
|
||||||
@ -738,50 +832,59 @@ func (pea *PublicEthAPI) localGetProof(ctx context.Context, address common.Addre
|
|||||||
}, state.Error()
|
}, state.Error()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Call executes the given transaction on the state for the given block number.
|
// revertError is an API error that encompassas an EVM revertal with JSON error
|
||||||
//
|
// code and a binary data blob.
|
||||||
// Additionally, the caller can specify a batch of contract for fields overriding.
|
type revertError struct {
|
||||||
//
|
error
|
||||||
// Note, this function doesn't make and changes in the state/blockchain and is
|
reason string // revert reason hex encoded
|
||||||
// useful to execute and retrieve values.
|
|
||||||
func (pea *PublicEthAPI) Call(ctx context.Context, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *map[common.Address]account) (hexutil.Bytes, error) {
|
|
||||||
var accounts map[common.Address]account
|
|
||||||
if overrides != nil {
|
|
||||||
accounts = *overrides
|
|
||||||
}
|
|
||||||
res, _, failed, err := DoCall(ctx, pea.B, args, blockNrOrHash, accounts, 5*time.Second, pea.B.Config.RPCGasCap)
|
|
||||||
if (failed || err != nil) && pea.rpc != nil {
|
|
||||||
var hex hexutil.Bytes
|
|
||||||
if err := pea.rpc.CallContext(ctx, &hex, "eth_call", args, blockNrOrHash, overrides); hex != nil && err == nil {
|
|
||||||
go pea.writeStateDiffAtOrFor(blockNrOrHash)
|
|
||||||
return hex, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if failed && err == nil {
|
|
||||||
return nil, errors.New("eth_call failed without error")
|
|
||||||
}
|
|
||||||
return (hexutil.Bytes)(res), err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func DoCall(ctx context.Context, b *Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides map[common.Address]account, timeout time.Duration, globalGasCap *big.Int) ([]byte, uint64, bool, error) {
|
// ErrorCode returns the JSON error code for a revertal.
|
||||||
defer func(start time.Time) {
|
// See: https://github.com/ethereum/wiki/wiki/JSON-RPC-Error-Codes-Improvement-Proposal
|
||||||
logrus.Debugf("Executing EVM call finished %s runtime %s", time.Now().String(), time.Since(start).String())
|
func (e *revertError) ErrorCode() int {
|
||||||
}(time.Now())
|
return 3
|
||||||
state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
|
}
|
||||||
if state == nil || err != nil {
|
|
||||||
return nil, 0, false, err
|
// ErrorData returns the hex encoded revert reason.
|
||||||
|
func (e *revertError) ErrorData() interface{} {
|
||||||
|
return e.reason
|
||||||
|
}
|
||||||
|
|
||||||
|
func newRevertError(result *core.ExecutionResult) *revertError {
|
||||||
|
reason, errUnpack := abi.UnpackRevert(result.Revert())
|
||||||
|
err := errors.New("execution reverted")
|
||||||
|
if errUnpack == nil {
|
||||||
|
err = fmt.Errorf("execution reverted: %v", reason)
|
||||||
}
|
}
|
||||||
// Set sender address or use a default if none specified
|
return &revertError{
|
||||||
var addr common.Address
|
error: err,
|
||||||
if args.From == nil {
|
reason: hexutil.Encode(result.Revert()),
|
||||||
if b.Config.DefaultSender != nil {
|
|
||||||
addr = *b.Config.DefaultSender
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
addr = *args.From
|
|
||||||
}
|
}
|
||||||
// Override the fields of specified contracts before execution.
|
}
|
||||||
for addr, account := range overrides {
|
|
||||||
|
// OverrideAccount indicates the overriding fields of account during the execution
|
||||||
|
// of a message call.
|
||||||
|
// Note, state and stateDiff can't be specified at the same time. If state is
|
||||||
|
// set, message execution will only use the data in the given state. Otherwise
|
||||||
|
// if statDiff is set, all diff will be applied first and then execute the call
|
||||||
|
// message.
|
||||||
|
type OverrideAccount struct {
|
||||||
|
Nonce *hexutil.Uint64 `json:"nonce"`
|
||||||
|
Code *hexutil.Bytes `json:"code"`
|
||||||
|
Balance **hexutil.Big `json:"balance"`
|
||||||
|
State *map[common.Hash]common.Hash `json:"state"`
|
||||||
|
StateDiff *map[common.Hash]common.Hash `json:"stateDiff"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// StateOverride is the collection of overridden accounts.
|
||||||
|
type StateOverride map[common.Address]OverrideAccount
|
||||||
|
|
||||||
|
// Apply overrides the fields of specified accounts into the given state.
|
||||||
|
func (diff *StateOverride) Apply(state *state.StateDB) error {
|
||||||
|
if diff == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
for addr, account := range *diff {
|
||||||
// Override account nonce.
|
// Override account nonce.
|
||||||
if account.Nonce != nil {
|
if account.Nonce != nil {
|
||||||
state.SetNonce(addr, uint64(*account.Nonce))
|
state.SetNonce(addr, uint64(*account.Nonce))
|
||||||
@ -795,7 +898,7 @@ func DoCall(ctx context.Context, b *Backend, args CallArgs, blockNrOrHash rpc.Bl
|
|||||||
state.SetBalance(addr, (*big.Int)(*account.Balance))
|
state.SetBalance(addr, (*big.Int)(*account.Balance))
|
||||||
}
|
}
|
||||||
if account.State != nil && account.StateDiff != nil {
|
if account.State != nil && account.StateDiff != nil {
|
||||||
return nil, 0, false, fmt.Errorf("account %s has both 'state' and 'stateDiff'", addr.Hex())
|
return fmt.Errorf("account %s has both 'state' and 'stateDiff'", addr.Hex())
|
||||||
}
|
}
|
||||||
// Replace entire state if caller requires.
|
// Replace entire state if caller requires.
|
||||||
if account.State != nil {
|
if account.State != nil {
|
||||||
@ -808,32 +911,56 @@ func DoCall(ctx context.Context, b *Backend, args CallArgs, blockNrOrHash rpc.Bl
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Set default gas & gas price if none were set
|
return nil
|
||||||
gas := uint64(math.MaxUint64 / 2)
|
}
|
||||||
if args.Gas != nil {
|
|
||||||
gas = uint64(*args.Gas)
|
// Call executes the given transaction on the state for the given block number.
|
||||||
}
|
//
|
||||||
if globalGasCap != nil && globalGasCap.Uint64() < gas {
|
// Additionally, the caller can specify a batch of contract for fields overriding.
|
||||||
logrus.Warnf("Caller gas above allowance, capping; requested: %d, cap: %d", gas, globalGasCap)
|
//
|
||||||
gas = globalGasCap.Uint64()
|
// Note, this function doesn't make and changes in the state/blockchain and is
|
||||||
}
|
// useful to execute and retrieve values.
|
||||||
gasPrice := new(big.Int).SetUint64(params.GWei)
|
func (pea *PublicEthAPI) Call(ctx context.Context, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Bytes, error) {
|
||||||
if args.GasPrice != nil {
|
if pea.forwardEthCalls {
|
||||||
gasPrice = args.GasPrice.ToInt()
|
var hex hexutil.Bytes
|
||||||
|
err := pea.rpc.CallContext(ctx, &hex, "eth_call", args, blockNrOrHash, overrides)
|
||||||
|
return hex, err
|
||||||
}
|
}
|
||||||
|
|
||||||
value := new(big.Int)
|
result, err := DoCall(ctx, pea.B, args, blockNrOrHash, overrides, 5*time.Second, pea.B.Config.RPCGasCap.Uint64())
|
||||||
if args.Value != nil {
|
|
||||||
value = args.Value.ToInt()
|
// If the result contains a revert reason, try to unpack and return it.
|
||||||
|
if err == nil {
|
||||||
|
if len(result.Revert()) > 0 {
|
||||||
|
err = newRevertError(result)
|
||||||
|
} else if result.Err != nil {
|
||||||
|
err = result.Err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var data []byte
|
if err != nil && pea.proxyOnError {
|
||||||
if args.Data != nil {
|
var hex hexutil.Bytes
|
||||||
data = []byte(*args.Data)
|
if err := pea.rpc.CallContext(ctx, &hex, "eth_call", args, blockNrOrHash, overrides); hex != nil && err == nil {
|
||||||
|
go pea.writeStateDiffAtOrFor(blockNrOrHash)
|
||||||
|
return hex, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result.Return(), err
|
||||||
|
}
|
||||||
|
|
||||||
|
func DoCall(ctx context.Context, b *Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
|
||||||
|
defer func(start time.Time) {
|
||||||
|
logrus.Debugf("Executing EVM call finished %s runtime %s", time.Now().String(), time.Since(start).String())
|
||||||
|
}(time.Now())
|
||||||
|
|
||||||
|
state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
|
||||||
|
if state == nil || err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create new call message
|
if err := overrides.Apply(state); err != nil {
|
||||||
msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, data, false)
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
// Setup context so it may be cancelled the call has completed
|
// Setup context so it may be cancelled the call has completed
|
||||||
// or, in case of unmetered gas, setup a context with a timeout.
|
// or, in case of unmetered gas, setup a context with a timeout.
|
||||||
@ -848,10 +975,16 @@ func DoCall(ctx context.Context, b *Backend, args CallArgs, blockNrOrHash rpc.Bl
|
|||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
// Get a new instance of the EVM.
|
// Get a new instance of the EVM.
|
||||||
evm, err := b.GetEVM(ctx, msg, state, header)
|
msg, err := args.ToMessage(globalGasCap, header.BaseFee)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, 0, false, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
evm, vmError, err := b.GetEVM(ctx, msg, state, header)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
// Wait for the context to be done and cancel the evm. Even if the
|
// Wait for the context to be done and cancel the evm. Even if the
|
||||||
// EVM has finished, cancelling may be done (repeatedly)
|
// EVM has finished, cancelling may be done (repeatedly)
|
||||||
go func() {
|
go func() {
|
||||||
@ -859,18 +992,21 @@ func DoCall(ctx context.Context, b *Backend, args CallArgs, blockNrOrHash rpc.Bl
|
|||||||
evm.Cancel()
|
evm.Cancel()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Setup the gas pool (also for unmetered requests)
|
// Execute the message.
|
||||||
// and apply the message.
|
|
||||||
gp := new(core.GasPool).AddGas(math.MaxUint64)
|
gp := new(core.GasPool).AddGas(math.MaxUint64)
|
||||||
result, err := core.ApplyMessage(evm, msg, gp)
|
result, err := core.ApplyMessage(evm, msg, gp)
|
||||||
if err != nil {
|
if err := vmError(); err != nil {
|
||||||
return nil, 0, false, fmt.Errorf("execution failed: %v", err)
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the timer caused an abort, return an appropriate error message
|
// If the timer caused an abort, return an appropriate error message
|
||||||
if evm.Cancelled() {
|
if evm.Cancelled() {
|
||||||
return nil, 0, false, fmt.Errorf("execution aborted (timeout = %v)", timeout)
|
return nil, fmt.Errorf("execution aborted (timeout = %v)", timeout)
|
||||||
}
|
}
|
||||||
return result.Return(), result.UsedGas, result.Failed(), err
|
if err != nil {
|
||||||
|
return result, fmt.Errorf("err: %w (supplied gas %d)", err, msg.Gas())
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeStateDiffAtOrFor calls out to the proxy statediffing geth client to fill in a gap in the index
|
// writeStateDiffAtOrFor calls out to the proxy statediffing geth client to fill in a gap in the index
|
||||||
@ -889,7 +1025,7 @@ func (pea *PublicEthAPI) writeStateDiffAtOrFor(blockNrOrHash rpc.BlockNumberOrHa
|
|||||||
}
|
}
|
||||||
|
|
||||||
// writeStateDiffWithCriteria calls out to the proxy statediffing geth client to fill in a gap in the index
|
// writeStateDiffWithCriteria calls out to the proxy statediffing geth client to fill in a gap in the index
|
||||||
func (pea *PublicEthAPI) writeStateDiffWithCriteria(crit ethereum.FilterQuery) {
|
func (pea *PublicEthAPI) writeStateDiffWithCriteria(crit filters.FilterCriteria) {
|
||||||
// short circuit right away if the proxy doesn't support diffing
|
// short circuit right away if the proxy doesn't support diffing
|
||||||
if !pea.supportsStateDiff {
|
if !pea.supportsStateDiff {
|
||||||
return
|
return
|
||||||
@@ -994,3 +1130,42 @@ func toHexSlice(b [][]byte) []string {
     }
     return r
 }
+
+// decomposeLogs return logs from LogResult.
+func decomposeLogs(logCIDs []LogResult) ([]*types.Log, error) {
+    logs := make([]*types.Log, len(logCIDs))
+    for i, l := range logCIDs {
+        topics := make([]common.Hash, 0)
+        if l.Topic0 != "" {
+            topics = append(topics, common.HexToHash(l.Topic0))
+        }
+        if l.Topic1 != "" {
+            topics = append(topics, common.HexToHash(l.Topic1))
+        }
+        if l.Topic2 != "" {
+            topics = append(topics, common.HexToHash(l.Topic2))
+        }
+        if l.Topic3 != "" {
+            topics = append(topics, common.HexToHash(l.Topic3))
+        }
+
+        // TODO: should we convert string to uint ?
+        blockNum, err := strconv.ParseUint(l.BlockNumber, 10, 64)
+        if err != nil {
+            return nil, err
+        }
+
+        logs[i] = &types.Log{
+            Address:     common.HexToAddress(l.Address),
+            Topics:      topics,
+            Data:        l.Data,
+            BlockNumber: blockNum,
+            TxHash:      common.HexToHash(l.TxHash),
+            TxIndex:     uint(l.TxnIndex),
+            BlockHash:   common.HexToHash(l.BlockHash),
+            Index:       uint(l.Index),
+        }
+    }
+
+    return logs, nil
+}
@ -18,34 +18,40 @@ package eth_test
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"math/big"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum"
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
"github.com/ethereum/go-ethereum/eth/filters"
|
||||||
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
|
||||||
|
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
||||||
|
"github.com/jmoiron/sqlx"
|
||||||
. "github.com/onsi/ginkgo"
|
. "github.com/onsi/ginkgo"
|
||||||
. "github.com/onsi/gomega"
|
. "github.com/onsi/gomega"
|
||||||
|
|
||||||
eth2 "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
|
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
|
||||||
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
|
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers"
|
||||||
"github.com/vulcanize/ipld-eth-indexer/pkg/shared"
|
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
|
||||||
|
ethServerShared "github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
|
||||||
"github.com/vulcanize/ipld-eth-server/pkg/eth"
|
|
||||||
"github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
randomAddr = common.HexToAddress("0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f")
|
randomAddr = common.HexToAddress("0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f")
|
||||||
randomHash = crypto.Keccak256Hash(randomAddr.Bytes())
|
randomHash = crypto.Keccak256Hash(randomAddr.Bytes())
|
||||||
number = rpc.BlockNumber(test_helpers.BlockNumber.Int64())
|
number = rpc.BlockNumber(test_helpers.BlockNumber.Int64())
|
||||||
wrongNumber = rpc.BlockNumber(number + 1)
|
londonBlockNum = rpc.BlockNumber(test_helpers.LondonBlockNum.Int64())
|
||||||
blockHash = test_helpers.MockBlock.Header().Hash()
|
wrongNumber = number + 1
|
||||||
ctx = context.Background()
|
blockHash = test_helpers.MockBlock.Header().Hash()
|
||||||
expectedBlock = map[string]interface{}{
|
baseFee = test_helpers.MockLondonBlock.BaseFee()
|
||||||
|
ctx = context.Background()
|
||||||
|
expectedBlock = map[string]interface{}{
|
||||||
"number": (*hexutil.Big)(test_helpers.MockBlock.Number()),
|
"number": (*hexutil.Big)(test_helpers.MockBlock.Number()),
|
||||||
"hash": test_helpers.MockBlock.Hash(),
|
"hash": test_helpers.MockBlock.Hash(),
|
||||||
"parentHash": test_helpers.MockBlock.ParentHash(),
|
"parentHash": test_helpers.MockBlock.ParentHash(),
|
||||||
@@ -125,13 +131,14 @@ var (
"receiptsRoot": test_helpers.MockUncles[1].ReceiptHash,
"uncles": []common.Hash{},
}
-expectedTransaction = eth.NewRPCTransaction(test_helpers.MockTransactions[0], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 0)
+expectedTransaction = eth.NewRPCTransaction(test_helpers.MockTransactions[0], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 0, test_helpers.MockBlock.BaseFee())
-expectedTransaction2 = eth.NewRPCTransaction(test_helpers.MockTransactions[1], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 1)
+expectedTransaction2 = eth.NewRPCTransaction(test_helpers.MockTransactions[1], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 1, test_helpers.MockBlock.BaseFee())
-expectedTransaction3 = eth.NewRPCTransaction(test_helpers.MockTransactions[2], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 2)
+expectedTransaction3 = eth.NewRPCTransaction(test_helpers.MockTransactions[2], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 2, test_helpers.MockBlock.BaseFee())
+expectedLondonTransaction = eth.NewRPCTransaction(test_helpers.MockLondonTransactions[0], test_helpers.MockLondonBlock.Hash(), test_helpers.MockLondonBlock.NumberU64(), 0, test_helpers.MockLondonBlock.BaseFee())
expectRawTx, _ = rlp.EncodeToBytes(test_helpers.MockTransactions[0])
expectRawTx2, _ = rlp.EncodeToBytes(test_helpers.MockTransactions[1])
expectRawTx3, _ = rlp.EncodeToBytes(test_helpers.MockTransactions[2])
expectedReceipt = map[string]interface{}{
"blockHash": blockHash,
"blockNumber": hexutil.Uint64(uint64(number.Int64())),
"transactionHash": expectedTransaction.Hash,
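NewRPCTransaction now takes the block's base fee, so the RPC view of a dynamic-fee (EIP-1559) transaction can report an effective gas price. As a rough, illustrative sketch of the rule involved (this helper is not the repo's code, only the standard EIP-1559 formula):

package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

// effectiveGasPrice: min(tipCap+baseFee, feeCap) for dynamic-fee transactions,
// the explicit gas price for everything else.
func effectiveGasPrice(tx *types.Transaction, baseFee *big.Int) *big.Int {
	if baseFee == nil || tx.Type() == types.LegacyTxType {
		return tx.GasPrice()
	}
	price := new(big.Int).Add(tx.GasTipCap(), baseFee)
	if price.Cmp(tx.GasFeeCap()) > 0 {
		price = tx.GasFeeCap()
	}
	return price
}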
@@ -143,7 +150,7 @@ var (
"contractAddress": nil,
"logs": test_helpers.MockReceipts[0].Logs,
"logsBloom": test_helpers.MockReceipts[0].Bloom,
-"root": hexutil.Bytes(test_helpers.MockReceipts[0].PostState),
+"status": hexutil.Uint(test_helpers.MockReceipts[0].Status),
}
expectedReceipt2 = map[string]interface{}{
"blockHash": blockHash,
@@ -168,7 +175,7 @@ var (
"to": expectedTransaction3.To,
"gasUsed": hexutil.Uint64(test_helpers.MockReceipts[2].GasUsed),
"cumulativeGasUsed": hexutil.Uint64(test_helpers.MockReceipts[2].CumulativeGasUsed),
-"contractAddress": nil,
+"contractAddress": test_helpers.ContractAddress,
"logs": test_helpers.MockReceipts[2].Logs,
"logsBloom": test_helpers.MockReceipts[2].Bloom,
"root": hexutil.Bytes(test_helpers.MockReceipts[2].PostState),
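The first mock receipt is now asserted against a "status" field instead of a post-state "root", which matches how receipts differ before and after Byzantium (the third mock receipt still carries a PostState root). A small illustrative helper, not part of the repo, showing which field an RPC receipt map would carry:

package example

import (
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
)

// receiptStateField picks the field an RPC receipt map carries: pre-Byzantium
// receipts embed a post-state root, later receipts carry a 0/1 status code.
func receiptStateField(rct *types.Receipt) (string, interface{}) {
	if len(rct.PostState) > 0 {
		return "root", hexutil.Bytes(rct.PostState)
	}
	return "status", hexutil.Uint(rct.Status)
}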
@@ -177,32 +184,75 @@ var (

var _ = Describe("API", func() {
var (
-db *postgres.DB
+db *sqlx.DB
api *eth.PublicEthAPI
+chainConfig = params.TestChainConfig
)
// Test db setup, rather than using BeforeEach we only need to setup once since the tests do not mutate the database
// Note: if you focus one of the tests be sure to focus this and the defered It()
It("test init", func() {
-var err error
-db, err = shared.SetupDB()
+var (
+err error
+tx interfaces.Batch
+)
+
+db = shared.SetupDB()
+indexAndPublisher := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
+
+backend, err := eth.NewEthBackend(db, &eth.Config{
+ChainConfig: chainConfig,
+VMConfig: vm.Config{},
+RPCGasCap: big.NewInt(10000000000), // Max gas capacity for a rpc call.
+GroupCacheConfig: &ethServerShared.GroupCacheConfig{
+StateDB: ethServerShared.GroupConfig{
+Name: "api_test",
+CacheSizeInMB: 8,
+CacheExpiryInMins: 60,
+LogStatsIntervalInSecs: 0,
+},
+},
+})
Expect(err).ToNot(HaveOccurred())
-indexAndPublisher := eth2.NewIPLDPublisher(db)
-backend, err := eth.NewEthBackend(db, &eth.Config{})
+api, _ = eth.NewPublicEthAPI(backend, nil, false, false, false)
+tx, err = indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
-api = eth.NewPublicEthAPI(backend, nil, false)
-err = indexAndPublisher.Publish(test_helpers.MockConvertedPayload)
+
+ccHash := sdtypes.CodeAndCodeHash{
+Hash: test_helpers.ContractCodeHash,
+Code: test_helpers.ContractCode,
+}
+
+err = indexAndPublisher.PushCodeAndCodeHash(tx, ccHash)
Expect(err).ToNot(HaveOccurred())
-err = publishCode(db, test_helpers.ContractCodeHash, test_helpers.ContractCode)
+
+for _, node := range test_helpers.MockStateNodes {
+err = indexAndPublisher.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
+Expect(err).ToNot(HaveOccurred())
+}
+
+err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())

uncles := test_helpers.MockBlock.Uncles()
uncleHashes := make([]common.Hash, len(uncles))
for i, uncle := range uncles {
uncleHashes[i] = uncle.Hash()
}
expectedBlock["uncles"] = uncleHashes

+// setting chain config to for london block
+chainConfig.LondonBlock = big.NewInt(2)
+indexAndPublisher = shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
+
+tx, err = indexAndPublisher.PushBlock(test_helpers.MockLondonBlock, test_helpers.MockLondonReceipts, test_helpers.MockLondonBlock.Difficulty())
+Expect(err).ToNot(HaveOccurred())
+
+err = tx.Submit(err)
+Expect(err).ToNot(HaveOccurred())
})
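The rewritten setup seeds the database through the go-ethereum statediff indexer instead of the old ipld-eth-indexer publisher: PushBlock opens a batch, PushStateNode and PushCodeAndCodeHash attach data to it, and tx.Submit(err) closes it. The sketch below restates that flow against minimal stand-in interfaces; the real types (interfaces.Batch, sdtypes.CodeAndCodeHash) live in go-ethereum's statediff packages and their exact import paths are not shown in this diff, so treat the shapes here as assumptions:

package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

// Stand-ins for the indexer types used above; names and shapes are assumed.
type Batch interface {
	Submit(err error) error
}

type Indexer interface {
	PushBlock(block *types.Block, receipts types.Receipts, td *big.Int) (Batch, error)
	PushStateNode(tx Batch, node interface{}, headerHash string) error
	PushCodeAndCodeHash(tx Batch, c interface{}) error
}

// seedBlock mirrors the test's write path: one batch per block, everything
// attached to it, then a single Submit that also folds in any earlier error.
func seedBlock(ind Indexer, block *types.Block, rcts types.Receipts, nodes []interface{}, code interface{}) error {
	tx, err := ind.PushBlock(block, rcts, block.Difficulty())
	if err != nil {
		return err
	}
	for _, node := range nodes {
		if err = ind.PushStateNode(tx, node, block.Hash().String()); err != nil {
			return err
		}
	}
	if err = ind.PushCodeAndCodeHash(tx, code); err != nil {
		return err
	}
	return tx.Submit(err)
}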

// Single test db tear down at end of all tests
-defer It("test teardown", func() { eth.TearDownDB(db) })
+defer It("test teardown", func() { shared.TearDownDB(db) })
/*

Headers and blocks
@@ -220,8 +270,6 @@ var _ = Describe("API", func() {
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
Expect(header).To(BeNil())
-_, err = api.B.DB.Beginx()
-Expect(err).ToNot(HaveOccurred())
})
})

@@ -242,7 +290,7 @@ var _ = Describe("API", func() {
bn := api.BlockNumber()
ubn := (uint64)(bn)
subn := strconv.FormatUint(ubn, 10)
-Expect(subn).To(Equal(test_helpers.BlockNumber.String()))
+Expect(subn).To(Equal(test_helpers.LondonBlockNum.String()))
})
})

@@ -271,10 +319,20 @@ var _ = Describe("API", func() {
Expect(val).To(Equal(block[key]))
}
})
-It("Throws an error if a block cannot be found", func() {
+It("Returns `nil` if a block cannot be found", func() {
-_, err := api.GetBlockByNumber(ctx, wrongNumber, false)
+block, err := api.GetBlockByNumber(ctx, wrongNumber, false)
-Expect(err).To(HaveOccurred())
+Expect(err).ToNot(HaveOccurred())
-Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
+Expect(block).To(BeNil())
+})
+It("Fetch BaseFee from london block by block number, returns `nil` for legacy block", func() {
+block, err := api.GetBlockByNumber(ctx, number, false)
+Expect(err).ToNot(HaveOccurred())
+_, ok := block["baseFee"]
+Expect(ok).To(Equal(false))
+
+block, err = api.GetBlockByNumber(ctx, londonBlockNum, false)
+Expect(err).ToNot(HaveOccurred())
+Expect(block["baseFee"].(*big.Int)).To(Equal(baseFee))
})
})

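With this change a lookup miss is reported as a nil block and a nil error, in line with standard eth_getBlockByNumber behaviour, instead of surfacing the underlying "sql: no rows in result set" error. A hypothetical caller-side check under that contract (the BlockGetter interface below is assumed for illustration):

package example

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

// BlockGetter is a hypothetical interface matching the call exercised above.
type BlockGetter interface {
	GetBlockByNumber(ctx context.Context, num rpc.BlockNumber, fullTx bool) (map[string]interface{}, error)
}

// requireBlock distinguishes "not indexed" (nil, nil) from a real failure.
func requireBlock(ctx context.Context, api BlockGetter, num rpc.BlockNumber) (map[string]interface{}, error) {
	block, err := api.GetBlockByNumber(ctx, num, false)
	if err != nil {
		return nil, err // database or decoding failure
	}
	if block == nil {
		return nil, fmt.Errorf("block %d is not indexed", num)
	}
	return block, nil
}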
@@ -303,10 +361,19 @@ var _ = Describe("API", func() {
Expect(val).To(Equal(block[key]))
}
})
-It("Throws an error if a block cannot be found", func() {
+It("Returns `nil` if a block cannot be found", func() {
-_, err := api.GetBlockByHash(ctx, randomHash, false)
+block, err := api.GetBlockByHash(ctx, randomHash, false)
-Expect(err).To(HaveOccurred())
+Expect(err).ToNot(HaveOccurred())
-Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
+Expect(block).To(BeZero())
+})
+It("Fetch BaseFee from london block by block hash, returns `nil` for legacy block", func() {
+block, err := api.GetBlockByHash(ctx, test_helpers.MockBlock.Hash(), true)
+Expect(err).ToNot(HaveOccurred())
+_, ok := block["baseFee"]
+Expect(ok).To(Equal(false))
+block, err = api.GetBlockByHash(ctx, test_helpers.MockLondonBlock.Hash(), false)
+Expect(err).ToNot(HaveOccurred())
+Expect(block["baseFee"].(*big.Int)).To(Equal(baseFee))
})
})

@@ -325,10 +392,10 @@ var _ = Describe("API", func() {
Expect(err).ToNot(HaveOccurred())
Expect(uncle2).To(Equal(expectedUncle2))
})
-It("Throws an error if an block for blocknumber cannot be found", func() {
+It("Returns `nil` if an block for block number cannot be found", func() {
-_, err := api.GetUncleByBlockNumberAndIndex(ctx, wrongNumber, 0)
+block, err := api.GetUncleByBlockNumberAndIndex(ctx, wrongNumber, 0)
-Expect(err).To(HaveOccurred())
+Expect(err).ToNot(HaveOccurred())
-Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
+Expect(block).To(BeNil())
})
It("Returns `nil` if an uncle at the provided index does not exist for the block found for the provided block number", func() {
uncle, err := api.GetUncleByBlockNumberAndIndex(ctx, number, 2)
@@ -346,10 +413,10 @@ var _ = Describe("API", func() {
Expect(err).ToNot(HaveOccurred())
Expect(uncle2).To(Equal(expectedUncle2))
})
-It("Throws an error if an block for blockhash cannot be found", func() {
+It("Returns `nil` if a block for blockhash cannot be found", func() {
-_, err := api.GetUncleByBlockHashAndIndex(ctx, randomHash, 0)
+block, err := api.GetUncleByBlockHashAndIndex(ctx, randomHash, 0)
-Expect(err).To(HaveOccurred())
+Expect(err).ToNot(HaveOccurred())
-Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
+Expect(block).To(BeNil())
})
It("Returns `nil` if an uncle at the provided index does not exist for the block with the provided hash", func() {
uncle, err := api.GetUncleByBlockHashAndIndex(ctx, blockHash, 2)
@@ -361,6 +428,7 @@ var _ = Describe("API", func() {
Describe("eth_getUncleCountByBlockNumber", func() {
It("Retrieves the number of uncles for the canonical block with the provided number", func() {
count := api.GetUncleCountByBlockNumber(ctx, number)
+Expect(*count).NotTo(Equal(nil))
Expect(uint64(*count)).To(Equal(uint64(2)))
})
})
@@ -368,6 +436,7 @@ var _ = Describe("API", func() {
Describe("eth_getUncleCountByBlockHash", func() {
It("Retrieves the number of uncles for the block with the provided hash", func() {
count := api.GetUncleCountByBlockHash(ctx, blockHash)
+Expect(*count).NotTo(Equal(nil))
Expect(uint64(*count)).To(Equal(uint64(2)))
})
})
@@ -402,14 +471,14 @@ var _ = Describe("API", func() {
Describe("eth_getBlockTransactionCountByNumber", func() {
It("Retrieves the number of transactions in the canonical block with the provided number", func() {
count := api.GetBlockTransactionCountByNumber(ctx, number)
-Expect(uint64(*count)).To(Equal(uint64(3)))
+Expect(uint64(*count)).To(Equal(uint64(4)))
})
})

Describe("eth_getBlockTransactionCountByHash", func() {
It("Retrieves the number of transactions in the block with the provided hash ", func() {
count := api.GetBlockTransactionCountByHash(ctx, blockHash)
-Expect(uint64(*count)).To(Equal(uint64(3)))
+Expect(uint64(*count)).To(Equal(uint64(4)))
})
})

@@ -427,6 +496,13 @@ var _ = Describe("API", func() {
Expect(tx).ToNot(BeNil())
Expect(tx).To(Equal(expectedTransaction3))
})
+It("Retrieves the GasFeeCap and GasTipCap for dynamic transaction from the london block hash", func() {
+tx := api.GetTransactionByBlockNumberAndIndex(ctx, londonBlockNum, 0)
+Expect(tx).ToNot(BeNil())
+Expect(tx.GasFeeCap).To(Equal((*hexutil.Big)(test_helpers.MockLondonTransactions[0].GasFeeCap())))
+Expect(tx.GasTipCap).To(Equal((*hexutil.Big)(test_helpers.MockLondonTransactions[0].GasTipCap())))
+Expect(tx).To(Equal(expectedLondonTransaction))
+})
})

Describe("eth_getTransactionByBlockHashAndIndex", func() {
@@ -443,6 +519,14 @@ var _ = Describe("API", func() {
Expect(tx).ToNot(BeNil())
Expect(tx).To(Equal(expectedTransaction3))
})
+
+It("Retrieves the GasFeeCap and GasTipCap for dynamic transaction from the london block hash", func() {
+tx := api.GetTransactionByBlockHashAndIndex(ctx, test_helpers.MockLondonBlock.Hash(), 0)
+Expect(tx).ToNot(BeNil())
+Expect(tx.GasFeeCap).To(Equal((*hexutil.Big)(test_helpers.MockLondonTransactions[0].GasFeeCap())))
+Expect(tx.GasTipCap).To(Equal((*hexutil.Big)(test_helpers.MockLondonTransactions[0].GasTipCap())))
+Expect(tx).To(Equal(expectedLondonTransaction))
+})
})
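The two new cases read GasFeeCap and GasTipCap off transactions from the London block. For reference, this is how such a dynamic-fee transaction is built with go-ethereum's types package; the numeric values are made up:

package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// newDynamicFeeTx builds an EIP-1559 transaction; GasTipCap and GasFeeCap are
// the two caps the tests above read back through the RPC transaction view.
func newDynamicFeeTx(chainID *big.Int, nonce uint64, to common.Address) *types.Transaction {
	return types.NewTx(&types.DynamicFeeTx{
		ChainID:   chainID,
		Nonce:     nonce,
		GasTipCap: big.NewInt(2_000_000_000),  // max priority fee per gas (example value)
		GasFeeCap: big.NewInt(50_000_000_000), // max total fee per gas (example value)
		Gas:       21000,
		To:        &to,
		Value:     big.NewInt(1),
	})
}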

Describe("eth_getRawTransactionByBlockNumberAndIndex", func() {
@@ -554,7 +638,65 @@ var _ = Describe("API", func() {

Describe("eth_getLogs", func() {
It("Retrieves receipt logs that match the provided topics within the provided range", func() {
-crit := ethereum.FilterQuery{
+crit := filters.FilterCriteria{
+Topics: [][]common.Hash{
+{
+common.HexToHash("0x0c"),
+},
+{
+common.HexToHash("0x0a"),
+},
+{
+common.HexToHash("0x0b"),
+},
+},
+FromBlock: test_helpers.MockBlock.Number(),
+ToBlock: test_helpers.MockBlock.Number(),
+}
+logs, err := api.GetLogs(ctx, crit)
+Expect(err).ToNot(HaveOccurred())
+Expect(len(logs)).To(Equal(0))
+
+crit = filters.FilterCriteria{
+Topics: [][]common.Hash{
+{
+common.HexToHash("0x08"),
+},
+{
+common.HexToHash("0x0a"),
+},
+{
+common.HexToHash("0x0c"),
+},
+},
+FromBlock: test_helpers.MockBlock.Number(),
+ToBlock: test_helpers.MockBlock.Number(),
+}
+logs, err = api.GetLogs(ctx, crit)
+Expect(err).ToNot(HaveOccurred())
+Expect(len(logs)).To(Equal(0))
+
+crit = filters.FilterCriteria{
+Topics: [][]common.Hash{
+{
+common.HexToHash("0x09"),
+},
+{
+common.HexToHash("0x0a"),
+},
+{
+common.HexToHash("0x0b"),
+},
+},
+FromBlock: test_helpers.MockBlock.Number(),
+ToBlock: test_helpers.MockBlock.Number(),
+}
+logs, err = api.GetLogs(ctx, crit)
+Expect(err).ToNot(HaveOccurred())
+Expect(len(logs)).To(Equal(1))
+Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog4}))
+
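The criteria type switches from ethereum.FilterQuery to filters.FilterCriteria, whose Topics field is positional: each outer element constrains one topic slot, and the hashes inside an element are alternatives. A small helper sketch; the import path for the filters package is assumed to be go-ethereum's eth/filters:

package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/filters" // assumed location of FilterCriteria
)

// topicFilter builds criteria like the tests above: the first inner slice
// constrains topic 0, the second topic 1, and so on; hashes within one inner
// slice are ORed together.
func topicFilter(from, to *big.Int, topic0, topic1 []common.Hash) filters.FilterCriteria {
	return filters.FilterCriteria{
		FromBlock: from,
		ToBlock:   to,
		Topics:    [][]common.Hash{topic0, topic1},
	}
}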
+crit = filters.FilterCriteria{
Topics: [][]common.Hash{
{
common.HexToHash("0x04"),
@ -563,12 +705,12 @@ var _ = Describe("API", func() {
|
|||||||
FromBlock: test_helpers.MockBlock.Number(),
|
FromBlock: test_helpers.MockBlock.Number(),
|
||||||
ToBlock: test_helpers.MockBlock.Number(),
|
ToBlock: test_helpers.MockBlock.Number(),
|
||||||
}
|
}
|
||||||
logs, err := api.GetLogs(ctx, crit)
|
logs, err = api.GetLogs(ctx, crit)
|
||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(len(logs)).To(Equal(1))
|
Expect(len(logs)).To(Equal(1))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{
|
{
|
||||||
common.HexToHash("0x04"),
|
common.HexToHash("0x04"),
|
||||||
@ -583,7 +725,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(len(logs)).To(Equal(2))
|
Expect(len(logs)).To(Equal(2))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{
|
{
|
||||||
common.HexToHash("0x04"),
|
common.HexToHash("0x04"),
|
||||||
@ -598,7 +740,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(len(logs)).To(Equal(1))
|
Expect(len(logs)).To(Equal(1))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{
|
{
|
||||||
common.HexToHash("0x04"),
|
common.HexToHash("0x04"),
|
||||||
@ -614,7 +756,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(len(logs)).To(Equal(0))
|
Expect(len(logs)).To(Equal(0))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{
|
{
|
||||||
common.HexToHash("0x04"),
|
common.HexToHash("0x04"),
|
||||||
@ -631,7 +773,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(len(logs)).To(Equal(1))
|
Expect(len(logs)).To(Equal(1))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{
|
{
|
||||||
common.HexToHash("0x05"),
|
common.HexToHash("0x05"),
|
||||||
@ -648,7 +790,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(len(logs)).To(Equal(1))
|
Expect(len(logs)).To(Equal(1))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{
|
{
|
||||||
common.HexToHash("0x05"),
|
common.HexToHash("0x05"),
|
||||||
@ -666,7 +808,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(len(logs)).To(Equal(1))
|
Expect(len(logs)).To(Equal(1))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{
|
{
|
||||||
common.HexToHash("0x04"),
|
common.HexToHash("0x04"),
|
||||||
@ -685,7 +827,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(len(logs)).To(Equal(2))
|
Expect(len(logs)).To(Equal(2))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{},
|
{},
|
||||||
{
|
{
|
||||||
@ -700,7 +842,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(len(logs)).To(Equal(1))
|
Expect(len(logs)).To(Equal(1))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{},
|
{},
|
||||||
{
|
{
|
||||||
@ -715,20 +857,20 @@ var _ = Describe("API", func() {
|
|||||||
Expect(len(logs)).To(Equal(1))
|
Expect(len(logs)).To(Equal(1))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
Topics: [][]common.Hash{},
|
Topics: [][]common.Hash{},
|
||||||
FromBlock: test_helpers.MockBlock.Number(),
|
FromBlock: test_helpers.MockBlock.Number(),
|
||||||
ToBlock: test_helpers.MockBlock.Number(),
|
ToBlock: test_helpers.MockBlock.Number(),
|
||||||
}
|
}
|
||||||
logs, err = api.GetLogs(ctx, crit)
|
logs, err = api.GetLogs(ctx, crit)
|
||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(len(logs)).To(Equal(2))
|
Expect(len(logs)).To(Equal(6))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2, test_helpers.MockLog3, test_helpers.MockLog4, test_helpers.MockLog5, test_helpers.MockLog6}))
|
||||||
})
|
})
|
||||||
|
|
||||||
It("Uses the provided blockhash if one is provided", func() {
|
It("Uses the provided blockhash if one is provided", func() {
|
||||||
hash := test_helpers.MockBlock.Hash()
|
hash := test_helpers.MockBlock.Hash()
|
||||||
crit := ethereum.FilterQuery{
|
crit := filters.FilterCriteria{
|
||||||
BlockHash: &hash,
|
BlockHash: &hash,
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{},
|
{},
|
||||||
@ -742,7 +884,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(len(logs)).To(Equal(1))
|
Expect(len(logs)).To(Equal(1))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
BlockHash: &hash,
|
BlockHash: &hash,
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{
|
{
|
||||||
@ -758,7 +900,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(len(logs)).To(Equal(1))
|
Expect(len(logs)).To(Equal(1))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
BlockHash: &hash,
|
BlockHash: &hash,
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{},
|
{},
|
||||||
@ -772,7 +914,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(len(logs)).To(Equal(1))
|
Expect(len(logs)).To(Equal(1))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
BlockHash: &hash,
|
BlockHash: &hash,
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{
|
{
|
||||||
@ -788,7 +930,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(len(logs)).To(Equal(1))
|
Expect(len(logs)).To(Equal(1))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
BlockHash: &hash,
|
BlockHash: &hash,
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{
|
{
|
||||||
@ -803,7 +945,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(len(logs)).To(Equal(0))
|
Expect(len(logs)).To(Equal(0))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
BlockHash: &hash,
|
BlockHash: &hash,
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{
|
{
|
||||||
@ -820,7 +962,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(len(logs)).To(Equal(1))
|
Expect(len(logs)).To(Equal(1))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
BlockHash: &hash,
|
BlockHash: &hash,
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{
|
{
|
||||||
@ -834,7 +976,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(len(logs)).To(Equal(2))
|
Expect(len(logs)).To(Equal(2))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
BlockHash: &hash,
|
BlockHash: &hash,
|
||||||
Topics: [][]common.Hash{
|
Topics: [][]common.Hash{
|
||||||
{
|
{
|
||||||
@ -852,19 +994,19 @@ var _ = Describe("API", func() {
|
|||||||
Expect(len(logs)).To(Equal(2))
|
Expect(len(logs)).To(Equal(2))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
|
||||||
|
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
BlockHash: &hash,
|
BlockHash: &hash,
|
||||||
Topics: [][]common.Hash{},
|
Topics: [][]common.Hash{},
|
||||||
}
|
}
|
||||||
logs, err = api.GetLogs(ctx, crit)
|
logs, err = api.GetLogs(ctx, crit)
|
||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(len(logs)).To(Equal(2))
|
Expect(len(logs)).To(Equal(6))
|
||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2, test_helpers.MockLog3, test_helpers.MockLog4, test_helpers.MockLog5, test_helpers.MockLog6}))
|
||||||
})
|
})
|
||||||
|
|
||||||
It("Filters on contract address if any are provided", func() {
|
It("Filters on contract address if any are provided", func() {
|
||||||
hash := test_helpers.MockBlock.Hash()
|
hash := test_helpers.MockBlock.Hash()
|
||||||
crit := ethereum.FilterQuery{
|
crit := filters.FilterCriteria{
|
||||||
BlockHash: &hash,
|
BlockHash: &hash,
|
||||||
Addresses: []common.Address{
|
Addresses: []common.Address{
|
||||||
test_helpers.Address,
|
test_helpers.Address,
|
||||||
@ -886,7 +1028,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
|
||||||
|
|
||||||
hash = test_helpers.MockBlock.Hash()
|
hash = test_helpers.MockBlock.Hash()
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
BlockHash: &hash,
|
BlockHash: &hash,
|
||||||
Addresses: []common.Address{
|
Addresses: []common.Address{
|
||||||
test_helpers.Address,
|
test_helpers.Address,
|
||||||
@ -909,7 +1051,7 @@ var _ = Describe("API", func() {
|
|||||||
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
|
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
|
||||||
|
|
||||||
hash = test_helpers.MockBlock.Hash()
|
hash = test_helpers.MockBlock.Hash()
|
||||||
crit = ethereum.FilterQuery{
|
crit = filters.FilterCriteria{
|
||||||
BlockHash: &hash,
|
BlockHash: &hash,
|
||||||
Addresses: []common.Address{
|
Addresses: []common.Address{
|
||||||
test_helpers.Address,
|
test_helpers.Address,
|
||||||
@@ -948,8 +1090,17 @@ var _ = Describe("API", func() {
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
})
-It("Throws an error for an account it cannot find the balance for", func() {
+It("Retrieves the eth balance for the non-existing account address at the block with the provided hash", func() {
-_, err := api.GetBalance(ctx, randomAddr, rpc.BlockNumberOrHashWithHash(blockHash, true))
+bal, err := api.GetBalance(ctx, randomAddr, rpc.BlockNumberOrHashWithHash(blockHash, true))
+Expect(err).ToNot(HaveOccurred())
+Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
+})
+It("Throws an error for an account of a non-existing block hash", func() {
+_, err := api.GetBalance(ctx, test_helpers.AccountAddresss, rpc.BlockNumberOrHashWithHash(randomHash, true))
+Expect(err).To(HaveOccurred())
+})
+It("Throws an error for an account of a non-existing block number", func() {
+_, err := api.GetBalance(ctx, test_helpers.AccountAddresss, rpc.BlockNumberOrHashWithNumber(wrongNumber))
Expect(err).To(HaveOccurred())
})
})
@@ -965,26 +1116,10 @@ var _ = Describe("API", func() {
Expect(err).ToNot(HaveOccurred())
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
})
-It("Throws an error for an account it cannot find the code for", func() {
+It("Returns `nil` for an account it cannot find the code for", func() {
-_, err := api.GetCode(ctx, randomAddr, rpc.BlockNumberOrHashWithHash(blockHash, true))
+code, err := api.GetCode(ctx, randomAddr, rpc.BlockNumberOrHashWithHash(blockHash, true))
-Expect(err).To(HaveOccurred())
+Expect(err).ToNot(HaveOccurred())
+Expect(code).To(BeEmpty())
})
})
})

-func publishCode(db *postgres.DB, codeHash common.Hash, code []byte) error {
-tx, err := db.Beginx()
-if err != nil {
-return err
-}
-mhKey, err := shared.MultihashKeyFromKeccak256(codeHash)
-if err != nil {
-tx.Rollback()
-return err
-}
-if err := shared.PublishDirect(tx, mhKey, code); err != nil {
-tx.Rollback()
-return err
-}
-return tx.Commit()
-}
@@ -18,13 +18,14 @@ package eth

import (
"context"
+"database/sql"
"errors"
"fmt"
"math/big"
+"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
-"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
@@ -38,49 +39,62 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
+"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/trie"
+"github.com/jmoiron/sqlx"
+log "github.com/sirupsen/logrus"
+validator "github.com/vulcanize/eth-ipfs-state-validator/v3/pkg"
+ipfsethdb "github.com/vulcanize/ipfs-ethdb/v3/postgres"
+
-"github.com/vulcanize/ipfs-ethdb"
+ethServerShared "github.com/ethereum/go-ethereum/statediff/indexer/shared"
-"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
-shared2 "github.com/vulcanize/ipld-eth-indexer/pkg/shared"

-"github.com/vulcanize/ipld-eth-server/pkg/shared"
+"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
)

var (
errPendingBlockNumber = errors.New("pending block number not supported")
errNegativeBlockNumber = errors.New("negative block number not supported")
+errHeaderHashNotFound = errors.New("header for hash not found")
+errHeaderNotFound = errors.New("header not found")
+
+// errMissingSignature is returned if a block's extra-data section doesn't seem
+// to contain a 65 byte secp256k1 signature.
+errMissingSignature = errors.New("extra-data 65 byte signature suffix missing")
)

const (
RetrieveCanonicalBlockHashByNumber = `SELECT block_hash FROM eth.header_cids
INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
-WHERE id = (SELECT canonical_header_id($1))`
+WHERE block_hash = (SELECT canonical_header_hash($1))`
RetrieveCanonicalHeaderByNumber = `SELECT cid, data FROM eth.header_cids
INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
-WHERE id = (SELECT canonical_header_id($1))`
+WHERE block_hash = (SELECT canonical_header_hash($1))`
-RetrieveTD = `SELECT td FROM eth.header_cids
+RetrieveTD = `SELECT CAST(td as Text) FROM eth.header_cids
WHERE header_cids.block_hash = $1`
RetrieveRPCTransaction = `SELECT blocks.data, block_hash, block_number, index FROM public.blocks, eth.transaction_cids, eth.header_cids
WHERE blocks.key = transaction_cids.mh_key
-AND transaction_cids.header_id = header_cids.id
+AND transaction_cids.header_id = header_cids.block_hash
AND transaction_cids.tx_hash = $1`
RetrieveCodeHashByLeafKeyAndBlockHash = `SELECT code_hash FROM eth.state_accounts, eth.state_cids, eth.header_cids
-WHERE state_accounts.state_id = state_cids.id
+WHERE state_accounts.header_id = state_cids.header_id AND state_accounts.state_path = state_cids.state_path
-AND state_cids.header_id = header_cids.id
+AND state_cids.header_id = header_cids.block_hash
AND state_leaf_key = $1
AND block_number <= (SELECT block_number
FROM eth.header_cids
WHERE block_hash = $2)
-AND header_cids.id = (SELECT canonical_header_id(block_number))
+AND header_cids.block_hash = (SELECT canonical_header_hash(block_number))
ORDER BY block_number DESC
LIMIT 1`
RetrieveCodeByMhKey = `SELECT data FROM public.blocks WHERE key = $1`
)

+const (
+StateDBGroupCacheName = "statedb"
+)
+
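RetrieveTD now casts td to text because a chain's total difficulty can exceed what an int64 scan holds; the Go side reads the string and parses it into a big.Int. A sketch of that read path with sqlx (the query constant is repeated locally so the snippet stands alone; the exact parameter type passed for the hash is an assumption):

package example

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/jmoiron/sqlx"
)

const retrieveTD = `SELECT CAST(td as Text) FROM eth.header_cids
	WHERE header_cids.block_hash = $1`

// readTd scans the text-cast total difficulty and parses it into a big.Int.
func readTd(db *sqlx.DB, blockHash common.Hash) (*big.Int, error) {
	var tdStr string
	if err := db.Get(&tdStr, retrieveTD, blockHash.String()); err != nil {
		return nil, err
	}
	td, ok := new(big.Int).SetString(tdStr, 10)
	if !ok {
		return nil, fmt.Errorf("invalid total difficulty %q", tdStr)
	}
	return td, nil
}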
type Backend struct {
// underlying postgres db
-DB *postgres.DB
+DB *sqlx.DB

// postgres db interfaces
Retriever *CIDRetriever
@@ -95,15 +109,30 @@ type Backend struct {
}

type Config struct {
ChainConfig *params.ChainConfig
-VmConfig vm.Config
+VMConfig vm.Config
DefaultSender *common.Address
RPCGasCap *big.Int
+GroupCacheConfig *shared.GroupCacheConfig
}

-func NewEthBackend(db *postgres.DB, c *Config) (*Backend, error) {
+func NewEthBackend(db *sqlx.DB, c *Config) (*Backend, error) {
+gcc := c.GroupCacheConfig
+
+groupName := gcc.StateDB.Name
+if groupName == "" {
+groupName = StateDBGroupCacheName
+}
+
r := NewCIDRetriever(db)
-ethDB := ipfsethdb.NewDatabase(db.DB)
+ethDB := ipfsethdb.NewDatabase(db, ipfsethdb.CacheConfig{
+Name: groupName,
+Size: gcc.StateDB.CacheSizeInMB * 1024 * 1024,
+ExpiryDuration: time.Minute * time.Duration(gcc.StateDB.CacheExpiryInMins),
+})
+
+logStateDBStatsOnTimer(ethDB.(*ipfsethdb.Database), gcc)
+
return &Backend{
DB: db,
Retriever: r,
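NewEthBackend now derives the groupcache parameters for the ipfs-ethdb state database from operator-facing settings (size in MB, expiry in minutes, optional stats logging). A tiny sketch of that conversion, matching the arithmetic above; with the test's settings (8 MB, 60 minutes) it yields an 8 MiB cache whose entries expire after an hour:

package example

import "time"

// cacheParams converts the groupcache settings used above into the byte size
// and duration handed to the ipfs-ethdb cache.
func cacheParams(sizeMB, expiryMins int) (sizeBytes int, expiry time.Duration) {
	return sizeMB * 1024 * 1024, time.Minute * time.Duration(expiryMins)
}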
@@ -174,7 +203,11 @@ func (b *Backend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.Bl
if header == nil {
return nil, errors.New("header for hash not found")
}
-if blockNrOrHash.RequireCanonical && b.GetCanonicalHash(header.Number.Uint64()) != hash {
+canonicalHash, err := b.GetCanonicalHash(header.Number.Uint64())
+if err != nil {
+return nil, err
+}
+if blockNrOrHash.RequireCanonical && canonicalHash != hash {
return nil, errors.New("hash is not currently canonical")
}
return header, nil
@@ -197,9 +230,9 @@ func (b *Backend) GetTd(blockHash common.Hash) (*big.Int, error) {
}

// CurrentBlock returns the current block
-func (b *Backend) CurrentBlock() *types.Block {
+func (b *Backend) CurrentBlock() (*types.Block, error) {
-block, _ := b.BlockByNumber(context.Background(), rpc.LatestBlockNumber)
+block, err := b.BlockByNumber(context.Background(), rpc.LatestBlockNumber)
-return block
+return block, err
}

// BlockByNumberOrHash returns block by number or hash
@@ -215,7 +248,11 @@ func (b *Backend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.Blo
if header == nil {
return nil, errors.New("header for hash not found")
}
-if blockNrOrHash.RequireCanonical && b.GetCanonicalHash(header.Number.Uint64()) != hash {
+canonicalHash, err := b.GetCanonicalHash(header.Number.Uint64())
+if err != nil {
+return nil, err
+}
+if blockNrOrHash.RequireCanonical && canonicalHash != hash {
return nil, errors.New("hash is not currently canonical")
}
block, err := b.BlockByHash(ctx, hash)
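The block-fetching paths that follow adopt a convention of mapping sql.ErrNoRows from the CID retriever to a (nil, nil) return, so a block that simply is not indexed is rendered as null by the RPC layer rather than as an error. A minimal helper expressing that check (illustrative only; the actual code compares the error directly):

package example

import (
	"database/sql"
	"errors"
)

// isNotFound reports whether a retriever error only means the data is absent;
// callers translate that into a (nil, nil) return instead of surfacing
// "sql: no rows in result set" to the RPC client.
func isNotFound(err error) bool {
	return errors.Is(err, sql.ErrNoRows)
}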
@ -253,14 +290,20 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
|
|||||||
return nil, errNegativeBlockNumber
|
return nil, errNegativeBlockNumber
|
||||||
}
|
}
|
||||||
// Get the canonical hash
|
// Get the canonical hash
|
||||||
canonicalHash := b.GetCanonicalHash(uint64(number))
|
canonicalHash, err := b.GetCanonicalHash(uint64(number))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// Retrieve all the CIDs for the block
|
// Retrieve all the CIDs for the block
|
||||||
// TODO: optimize this by retrieving iplds directly rather than the cids first (this is remanent from when we fetched iplds through ipfs blockservice interface)
|
// TODO: optimize this by retrieving iplds directly rather than the cids first (this is remanent from when we fetched iplds through ipfs blockservice interface)
|
||||||
headerCID, uncleCIDs, txCIDs, rctCIDs, err := b.Retriever.RetrieveBlockByHash(canonicalHash)
|
headerCID, uncleCIDs, txCIDs, rctCIDs, err := b.Retriever.RetrieveBlockByHash(canonicalHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -281,49 +324,65 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
// Fetch and decode the header IPLD
|
// Fetch and decode the header IPLD
|
||||||
headerIPLD, err := b.Fetcher.FetchHeader(tx, headerCID)
|
var headerIPLD models.IPLDModel
|
||||||
|
headerIPLD, err = b.Fetcher.FetchHeader(tx, headerCID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var header types.Header
|
var header types.Header
|
||||||
if err := rlp.DecodeBytes(headerIPLD.Data, &header); err != nil {
|
err = rlp.DecodeBytes(headerIPLD.Data, &header)
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// Fetch and decode the uncle IPLDs
|
// Fetch and decode the uncle IPLDs
|
||||||
uncleIPLDs, err := b.Fetcher.FetchUncles(tx, uncleCIDs)
|
var uncleIPLDs []models.IPLDModel
|
||||||
|
uncleIPLDs, err = b.Fetcher.FetchUncles(tx, uncleCIDs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var uncles []*types.Header
|
var uncles []*types.Header
|
||||||
for _, uncleIPLD := range uncleIPLDs {
|
for _, uncleIPLD := range uncleIPLDs {
|
||||||
var uncle types.Header
|
var uncle types.Header
|
||||||
if err := rlp.DecodeBytes(uncleIPLD.Data, &uncle); err != nil {
|
err = rlp.DecodeBytes(uncleIPLD.Data, &uncle)
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
uncles = append(uncles, &uncle)
|
uncles = append(uncles, &uncle)
|
||||||
}
|
}
|
||||||
// Fetch and decode the transaction IPLDs
|
// Fetch and decode the transaction IPLDs
|
||||||
txIPLDs, err := b.Fetcher.FetchTrxs(tx, txCIDs)
|
var txIPLDs []models.IPLDModel
|
||||||
|
txIPLDs, err = b.Fetcher.FetchTrxs(tx, txCIDs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var transactions []*types.Transaction
|
var transactions []*types.Transaction
|
||||||
for _, txIPLD := range txIPLDs {
|
for _, txIPLD := range txIPLDs {
|
||||||
var transaction types.Transaction
|
var transaction types.Transaction
|
||||||
if err := rlp.DecodeBytes(txIPLD.Data, &transaction); err != nil {
|
err = transaction.UnmarshalBinary(txIPLD.Data)
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
transactions = append(transactions, &transaction)
|
transactions = append(transactions, &transaction)
|
||||||
}
|
}
|
||||||
// Fetch and decode the receipt IPLDs
|
// Fetch and decode the receipt IPLDs
|
||||||
rctIPLDs, err := b.Fetcher.FetchRcts(tx, rctCIDs)
|
var rctIPLDs []models.IPLDModel
|
||||||
|
rctIPLDs, err = b.Fetcher.FetchRcts(tx, rctCIDs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var receipts []*types.Receipt
|
var receipts []*types.Receipt
|
||||||
for _, rctIPLD := range rctIPLDs {
|
for _, rctIPLD := range rctIPLDs {
|
||||||
var receipt types.Receipt
|
var receipt types.Receipt
|
||||||
if err := rlp.DecodeBytes(rctIPLD.Data, &receipt); err != nil {
|
nodeVal, err := DecodeLeafNode(rctIPLD.Data)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = receipt.UnmarshalBinary(nodeVal)
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
receipts = append(receipts, &receipt)
|
receipts = append(receipts, &receipt)
|
||||||
@ -338,6 +397,9 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
|
|||||||
// Retrieve all the CIDs for the block
|
// Retrieve all the CIDs for the block
|
||||||
headerCID, uncleCIDs, txCIDs, rctCIDs, err := b.Retriever.RetrieveBlockByHash(hash)
|
headerCID, uncleCIDs, txCIDs, rctCIDs, err := b.Retriever.RetrieveBlockByHash(hash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -358,49 +420,74 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
// Fetch and decode the header IPLD
|
// Fetch and decode the header IPLD
|
||||||
headerIPLD, err := b.Fetcher.FetchHeader(tx, headerCID)
|
var headerIPLD models.IPLDModel
|
||||||
|
headerIPLD, err = b.Fetcher.FetchHeader(tx, headerCID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var header types.Header
|
var header types.Header
|
||||||
if err := rlp.DecodeBytes(headerIPLD.Data, &header); err != nil {
|
err = rlp.DecodeBytes(headerIPLD.Data, &header)
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// Fetch and decode the uncle IPLDs
|
// Fetch and decode the uncle IPLDs
|
||||||
uncleIPLDs, err := b.Fetcher.FetchUncles(tx, uncleCIDs)
|
var uncleIPLDs []models.IPLDModel
|
||||||
|
uncleIPLDs, err = b.Fetcher.FetchUncles(tx, uncleCIDs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var uncles []*types.Header
|
var uncles []*types.Header
|
||||||
for _, uncleIPLD := range uncleIPLDs {
|
for _, uncleIPLD := range uncleIPLDs {
|
||||||
var uncle types.Header
|
var uncle types.Header
|
||||||
if err := rlp.DecodeBytes(uncleIPLD.Data, &uncle); err != nil {
|
err = rlp.DecodeBytes(uncleIPLD.Data, &uncle)
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
uncles = append(uncles, &uncle)
|
uncles = append(uncles, &uncle)
|
||||||
}
|
}
|
||||||
// Fetch and decode the transaction IPLDs
|
// Fetch and decode the transaction IPLDs
|
||||||
txIPLDs, err := b.Fetcher.FetchTrxs(tx, txCIDs)
|
var txIPLDs []models.IPLDModel
|
||||||
|
txIPLDs, err = b.Fetcher.FetchTrxs(tx, txCIDs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var transactions []*types.Transaction
|
var transactions []*types.Transaction
|
||||||
for _, txIPLD := range txIPLDs {
|
for _, txIPLD := range txIPLDs {
|
||||||
var transaction types.Transaction
|
var transaction types.Transaction
|
||||||
if err := rlp.DecodeBytes(txIPLD.Data, &transaction); err != nil {
|
err = transaction.UnmarshalBinary(txIPLD.Data)
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
transactions = append(transactions, &transaction)
|
transactions = append(transactions, &transaction)
|
||||||
}
|
}
|
||||||
// Fetch and decode the receipt IPLDs
|
// Fetch and decode the receipt IPLDs
|
||||||
rctIPLDs, err := b.Fetcher.FetchRcts(tx, rctCIDs)
|
var rctIPLDs []models.IPLDModel
|
||||||
|
rctIPLDs, err = b.Fetcher.FetchRcts(tx, rctCIDs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
var receipts []*types.Receipt
|
var receipts []*types.Receipt
|
||||||
for _, rctIPLD := range rctIPLDs {
|
for _, rctIPLD := range rctIPLDs {
|
||||||
var receipt types.Receipt
|
var receipt types.Receipt
|
||||||
if err := rlp.DecodeBytes(rctIPLD.Data, &receipt); err != nil {
|
nodeVal, err := DecodeLeafNode(rctIPLD.Data)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = receipt.UnmarshalBinary(nodeVal)
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
receipts = append(receipts, &receipt)
|
receipts = append(receipts, &receipt)
|
||||||
@@ -422,7 +509,7 @@ func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*type
return nil, common.Hash{}, 0, 0, err
}
var transaction types.Transaction
-if err := rlp.DecodeBytes(tempTxStruct.Data, &transaction); err != nil {
+if err := transaction.UnmarshalBinary(tempTxStruct.Data); err != nil {
return nil, common.Hash{}, 0, 0, err
}
return &transaction, common.HexToHash(tempTxStruct.BlockHash), tempTxStruct.BlockNumber, tempTxStruct.Index, nil
@@ -430,16 +517,17 @@ func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*type

// GetReceipts retrieves receipts for provided block hash
func (b *Backend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
-_, receiptBytes, err := b.IPLDRetriever.RetrieveReceiptsByBlockHash(hash)
+_, receiptBytes, txs, err := b.IPLDRetriever.RetrieveReceiptsByBlockHash(hash)
if err != nil {
return nil, err
}
rcts := make(types.Receipts, len(receiptBytes))
for i, rctBytes := range receiptBytes {
rct := new(types.Receipt)
-if err := rlp.DecodeBytes(rctBytes, rct); err != nil {
+if err := rct.UnmarshalBinary(rctBytes); err != nil {
return nil, err
}
+rct.TxHash = txs[i]
rcts[i] = rct
}
return rcts, nil
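rlp.DecodeBytes is replaced with UnmarshalBinary because EIP-2718 typed transactions and receipts are stored as a one-byte type prefix followed by an RLP payload, which a plain RLP decode rejects. A minimal sketch using go-ethereum's types package; UnmarshalBinary also accepts legacy RLP, so no fallback is needed:

package example

import "github.com/ethereum/go-ethereum/core/types"

// decodeTx decodes a stored transaction body, handling both legacy RLP and
// the typed (EIP-2718) envelope.
func decodeTx(data []byte) (*types.Transaction, error) {
	tx := new(types.Transaction)
	if err := tx.UnmarshalBinary(data); err != nil {
		return nil, err
	}
	return tx, nil
}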
@@ -447,7 +535,7 @@ func (b *Backend) GetReceipts(ctx context.Context, hash common.Hash) (types.Rece

// GetLogs returns all the logs for the given block hash
func (b *Backend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
-_, receiptBytes, err := b.IPLDRetriever.RetrieveReceiptsByBlockHash(hash)
+_, receiptBytes, txs, err := b.IPLDRetriever.RetrieveReceiptsByBlockHash(hash)
if err != nil {
return nil, err
}
@@ -457,6 +545,11 @@ func (b *Backend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log
if err := rlp.DecodeBytes(rctBytes, &rct); err != nil {
return nil, err
}
+
+for _, log := range rct.Logs {
+log.TxHash = txs[i]
+}
+
logs[i] = rct.Logs
}
return logs, nil
@ -475,7 +568,11 @@ func (b *Backend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHas
|
|||||||
if header == nil {
|
if header == nil {
|
||||||
return nil, nil, errors.New("header for hash not found")
|
return nil, nil, errors.New("header for hash not found")
|
||||||
}
|
}
|
||||||
if blockNrOrHash.RequireCanonical && b.GetCanonicalHash(header.Number.Uint64()) != hash {
|
canonicalHash, err := b.GetCanonicalHash(header.Number.Uint64())
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if blockNrOrHash.RequireCanonical && canonicalHash != hash {
|
||||||
return nil, nil, errors.New("hash is not currently canonical")
|
return nil, nil, errors.New("hash is not currently canonical")
|
||||||
}
|
}
|
||||||
stateDb, err := state.New(header.Root, b.StateDatabase, nil)
|
stateDb, err := state.New(header.Root, b.StateDatabase, nil)
|
||||||
@ -503,12 +600,12 @@ func (b *Backend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNu
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetCanonicalHash gets the canonical hash for the provided number, if there is one
|
// GetCanonicalHash gets the canonical hash for the provided number, if there is one
|
||||||
func (b *Backend) GetCanonicalHash(number uint64) common.Hash {
|
func (b *Backend) GetCanonicalHash(number uint64) (common.Hash, error) {
|
||||||
var hashResult string
|
var hashResult string
|
||||||
if err := b.DB.Get(&hashResult, RetrieveCanonicalBlockHashByNumber, number); err != nil {
|
if err := b.DB.Get(&hashResult, RetrieveCanonicalBlockHashByNumber, number); err != nil {
|
||||||
return common.Hash{}
|
return common.Hash{}, err
|
||||||
}
|
}
|
||||||
return common.HexToHash(hashResult)
|
return common.HexToHash(hashResult), nil
|
||||||
}
|
}
|
||||||
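GetCanonicalHash now reports lookup failures instead of folding them into the zero hash, so callers can tell "no canonical header at this height" apart from a real database error. A self-contained sketch of the consuming pattern (the interface and sentinel error below are assumptions made for illustration, mirroring the sql.ErrNoRows handling in later hunks):

package main

import (
    "database/sql"
    "errors"
    "fmt"

    "github.com/ethereum/go-ethereum/common"
)

var errHeaderNotFound = errors.New("header not found")

// canonicalReader stands in for *Backend so this sketch compiles on its own.
type canonicalReader interface {
    GetCanonicalHash(number uint64) (common.Hash, error)
}

func canonicalHashAt(r canonicalReader, number uint64) (common.Hash, error) {
    hash, err := r.GetCanonicalHash(number)
    if err == sql.ErrNoRows {
        // No canonical header at this height; the old signature could only
        // hand back common.Hash{} here.
        return common.Hash{}, errHeaderNotFound
    } else if err != nil {
        return common.Hash{}, err
    }
    return hash, nil
}

func main() { fmt.Println("sketch; see canonicalHashAt") }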
|
|
||||||
type rowResult struct {
|
type rowResult struct {
|
||||||
@ -523,17 +620,17 @@ func (b *Backend) GetCanonicalHeader(number uint64) (string, []byte, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetEVM constructs and returns a vm.EVM
|
// GetEVM constructs and returns a vm.EVM
|
||||||
func (b *Backend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, error) {
|
func (b *Backend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) {
|
||||||
state.SetBalance(msg.From(), math.MaxBig256)
|
vmError := func() error { return nil }
|
||||||
vmctx := core.NewEVMBlockContext(header, b, nil)
|
|
||||||
txContext := core.NewEVMTxContext(msg)
|
txContext := core.NewEVMTxContext(msg)
|
||||||
return vm.NewEVM(vmctx, txContext, state, b.Config.ChainConfig, b.Config.VmConfig), nil
|
context := core.NewEVMBlockContext(header, b, nil)
|
||||||
|
return vm.NewEVM(context, txContext, state, b.Config.ChainConfig, b.Config.VMConfig), vmError, nil
|
||||||
}
|
}
|
||||||
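The extra func() error returned by GetEVM mirrors the shape of go-ethereum's internal ethapi backend: it lets a state-database error that surfaced while the EVM ran be checked after core.ApplyMessage returns. An illustrative fragment of that consumption pattern, not code from this diff (doCall and gasCap are assumed names, and the file's existing core, state, types and vm imports are assumed):

func doCall(ctx context.Context, b *Backend, msg core.Message, stateDB *state.StateDB, header *types.Header, gasCap uint64) (*core.ExecutionResult, error) {
    evm, vmError, err := b.GetEVM(ctx, msg, stateDB, header)
    if err != nil {
        return nil, err
    }
    result, applyErr := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(gasCap))
    // vmError reports any error the state database hit while the EVM executed.
    if err := vmError(); err != nil {
        return nil, err
    }
    return result, applyErr
}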
|
|
||||||
// GetAccountByNumberOrHash returns the account object for the provided address at the block corresponding to the provided number or hash
|
// GetAccountByNumberOrHash returns the account object for the provided address at the block corresponding to the provided number or hash
|
||||||
func (b *Backend) GetAccountByNumberOrHash(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*state.Account, error) {
|
func (b *Backend) GetAccountByNumberOrHash(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*types.StateAccount, error) {
|
||||||
if blockNr, ok := blockNrOrHash.Number(); ok {
|
if blockNr, ok := blockNrOrHash.Number(); ok {
|
||||||
return b.GetAccountByNumber(ctx, address, uint64(blockNr.Int64()))
|
return b.GetAccountByNumber(ctx, address, blockNr)
|
||||||
}
|
}
|
||||||
if hash, ok := blockNrOrHash.Hash(); ok {
|
if hash, ok := blockNrOrHash.Hash(); ok {
|
||||||
return b.GetAccountByHash(ctx, address, hash)
|
return b.GetAccountByHash(ctx, address, hash)
|
||||||
@ -542,28 +639,56 @@ func (b *Backend) GetAccountByNumberOrHash(ctx context.Context, address common.A
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetAccountByNumber returns the account object for the provided address at the canonical block at the provided height
|
// GetAccountByNumber returns the account object for the provided address at the canonical block at the provided height
|
||||||
func (b *Backend) GetAccountByNumber(ctx context.Context, address common.Address, number uint64) (*state.Account, error) {
|
func (b *Backend) GetAccountByNumber(ctx context.Context, address common.Address, blockNumber rpc.BlockNumber) (*types.StateAccount, error) {
|
||||||
hash := b.GetCanonicalHash(number)
|
var err error
|
||||||
if hash == (common.Hash{}) {
|
number := blockNumber.Int64()
|
||||||
return nil, fmt.Errorf("no canoncial block hash found for provided height (%d)", number)
|
if blockNumber == rpc.LatestBlockNumber {
|
||||||
|
number, err = b.Retriever.RetrieveLastBlockNumber()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
if blockNumber == rpc.EarliestBlockNumber {
|
||||||
|
number, err = b.Retriever.RetrieveFirstBlockNumber()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if blockNumber == rpc.PendingBlockNumber {
|
||||||
|
return nil, errPendingBlockNumber
|
||||||
|
}
|
||||||
|
hash, err := b.GetCanonicalHash(uint64(number))
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return nil, errHeaderNotFound
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
return b.GetAccountByHash(ctx, address, hash)
|
return b.GetAccountByHash(ctx, address, hash)
|
||||||
}
|
}
|
||||||
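Taking rpc.BlockNumber lets the named pseudo-heights (latest, earliest, pending) be resolved inside the backend instead of being truncated to a uint64 by the caller. A small usage fragment (b, ctx and addr are placeholders for illustration):

// Illustrative fragment, not code from this diff.
acct, err := b.GetAccountByNumber(ctx, addr, rpc.LatestBlockNumber)
if err != nil {
    return err
}
fmt.Printf("nonce=%d balance=%s\n", acct.Nonce, acct.Balance)
// rpc.PendingBlockNumber is rejected with errPendingBlockNumber, presumably
// because the server only answers from canonical, already-indexed data.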
|
|
||||||
// GetAccountByHash returns the account object for the provided address at the block with the provided hash
|
// GetAccountByHash returns the account object for the provided address at the block with the provided hash
|
||||||
func (b *Backend) GetAccountByHash(ctx context.Context, address common.Address, hash common.Hash) (*state.Account, error) {
|
func (b *Backend) GetAccountByHash(ctx context.Context, address common.Address, hash common.Hash) (*types.StateAccount, error) {
|
||||||
|
_, err := b.HeaderByHash(context.Background(), hash)
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return nil, errHeaderHashNotFound
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
_, accountRlp, err := b.IPLDRetriever.RetrieveAccountByAddressAndBlockHash(address, hash)
|
_, accountRlp, err := b.IPLDRetriever.RetrieveAccountByAddressAndBlockHash(address, hash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
acct := new(state.Account)
|
|
||||||
|
acct := new(types.StateAccount)
|
||||||
return acct, rlp.DecodeBytes(accountRlp, acct)
|
return acct, rlp.DecodeBytes(accountRlp, acct)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetCodeByNumberOrHash returns the byte code for the contract deployed at the provided address at the block with the provided hash or block number
|
// GetCodeByNumberOrHash returns the byte code for the contract deployed at the provided address at the block with the provided hash or block number
|
||||||
func (b *Backend) GetCodeByNumberOrHash(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) ([]byte, error) {
|
func (b *Backend) GetCodeByNumberOrHash(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) ([]byte, error) {
|
||||||
if blockNr, ok := blockNrOrHash.Number(); ok {
|
if blockNr, ok := blockNrOrHash.Number(); ok {
|
||||||
return b.GetCodeByNumber(ctx, address, uint64(blockNr.Int64()))
|
return b.GetCodeByNumber(ctx, address, blockNr)
|
||||||
}
|
}
|
||||||
if hash, ok := blockNrOrHash.Hash(); ok {
|
if hash, ok := blockNrOrHash.Hash(); ok {
|
||||||
return b.GetCodeByHash(ctx, address, hash)
|
return b.GetCodeByHash(ctx, address, hash)
|
||||||
@ -572,8 +697,28 @@ func (b *Backend) GetCodeByNumberOrHash(ctx context.Context, address common.Addr
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetCodeByNumber returns the byte code for the contract deployed at the provided address at the canonical block with the provided block number
|
// GetCodeByNumber returns the byte code for the contract deployed at the provided address at the canonical block with the provided block number
|
||||||
func (b *Backend) GetCodeByNumber(ctx context.Context, address common.Address, number uint64) ([]byte, error) {
|
func (b *Backend) GetCodeByNumber(ctx context.Context, address common.Address, blockNumber rpc.BlockNumber) ([]byte, error) {
|
||||||
hash := b.GetCanonicalHash(number)
|
var err error
|
||||||
|
number := blockNumber.Int64()
|
||||||
|
if blockNumber == rpc.LatestBlockNumber {
|
||||||
|
number, err = b.Retriever.RetrieveLastBlockNumber()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if blockNumber == rpc.EarliestBlockNumber {
|
||||||
|
number, err = b.Retriever.RetrieveFirstBlockNumber()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if blockNumber == rpc.PendingBlockNumber {
|
||||||
|
return nil, errPendingBlockNumber
|
||||||
|
}
|
||||||
|
hash, err := b.GetCanonicalHash(uint64(number))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
if hash == (common.Hash{}) {
|
if hash == (common.Hash{}) {
|
||||||
return nil, fmt.Errorf("no canoncial block hash found for provided height (%d)", number)
|
return nil, fmt.Errorf("no canoncial block hash found for provided height (%d)", number)
|
||||||
}
|
}
|
||||||
@ -599,10 +744,12 @@ func (b *Backend) GetCodeByHash(ctx context.Context, address common.Address, has
|
|||||||
err = tx.Commit()
|
err = tx.Commit()
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
if err := tx.Get(&codeHash, RetrieveCodeHashByLeafKeyAndBlockHash, leafKey.Hex(), hash.Hex()); err != nil {
|
err = tx.Get(&codeHash, RetrieveCodeHashByLeafKeyAndBlockHash, leafKey.Hex(), hash.Hex())
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
mhKey, err := shared2.MultihashKeyFromKeccak256(common.BytesToHash(codeHash))
|
var mhKey string
|
||||||
|
mhKey, err = ethServerShared.MultihashKeyFromKeccak256(common.BytesToHash(codeHash))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -612,28 +759,55 @@ func (b *Backend) GetCodeByHash(ctx context.Context, address common.Address, has
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetStorageByNumberOrHash returns the storage value for the provided contract address and storage key at the block corresponding to the provided number or hash
|
// GetStorageByNumberOrHash returns the storage value for the provided contract address and storage key at the block corresponding to the provided number or hash
|
||||||
func (b *Backend) GetStorageByNumberOrHash(ctx context.Context, address common.Address, storageLeafKey common.Hash, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) {
|
func (b *Backend) GetStorageByNumberOrHash(ctx context.Context, address common.Address, key common.Hash, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) {
|
||||||
if blockNr, ok := blockNrOrHash.Number(); ok {
|
if blockNr, ok := blockNrOrHash.Number(); ok {
|
||||||
return b.GetStorageByNumber(ctx, address, storageLeafKey, uint64(blockNr.Int64()))
|
return b.GetStorageByNumber(ctx, address, key, blockNr)
|
||||||
}
|
}
|
||||||
if hash, ok := blockNrOrHash.Hash(); ok {
|
if hash, ok := blockNrOrHash.Hash(); ok {
|
||||||
return b.GetStorageByHash(ctx, address, storageLeafKey, hash)
|
return b.GetStorageByHash(ctx, address, key, hash)
|
||||||
}
|
}
|
||||||
return nil, errors.New("invalid arguments; neither block nor hash specified")
|
return nil, errors.New("invalid arguments; neither block nor hash specified")
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetStorageByNumber returns the storage value for the provided contract address and storage key at the block corresponding to the provided number
|
// GetStorageByNumber returns the storage value for the provided contract address and storage key at the block corresponding to the provided number
|
||||||
func (b *Backend) GetStorageByNumber(ctx context.Context, address common.Address, storageLeafKey common.Hash, number uint64) (hexutil.Bytes, error) {
|
func (b *Backend) GetStorageByNumber(ctx context.Context, address common.Address, key common.Hash, blockNumber rpc.BlockNumber) (hexutil.Bytes, error) {
|
||||||
hash := b.GetCanonicalHash(number)
|
var err error
|
||||||
if hash == (common.Hash{}) {
|
number := blockNumber.Int64()
|
||||||
return nil, fmt.Errorf("no canoncial block hash found for provided height (%d)", number)
|
if blockNumber == rpc.LatestBlockNumber {
|
||||||
|
number, err = b.Retriever.RetrieveLastBlockNumber()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return b.GetStorageByHash(ctx, address, storageLeafKey, hash)
|
if blockNumber == rpc.EarliestBlockNumber {
|
||||||
|
number, err = b.Retriever.RetrieveFirstBlockNumber()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if blockNumber == rpc.PendingBlockNumber {
|
||||||
|
return nil, errPendingBlockNumber
|
||||||
|
}
|
||||||
|
hash, err := b.GetCanonicalHash(uint64(number))
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return nil, errHeaderNotFound
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.GetStorageByHash(ctx, address, key, hash)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetStorageByHash returns the storage value for the provided contract address and storage key at the block corresponding to the provided hash
|
// GetStorageByHash returns the storage value for the provided contract address and storage key at the block corresponding to the provided hash
|
||||||
func (b *Backend) GetStorageByHash(ctx context.Context, address common.Address, storageLeafKey, hash common.Hash) (hexutil.Bytes, error) {
|
func (b *Backend) GetStorageByHash(ctx context.Context, address common.Address, key, hash common.Hash) (hexutil.Bytes, error) {
|
||||||
_, storageRlp, err := b.IPLDRetriever.RetrieveStorageAtByAddressAndStorageKeyAndBlockHash(address, storageLeafKey, hash)
|
_, err := b.HeaderByHash(context.Background(), hash)
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return nil, errHeaderHashNotFound
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _, storageRlp, err := b.IPLDRetriever.RetrieveStorageAtByAddressAndStorageSlotAndBlockHash(address, key, hash)
|
||||||
return storageRlp, err
|
return storageRlp, err
|
||||||
}
|
}
|
||||||
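The rename from storageLeafKey to key, together with the retriever method changing to RetrieveStorageAtByAddressAndStorageSlotAndBlockHash, suggests callers now pass the raw storage slot and any hashing to the trie leaf key happens below this layer. A hedged usage fragment (b, ctx and contractAddr are placeholders):

// Illustrative fragment, not code from this diff.
slot := common.BigToHash(big.NewInt(0)) // raw storage slot 0, not its keccak leaf key
val, err := b.GetStorageByNumberOrHash(ctx, contractAddr, slot,
    rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber))
if err != nil {
    return err
}
fmt.Printf("slot 0 value (RLP-encoded): %x\n", val)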
|
|
||||||
@ -652,6 +826,11 @@ func (b *Backend) GetHeader(hash common.Hash, height uint64) *types.Header {
|
|||||||
return header
|
return header
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ValidateTrie validates the trie for the given stateRoot
|
||||||
|
func (b *Backend) ValidateTrie(stateRoot common.Hash) error {
|
||||||
|
return validator.NewValidator(nil, b.EthDB).ValidateTrie(stateRoot)
|
||||||
|
}
|
||||||
|
|
||||||
// RPCGasCap returns the configured gas cap for the rpc server
|
// RPCGasCap returns the configured gas cap for the rpc server
|
||||||
func (b *Backend) RPCGasCap() *big.Int {
|
func (b *Backend) RPCGasCap() *big.Int {
|
||||||
return b.Config.RPCGasCap
|
return b.Config.RPCGasCap
|
||||||
@ -684,3 +863,18 @@ func (b *Backend) BloomStatus() (uint64, uint64) {
|
|||||||
func (b *Backend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
|
func (b *Backend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
|
||||||
panic("implement me")
|
panic("implement me")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func logStateDBStatsOnTimer(ethDB *ipfsethdb.Database, gcc *shared.GroupCacheConfig) {
|
||||||
|
// No stats logging if interval isn't a positive integer.
|
||||||
|
if gcc.StateDB.LogStatsIntervalInSecs <= 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ticker := time.NewTicker(time.Duration(gcc.StateDB.LogStatsIntervalInSecs) * time.Second)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
for range ticker.C {
|
||||||
|
log.Infof("%s groupcache stats: %+v", StateDBGroupCacheName, ethDB.GetCacheStats())
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
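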
|
@ -23,20 +23,18 @@ import (
|
|||||||
"math/big"
|
"math/big"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum"
|
"github.com/ethereum/go-ethereum"
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
|
"github.com/ethereum/go-ethereum/common/math"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// RPCMarshalHeader converts the given header to the RPC output.
|
// RPCMarshalHeader converts the given header to the RPC output.
|
||||||
// This function is internal to the eth package, so we have to make our own version here...
|
// This function is internal to the eth package, so we have to make our own version here...
|
||||||
func RPCMarshalHeader(head *types.Header) map[string]interface{} {
|
func RPCMarshalHeader(head *types.Header) map[string]interface{} {
|
||||||
return map[string]interface{}{
|
headerMap := map[string]interface{}{
|
||||||
"number": (*hexutil.Big)(head.Number),
|
"number": (*hexutil.Big)(head.Number),
|
||||||
"hash": head.Hash(),
|
"hash": head.Hash(),
|
||||||
"parentHash": head.ParentHash,
|
"parentHash": head.ParentHash,
|
||||||
@ -55,6 +53,11 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} {
|
|||||||
"transactionsRoot": head.TxHash,
|
"transactionsRoot": head.TxHash,
|
||||||
"receiptsRoot": head.ReceiptHash,
|
"receiptsRoot": head.ReceiptHash,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if head.BaseFee != nil {
|
||||||
|
headerMap["baseFee"] = head.BaseFee
|
||||||
|
}
|
||||||
|
return headerMap
|
||||||
}
|
}
|
||||||
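The baseFee entry is added only when the header actually carries one, so pre-London headers keep their original field set rather than gaining a null value. A small sketch of the observable difference (header values are made up; 12,965,000 is mainnet's London activation height):

// Illustrative fragment, not code from this diff.
london := &types.Header{Number: big.NewInt(12_965_000), BaseFee: big.NewInt(1_000_000_000)}
legacy := &types.Header{Number: big.NewInt(12_964_999)} // BaseFee == nil

_, ok := RPCMarshalHeader(london)["baseFee"] // ok == true
_, ok = RPCMarshalHeader(legacy)["baseFee"]  // ok == false: the key is simply absent
_ = ok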
|
|
||||||
// RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are
|
// RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are
|
||||||
@ -117,8 +120,8 @@ func RPCMarshalBlockWithUncleHashes(block *types.Block, uncleHashes []common.Has
|
|||||||
}
|
}
|
||||||
fields["transactions"] = transactions
|
fields["transactions"] = transactions
|
||||||
}
|
}
|
||||||
fields["uncles"] = uncleHashes
|
|
||||||
|
|
||||||
|
fields["uncles"] = uncleHashes
|
||||||
return fields, nil
|
return fields, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -134,14 +137,15 @@ func NewRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransa
|
|||||||
|
|
||||||
// NewRPCTransaction returns a transaction that will serialize to the RPC
|
// NewRPCTransaction returns a transaction that will serialize to the RPC
|
||||||
// representation, with the given location metadata set (if available).
|
// representation, with the given location metadata set (if available).
|
||||||
func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64) *RPCTransaction {
|
func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64, baseFee *big.Int) *RPCTransaction {
|
||||||
var signer types.Signer = types.FrontierSigner{}
|
var signer types.Signer
|
||||||
if tx.Protected() {
|
if tx.Protected() {
|
||||||
signer = types.NewEIP155Signer(tx.ChainId())
|
signer = types.LatestSignerForChainID(tx.ChainId())
|
||||||
|
} else {
|
||||||
|
signer = types.HomesteadSigner{}
|
||||||
}
|
}
|
||||||
from, _ := types.Sender(signer, tx)
|
from, _ := types.Sender(signer, tx)
|
||||||
v, r, s := tx.RawSignatureValues()
|
v, r, s := tx.RawSignatureValues()
|
||||||
|
|
||||||
result := &RPCTransaction{
|
result := &RPCTransaction{
|
||||||
From: from,
|
From: from,
|
||||||
Gas: hexutil.Uint64(tx.Gas()),
|
Gas: hexutil.Uint64(tx.Gas()),
|
||||||
@ -151,6 +155,7 @@ func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
|
|||||||
Nonce: hexutil.Uint64(tx.Nonce()),
|
Nonce: hexutil.Uint64(tx.Nonce()),
|
||||||
To: tx.To(),
|
To: tx.To(),
|
||||||
Value: (*hexutil.Big)(tx.Value()),
|
Value: (*hexutil.Big)(tx.Value()),
|
||||||
|
Type: hexutil.Uint64(tx.Type()),
|
||||||
V: (*hexutil.Big)(v),
|
V: (*hexutil.Big)(v),
|
||||||
R: (*hexutil.Big)(r),
|
R: (*hexutil.Big)(r),
|
||||||
S: (*hexutil.Big)(s),
|
S: (*hexutil.Big)(s),
|
||||||
@ -160,6 +165,26 @@ func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
|
|||||||
result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber))
|
result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber))
|
||||||
result.TransactionIndex = (*hexutil.Uint64)(&index)
|
result.TransactionIndex = (*hexutil.Uint64)(&index)
|
||||||
}
|
}
|
||||||
|
switch tx.Type() {
|
||||||
|
case types.AccessListTxType:
|
||||||
|
al := tx.AccessList()
|
||||||
|
result.Accesses = &al
|
||||||
|
result.ChainID = (*hexutil.Big)(tx.ChainId())
|
||||||
|
case types.DynamicFeeTxType:
|
||||||
|
al := tx.AccessList()
|
||||||
|
result.Accesses = &al
|
||||||
|
result.ChainID = (*hexutil.Big)(tx.ChainId())
|
||||||
|
result.GasFeeCap = (*hexutil.Big)(tx.GasFeeCap())
|
||||||
|
result.GasTipCap = (*hexutil.Big)(tx.GasTipCap())
|
||||||
|
// if the transaction has been mined, compute the effective gas price
|
||||||
|
if baseFee != nil && blockHash != (common.Hash{}) {
|
||||||
|
// price = min(tip, gasFeeCap - baseFee) + baseFee
|
||||||
|
price := math.BigMin(new(big.Int).Add(tx.GasTipCap(), baseFee), tx.GasFeeCap())
|
||||||
|
result.GasPrice = (*hexutil.Big)(price)
|
||||||
|
} else {
|
||||||
|
result.GasPrice = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
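For dynamic-fee transactions the comment and the math.BigMin expression are the same quantity written two ways: min(tip, gasFeeCap - baseFee) + baseFee equals min(tip + baseFee, gasFeeCap). A self-contained restatement with one worked value:

package main

import (
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/common/math"
)

// effectiveGasPrice mirrors the DynamicFeeTxType branch above:
// min(tip, feeCap-baseFee) + baseFee == min(tip+baseFee, feeCap).
func effectiveGasPrice(tip, feeCap, baseFee *big.Int) *big.Int {
    return math.BigMin(new(big.Int).Add(tip, baseFee), feeCap)
}

func main() {
    // tip 2 gwei, fee cap 100 gwei, base fee 30 gwei -> 32 gwei effective price
    fmt.Println(effectiveGasPrice(big.NewInt(2e9), big.NewInt(100e9), big.NewInt(30e9)))
}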
|
|
||||||
@ -240,55 +265,7 @@ func newRPCTransactionFromBlockIndex(b *types.Block, index uint64) *RPCTransacti
|
|||||||
if index >= uint64(len(txs)) {
|
if index >= uint64(len(txs)) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return NewRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index)
|
return NewRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index, b.BaseFee())
|
||||||
}
|
|
||||||
|
|
||||||
// extractLogsOfInterest returns logs from the receipt IPLD
|
|
||||||
func extractLogsOfInterest(rctIPLDs []ipfs.BlockModel, wantedTopics [][]string) ([]*types.Log, error) {
|
|
||||||
var logs []*types.Log
|
|
||||||
for _, rctIPLD := range rctIPLDs {
|
|
||||||
rctRLP := rctIPLD
|
|
||||||
var rct types.Receipt
|
|
||||||
if err := rlp.DecodeBytes(rctRLP.Data, &rct); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, log := range rct.Logs {
|
|
||||||
if wanted := wantedLog(wantedTopics, log.Topics); wanted == true {
|
|
||||||
logs = append(logs, log)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return logs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns true if the log matches on the filter
|
|
||||||
func wantedLog(wantedTopics [][]string, actualTopics []common.Hash) bool {
|
|
||||||
// actualTopics will always have length <= 4
|
|
||||||
// wantedTopics will always have length 4
|
|
||||||
matches := 0
|
|
||||||
for i, actualTopic := range actualTopics {
|
|
||||||
// If we have topics in this filter slot, count as a match if the actualTopic matches one of the ones in this filter slot
|
|
||||||
if len(wantedTopics[i]) > 0 {
|
|
||||||
matches += sliceContainsHash(wantedTopics[i], actualTopic)
|
|
||||||
} else {
|
|
||||||
// Filter slot is empty, not matching any topics at this slot => counts as a match
|
|
||||||
matches++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if matches == len(actualTopics) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// returns 1 if the slice contains the hash, 0 if it does not
|
|
||||||
func sliceContainsHash(slice []string, hash common.Hash) int {
|
|
||||||
for _, str := range slice {
|
|
||||||
if str == hash.String() {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func toFilterArg(q ethereum.FilterQuery) (interface{}, error) {
|
func toFilterArg(q ethereum.FilterQuery) (interface{}, error) {
|
||||||
|
@ -22,14 +22,14 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
||||||
"github.com/jmoiron/sqlx"
|
"github.com/jmoiron/sqlx"
|
||||||
"github.com/lib/pq"
|
"github.com/lib/pq"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
|
"gorm.io/driver/postgres"
|
||||||
|
"gorm.io/gorm"
|
||||||
|
|
||||||
"github.com/vulcanize/ipld-eth-indexer/pkg/eth"
|
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
|
||||||
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
|
|
||||||
|
|
||||||
"github.com/vulcanize/ipld-eth-server/pkg/shared"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Retriever interface for substituting mocks in tests
|
// Retriever interface for substituting mocks in tests
|
||||||
@ -41,13 +41,74 @@ type Retriever interface {
|
|||||||
|
|
||||||
// CIDRetriever satisfies the CIDRetriever interface for ethereum
|
// CIDRetriever satisfies the CIDRetriever interface for ethereum
|
||||||
type CIDRetriever struct {
|
type CIDRetriever struct {
|
||||||
db *postgres.DB
|
db *sqlx.DB
|
||||||
|
gormDB *gorm.DB
|
||||||
|
}
|
||||||
|
|
||||||
|
type IPLDModelRecord struct {
|
||||||
|
models.IPLDModel
|
||||||
|
}
|
||||||
|
|
||||||
|
// TableName overrides the table name used by IPLD
|
||||||
|
func (IPLDModelRecord) TableName() string {
|
||||||
|
return "public.blocks"
|
||||||
|
}
|
||||||
|
|
||||||
|
type HeaderCIDRecord struct {
|
||||||
|
CID string `gorm:"column:cid"`
|
||||||
|
BlockHash string `gorm:"primaryKey"`
|
||||||
|
BlockNumber string
|
||||||
|
ParentHash string
|
||||||
|
Timestamp uint64
|
||||||
|
StateRoot string
|
||||||
|
TotalDifficulty string `gorm:"column:td"`
|
||||||
|
TxRoot string
|
||||||
|
RctRoot string `gorm:"column:receipt_root"`
|
||||||
|
UncleRoot string
|
||||||
|
Bloom []byte
|
||||||
|
MhKey string
|
||||||
|
|
||||||
|
// gorm doesn't check whether the foreign key exists in the database.
|
||||||
|
// Related records must therefore be eager loaded using Preload.
|
||||||
|
TransactionCIDs []TransactionCIDRecord `gorm:"foreignKey:HeaderID;references:BlockHash"`
|
||||||
|
IPLD IPLDModelRecord `gorm:"foreignKey:MhKey;references:Key"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// TableName overrides the table name used by HeaderCIDRecord
|
||||||
|
func (HeaderCIDRecord) TableName() string {
|
||||||
|
return "eth.header_cids"
|
||||||
|
}
|
||||||
|
|
||||||
|
type TransactionCIDRecord struct {
|
||||||
|
CID string `gorm:"column:cid"`
|
||||||
|
TxHash string `gorm:"primaryKey"`
|
||||||
|
HeaderID string `gorm:"column:header_id"`
|
||||||
|
Index int64
|
||||||
|
Src string
|
||||||
|
Dst string
|
||||||
|
MhKey string
|
||||||
|
IPLD IPLDModelRecord `gorm:"foreignKey:MhKey;references:Key"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// TableName overrides the table name used by TransactionCIDRecord
|
||||||
|
func (TransactionCIDRecord) TableName() string {
|
||||||
|
return "eth.transaction_cids"
|
||||||
}
|
}
|
||||||
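These gorm-mapped records exist so a header can be eager-loaded together with its transactions and the raw IPLD blocks; as the comment above notes, gorm will not follow the foreign keys on its own. A hedged sketch of such a query (this call site is an assumption, not part of this diff; ecr.gormDB is the handle opened in NewCIDRetriever below and blockHash is a placeholder):

// Illustrative fragment, not code from this diff.
var header HeaderCIDRecord
err := ecr.gormDB.
    Preload("TransactionCIDs").      // eth.transaction_cids joined on header_id = block_hash
    Preload("TransactionCIDs.IPLD"). // each transaction's raw block keyed by mh_key
    Preload("IPLD").                 // the header's own IPLD block
    Where("block_hash = ?", blockHash.Hex()).
    First(&header).Error
if err != nil {
    return err
}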
|
|
||||||
// NewCIDRetriever returns a pointer to a new CIDRetriever which supports the CIDRetriever interface
|
// NewCIDRetriever returns a pointer to a new CIDRetriever which supports the CIDRetriever interface
|
||||||
func NewCIDRetriever(db *postgres.DB) *CIDRetriever {
|
func NewCIDRetriever(db *sqlx.DB) *CIDRetriever {
|
||||||
|
gormDB, err := gorm.Open(postgres.New(postgres.Config{
|
||||||
|
Conn: db,
|
||||||
|
}), &gorm.Config{})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Error(err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
return &CIDRetriever{
|
return &CIDRetriever{
|
||||||
db: db,
|
db: db,
|
||||||
|
gormDB: gormDB,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
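NewCIDRetriever now layers a gorm session over the existing sqlx connection instead of opening a second pool; note that a gorm.Open failure is only logged and the constructor returns nil, so callers should nil-check. A hedged construction sketch (the DSN is a placeholder):

// Illustrative fragment, not code from this diff.
db, err := sqlx.Connect("postgres", "postgres://vdbm:password@localhost:5432/vulcanize_public?sslmode=disable")
if err != nil {
    log.Fatal(err)
}
retriever := NewCIDRetriever(db)
if retriever == nil {
    log.Fatal("gorm session could not be opened over the shared connection")
}

Returning nil rather than an error from the constructor is a little unusual; surfacing the gorm.Open error to the caller would be the more conventional choice.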
|
|
||||||
@ -86,9 +147,10 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
// Retrieve cached header CIDs at this block height
|
// Retrieve cached header CIDs at this block height
|
||||||
headers, err := ecr.RetrieveHeaderCIDs(tx, blockNumber)
|
var headers []models.HeaderModel
|
||||||
|
headers, err = ecr.RetrieveHeaderCIDs(tx, blockNumber)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("header cid retrieval error")
|
log.Error("header cid retrieval error", err)
|
||||||
return nil, true, err
|
return nil, true, err
|
||||||
}
|
}
|
||||||
cws := make([]CIDWrapper, len(headers))
|
cws := make([]CIDWrapper, len(headers))
|
||||||
@ -101,7 +163,8 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
|
|||||||
empty = false
|
empty = false
|
||||||
if filter.HeaderFilter.Uncles {
|
if filter.HeaderFilter.Uncles {
|
||||||
// Retrieve uncle cids for this header id
|
// Retrieve uncle cids for this header id
|
||||||
uncleCIDs, err := ecr.RetrieveUncleCIDsByHeaderID(tx, header.ID)
|
var uncleCIDs []models.UncleModel
|
||||||
|
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, header.BlockHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("uncle cid retrieval error")
|
log.Error("uncle cid retrieval error")
|
||||||
return nil, true, err
|
return nil, true, err
|
||||||
@ -111,7 +174,7 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
|
|||||||
}
|
}
|
||||||
// Retrieve cached trx CIDs
|
// Retrieve cached trx CIDs
|
||||||
if !filter.TxFilter.Off {
|
if !filter.TxFilter.Off {
|
||||||
cw.Transactions, err = ecr.RetrieveTxCIDs(tx, filter.TxFilter, header.ID)
|
cw.Transactions, err = ecr.RetrieveTxCIDs(tx, filter.TxFilter, header.BlockHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("transaction cid retrieval error")
|
log.Error("transaction cid retrieval error")
|
||||||
return nil, true, err
|
return nil, true, err
|
||||||
@ -120,13 +183,13 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
|
|||||||
empty = false
|
empty = false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
trxIds := make([]int64, len(cw.Transactions))
|
trxHashes := make([]string, len(cw.Transactions))
|
||||||
for j, tx := range cw.Transactions {
|
for j, t := range cw.Transactions {
|
||||||
trxIds[j] = tx.ID
|
trxHashes[j] = t.TxHash
|
||||||
}
|
}
|
||||||
// Retrieve cached receipt CIDs
|
// Retrieve cached receipt CIDs
|
||||||
if !filter.ReceiptFilter.Off {
|
if !filter.ReceiptFilter.Off {
|
||||||
cw.Receipts, err = ecr.RetrieveRctCIDsByHeaderID(tx, filter.ReceiptFilter, header.ID, trxIds)
|
cw.Receipts, err = ecr.RetrieveRctCIDsByHeaderID(tx, filter.ReceiptFilter, header.BlockHash, trxHashes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("receipt cid retrieval error")
|
log.Error("receipt cid retrieval error")
|
||||||
return nil, true, err
|
return nil, true, err
|
||||||
@ -137,7 +200,7 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
|
|||||||
}
|
}
|
||||||
// Retrieve cached state CIDs
|
// Retrieve cached state CIDs
|
||||||
if !filter.StateFilter.Off {
|
if !filter.StateFilter.Off {
|
||||||
cw.StateNodes, err = ecr.RetrieveStateCIDs(tx, filter.StateFilter, header.ID)
|
cw.StateNodes, err = ecr.RetrieveStateCIDs(tx, filter.StateFilter, header.BlockHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("state cid retrieval error")
|
log.Error("state cid retrieval error")
|
||||||
return nil, true, err
|
return nil, true, err
|
||||||
@ -148,7 +211,7 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
|
|||||||
}
|
}
|
||||||
// Retrieve cached storage CIDs
|
// Retrieve cached storage CIDs
|
||||||
if !filter.StorageFilter.Off {
|
if !filter.StorageFilter.Off {
|
||||||
cw.StorageNodes, err = ecr.RetrieveStorageCIDs(tx, filter.StorageFilter, header.ID)
|
cw.StorageNodes, err = ecr.RetrieveStorageCIDs(tx, filter.StorageFilter, header.BlockHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("storage cid retrieval error")
|
log.Error("storage cid retrieval error")
|
||||||
return nil, true, err
|
return nil, true, err
|
||||||
@ -164,35 +227,36 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
|
|||||||
}
|
}
|
||||||
|
|
||||||
// RetrieveHeaderCIDs retrieves and returns all of the header cids at the provided blockheight
|
// RetrieveHeaderCIDs retrieves and returns all of the header cids at the provided blockheight
|
||||||
func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]eth.HeaderModel, error) {
|
func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]models.HeaderModel, error) {
|
||||||
log.Debug("retrieving header cids for block ", blockNumber)
|
log.Debug("retrieving header cids for block ", blockNumber)
|
||||||
headers := make([]eth.HeaderModel, 0)
|
headers := make([]models.HeaderModel, 0)
|
||||||
pgStr := `SELECT * FROM eth.header_cids
|
pgStr := `SELECT CAST(block_number as Text), block_hash,parent_hash,cid,mh_key,CAST(td as Text),node_id,
|
||||||
|
CAST(reward as Text), state_root, uncle_root, tx_root, receipt_root, bloom, timestamp, times_validated,
|
||||||
|
coinbase FROM eth.header_cids
|
||||||
WHERE block_number = $1`
|
WHERE block_number = $1`
|
||||||
return headers, tx.Select(&headers, pgStr, blockNumber)
|
return headers, tx.Select(&headers, pgStr, blockNumber)
|
||||||
}
|
}
|
||||||
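The CAST(... as Text) calls are presumably there because block_number, td and reward are stored as Postgres NUMERIC while the matching fields on models.HeaderModel are Go strings, so selecting them as text lets sqlx scan the rows directly. A minimal sketch of the shape being scanned into (the real struct is models.HeaderModel from the go-ethereum statediff indexer; the tags below are assumptions based on the column list in the query):

// Sketch only, not the real model definition.
type headerRow struct {
    BlockNumber     string `db:"block_number"` // CAST(block_number as Text)
    BlockHash       string `db:"block_hash"`
    TotalDifficulty string `db:"td"`     // CAST(td as Text)
    Reward          string `db:"reward"` // CAST(reward as Text)
}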
|
|
||||||
// RetrieveUncleCIDsByHeaderID retrieves and returns all of the uncle cids for the provided header
|
// RetrieveUncleCIDsByHeaderID retrieves and returns all of the uncle cids for the provided header
|
||||||
func (ecr *CIDRetriever) RetrieveUncleCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ([]eth.UncleModel, error) {
|
func (ecr *CIDRetriever) RetrieveUncleCIDsByHeaderID(tx *sqlx.Tx, headerID string) ([]models.UncleModel, error) {
|
||||||
log.Debug("retrieving uncle cids for block id ", headerID)
|
log.Debug("retrieving uncle cids for block id ", headerID)
|
||||||
headers := make([]eth.UncleModel, 0)
|
headers := make([]models.UncleModel, 0)
|
||||||
pgStr := `SELECT * FROM eth.uncle_cids
|
pgStr := `SELECT header_id,block_hash,parent_hash,cid,mh_key, CAST(reward as text) FROM eth.uncle_cids
|
||||||
WHERE header_id = $1`
|
WHERE header_id = $1`
|
||||||
return headers, tx.Select(&headers, pgStr, headerID)
|
return headers, tx.Select(&headers, pgStr, headerID)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RetrieveTxCIDs retrieves and returns all of the trx cids at the provided blockheight that conform to the provided filter parameters
|
// RetrieveTxCIDs retrieves and returns all of the trx cids at the provided blockheight that conform to the provided filter parameters
|
||||||
// also returns the ids for the returned transaction cids
|
// also returns the ids for the returned transaction cids
|
||||||
func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID int64) ([]eth.TxModel, error) {
|
func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID string) ([]models.TxModel, error) {
|
||||||
log.Debug("retrieving transaction cids for header id ", headerID)
|
log.Debug("retrieving transaction cids for header id ", headerID)
|
||||||
args := make([]interface{}, 0, 3)
|
args := make([]interface{}, 0, 3)
|
||||||
results := make([]eth.TxModel, 0)
|
results := make([]models.TxModel, 0)
|
||||||
id := 1
|
id := 1
|
||||||
pgStr := fmt.Sprintf(`SELECT transaction_cids.id, transaction_cids.header_id,
|
pgStr := fmt.Sprintf(`SELECT transaction_cids.tx_hash, transaction_cids.header_id,transaction_cids.cid, transaction_cids.mh_key,
|
||||||
transaction_cids.tx_hash, transaction_cids.cid, transaction_cids.mh_key,
|
transaction_cids.dst, transaction_cids.src, transaction_cids.index, transaction_cids.tx_data
|
||||||
transaction_cids.dst, transaction_cids.src, transaction_cids.index, transaction_cids.tx_data
|
FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
|
||||||
FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
|
WHERE header_cids.block_hash = $%d`, id)
|
||||||
WHERE header_cids.id = $%d`, id)
|
|
||||||
args = append(args, headerID)
|
args = append(args, headerID)
|
||||||
id++
|
id++
|
||||||
if len(txFilter.Dst) > 0 {
|
if len(txFilter.Dst) > 0 {
|
||||||
@ -208,97 +272,148 @@ func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID
|
|||||||
return results, tx.Select(&results, pgStr, args...)
|
return results, tx.Select(&results, pgStr, args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RetrieveRctCIDsByHeaderID retrieves and returns all of the rct cids at the provided header ID that conform to the provided
|
func topicFilterCondition(id *int, topics [][]string, args []interface{}, pgStr string, first bool) (string, []interface{}) {
|
||||||
// filter parameters and correspond to the provided tx ids
|
for i, topicSet := range topics {
|
||||||
func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter ReceiptFilter, headerID int64, trxIds []int64) ([]eth.ReceiptModel, error) {
|
if len(topicSet) == 0 {
|
||||||
log.Debug("retrieving receipt cids for header id ", headerID)
|
continue
|
||||||
args := make([]interface{}, 0, 4)
|
}
|
||||||
pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.mh_key,
|
|
||||||
receipt_cids.contract, receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s,
|
if !first {
|
||||||
receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts
|
pgStr += " AND"
|
||||||
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
} else {
|
||||||
WHERE receipt_cids.tx_id = transaction_cids.id
|
first = false
|
||||||
AND transaction_cids.header_id = header_cids.id
|
}
|
||||||
AND header_cids.id = $1`
|
pgStr += fmt.Sprintf(` eth.log_cids.topic%d = ANY ($%d)`, i, *id)
|
||||||
id := 2
|
args = append(args, pq.Array(topicSet))
|
||||||
args = append(args, headerID)
|
*id++
|
||||||
|
}
|
||||||
|
return pgStr, args
|
||||||
|
}
|
||||||
|
|
||||||
|
func logFilterCondition(id *int, pgStr string, args []interface{}, rctFilter ReceiptFilter) (string, []interface{}) {
|
||||||
|
if len(rctFilter.LogAddresses) > 0 {
|
||||||
|
pgStr += fmt.Sprintf(` AND eth.log_cids.address = ANY ($%d)`, *id)
|
||||||
|
args = append(args, pq.Array(rctFilter.LogAddresses))
|
||||||
|
*id++
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filter on topics if there are any
|
||||||
|
if hasTopics(rctFilter.Topics) {
|
||||||
|
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
return pgStr, args
|
||||||
|
}
|
||||||
|
|
||||||
|
func receiptFilterConditions(id *int, pgStr string, args []interface{}, rctFilter ReceiptFilter, txHashes []string) (string, []interface{}) {
|
||||||
|
rctCond := " AND (receipt_cids.tx_id = ANY ( "
|
||||||
|
logQuery := "SELECT rct_id FROM eth.log_cids WHERE"
|
||||||
if len(rctFilter.LogAddresses) > 0 {
|
if len(rctFilter.LogAddresses) > 0 {
|
||||||
// Filter on log contract addresses if there are any
|
// Filter on log contract addresses if there are any
|
||||||
pgStr += fmt.Sprintf(` AND ((receipt_cids.log_contracts && $%d::VARCHAR(66)[]`, id)
|
pgStr += fmt.Sprintf(`%s %s eth.log_cids.address = ANY ($%d)`, rctCond, logQuery, *id)
|
||||||
args = append(args, pq.Array(rctFilter.LogAddresses))
|
args = append(args, pq.Array(rctFilter.LogAddresses))
|
||||||
id++
|
*id++
|
||||||
|
|
||||||
// Filter on topics if there are any
|
// Filter on topics if there are any
|
||||||
if hasTopics(rctFilter.Topics) {
|
if hasTopics(rctFilter.Topics) {
|
||||||
pgStr += " AND ("
|
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, false)
|
||||||
first := true
|
|
||||||
for i, topicSet := range rctFilter.Topics {
|
|
||||||
if i < 4 && len(topicSet) > 0 {
|
|
||||||
if first {
|
|
||||||
pgStr += fmt.Sprintf(`receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
|
|
||||||
first = false
|
|
||||||
} else {
|
|
||||||
pgStr += fmt.Sprintf(` AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
|
|
||||||
}
|
|
||||||
args = append(args, pq.Array(topicSet))
|
|
||||||
id++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pgStr += ")"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pgStr += ")"
|
pgStr += ")"
|
||||||
// Filter on txIDs if there are any and we are matching txs
|
|
||||||
if rctFilter.MatchTxs && len(trxIds) > 0 {
|
// Filter on txHashes if there are any, and we are matching txs
|
||||||
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
|
if rctFilter.MatchTxs && len(txHashes) > 0 {
|
||||||
args = append(args, pq.Array(trxIds))
|
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d)`, *id)
|
||||||
|
args = append(args, pq.Array(txHashes))
|
||||||
}
|
}
|
||||||
pgStr += ")"
|
pgStr += ")"
|
||||||
} else { // If there are no contract addresses to filter on
|
} else { // If there are no contract addresses to filter on
|
||||||
// Filter on topics if there are any
|
// Filter on topics if there are any
|
||||||
if hasTopics(rctFilter.Topics) {
|
if hasTopics(rctFilter.Topics) {
|
||||||
pgStr += " AND (("
|
pgStr += rctCond + logQuery
|
||||||
first := true
|
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, true)
|
||||||
for i, topicSet := range rctFilter.Topics {
|
pgStr += ")"
|
||||||
if i < 4 && len(topicSet) > 0 {
|
// Filter on txHashes if there are any, and we are matching txs
|
||||||
if first {
|
if rctFilter.MatchTxs && len(txHashes) > 0 {
|
||||||
pgStr += fmt.Sprintf(`receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
|
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d)`, *id)
|
||||||
first = false
|
args = append(args, pq.Array(txHashes))
|
||||||
} else {
|
|
||||||
pgStr += fmt.Sprintf(` AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
|
|
||||||
}
|
|
||||||
args = append(args, pq.Array(topicSet))
|
|
||||||
id++
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
pgStr += ")"
|
pgStr += ")"
|
||||||
// Filter on txIDs if there are any and we are matching txs
|
} else if rctFilter.MatchTxs && len(txHashes) > 0 {
|
||||||
if rctFilter.MatchTxs && len(trxIds) > 0 {
|
|
||||||
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
|
|
||||||
args = append(args, pq.Array(trxIds))
|
|
||||||
}
|
|
||||||
pgStr += ")"
|
|
||||||
} else if rctFilter.MatchTxs && len(trxIds) > 0 {
|
|
||||||
// If there are no contract addresses or topics to filter on,
|
// If there are no contract addresses or topics to filter on,
|
||||||
// Filter on txIDs if there are any and we are matching txs
|
// Filter on txHashes if there are any, and we are matching txs
|
||||||
pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
|
pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d)`, *id)
|
||||||
args = append(args, pq.Array(trxIds))
|
args = append(args, pq.Array(txHashes))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
pgStr += ` ORDER BY transaction_cids.index`
|
|
||||||
receiptCids := make([]eth.ReceiptModel, 0)
|
return pgStr, args
|
||||||
return receiptCids, tx.Select(&receiptCids, pgStr, args...)
|
|
||||||
}
|
}
|
||||||
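receiptFilterConditions, together with topicFilterCondition, now matches receipts through eth.log_cids rather than the denormalized topicNs/log_contracts arrays the old schema kept on receipt_cids. For a filter with one set of log addresses and one topic0 set, the appended fragment comes out roughly like the constant below (ordinals are placeholders; the real ones depend on how many parameters precede the call):

// Illustrative only; whitespace and ordinals will differ in practice.
const exampleReceiptFilter = ` AND (receipt_cids.tx_id = ANY (
    SELECT rct_id FROM eth.log_cids
    WHERE eth.log_cids.address = ANY ($2) AND eth.log_cids.topic0 = ANY ($3)))`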
|
|
||||||
// RetrieveRctCIDs retrieves and returns all of the rct cids at the provided blockheight or block hash that conform to the provided
|
// RetrieveRctCIDsByHeaderID retrieves and returns all of the rct cids at the provided header ID that conform to the provided
|
||||||
// filter parameters and correspond to the provided tx ids
|
// filter parameters and correspond to the provided tx ids
|
||||||
func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, trxIds []int64) ([]eth.ReceiptModel, error) {
|
func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter ReceiptFilter, headerID string, trxHashes []string) ([]models.ReceiptModel, error) {
|
||||||
log.Debug("retrieving receipt cids for block ", blockNumber)
|
log.Debug("retrieving receipt cids for header id ", headerID)
|
||||||
args := make([]interface{}, 0, 5)
|
args := make([]interface{}, 0, 4)
|
||||||
pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.mh_key,
|
pgStr := `SELECT receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key,
|
||||||
receipt_cids.contract, receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s,
|
receipt_cids.contract, receipt_cids.contract_hash
|
||||||
receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts
|
|
||||||
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
||||||
WHERE receipt_cids.tx_id = transaction_cids.id
|
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
|
||||||
AND transaction_cids.header_id = header_cids.id`
|
AND transaction_cids.header_id = header_cids.block_hash
|
||||||
|
AND header_cids.block_hash = $1`
|
||||||
|
id := 2
|
||||||
|
args = append(args, headerID)
|
||||||
|
|
||||||
|
pgStr, args = receiptFilterConditions(&id, pgStr, args, rctFilter, trxHashes)
|
||||||
|
|
||||||
|
pgStr += ` ORDER BY transaction_cids.index`
|
||||||
|
receiptCIDs := make([]models.ReceiptModel, 0)
|
||||||
|
return receiptCIDs, tx.Select(&receiptCIDs, pgStr, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetrieveFilteredGQLLogs retrieves and returns all the log CIDs for the provided blockHash that conform to the provided
|
||||||
|
// filter parameters.
|
||||||
|
func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockHash *common.Hash) ([]LogResult, error) {
|
||||||
|
log.Debug("retrieving log cids for receipt ids")
|
||||||
|
args := make([]interface{}, 0, 4)
|
||||||
|
id := 1
|
||||||
|
pgStr := `SELECT eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
|
||||||
|
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
|
||||||
|
eth.log_cids.log_data, eth.transaction_cids.tx_hash, data, eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status
|
||||||
|
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids, public.blocks
|
||||||
|
WHERE eth.log_cids.rct_id = receipt_cids.tx_id
|
||||||
|
AND receipt_cids.tx_id = transaction_cids.tx_hash
|
||||||
|
AND transaction_cids.header_id = header_cids.block_hash
|
||||||
|
AND log_cids.leaf_mh_key = blocks.key AND header_cids.block_hash = $1`
|
||||||
|
|
||||||
|
args = append(args, blockHash.String())
|
||||||
|
id++
|
||||||
|
|
||||||
|
pgStr, args = logFilterCondition(&id, pgStr, args, rctFilter)
|
||||||
|
pgStr += ` ORDER BY log_cids.index`
|
||||||
|
|
||||||
|
logCIDs := make([]LogResult, 0)
|
||||||
|
err := tx.Select(&logCIDs, pgStr, args...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return logCIDs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetrieveFilteredLog retrieves and returns all the log CIDs for the provided blockHeight or blockHash that conform to the provided
|
||||||
|
// filter parameters.
|
||||||
|
func (ecr *CIDRetriever) RetrieveFilteredLog(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash) ([]LogResult, error) {
|
||||||
|
log.Debug("retrieving log cids for receipt ids")
|
||||||
|
args := make([]interface{}, 0, 4)
|
||||||
|
pgStr := `SELECT eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
|
||||||
|
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
|
||||||
|
eth.log_cids.log_data, eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index,
|
||||||
|
header_cids.block_hash, CAST(header_cids.block_number as Text)
|
||||||
|
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
||||||
|
WHERE eth.log_cids.rct_id = receipt_cids.tx_id
|
||||||
|
AND receipt_cids.tx_id = transaction_cids.tx_hash
|
||||||
|
AND transaction_cids.header_id = header_cids.block_hash`
|
||||||
id := 1
|
id := 1
|
||||||
if blockNumber > 0 {
|
if blockNumber > 0 {
|
||||||
pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
|
pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
|
||||||
@ -310,70 +425,45 @@ func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, b
|
|||||||
args = append(args, blockHash.String())
|
args = append(args, blockHash.String())
|
||||||
id++
|
id++
|
||||||
}
|
}
|
||||||
if len(rctFilter.LogAddresses) > 0 {
|
|
||||||
// Filter on log contract addresses if there are any
|
pgStr, args = logFilterCondition(&id, pgStr, args, rctFilter)
|
||||||
pgStr += fmt.Sprintf(` AND ((receipt_cids.log_contracts && $%d::VARCHAR(66)[]`, id)
|
pgStr += ` ORDER BY log_cids.index`
|
||||||
args = append(args, pq.Array(rctFilter.LogAddresses))
|
|
||||||
id++
|
logCIDs := make([]LogResult, 0)
|
||||||
// Filter on topics if there are any
|
err := tx.Select(&logCIDs, pgStr, args...)
|
||||||
if hasTopics(rctFilter.Topics) {
|
if err != nil {
|
||||||
pgStr += " AND ("
|
return nil, err
|
||||||
first := true
|
|
||||||
for i, topicSet := range rctFilter.Topics {
|
|
||||||
if i < 4 && len(topicSet) > 0 {
|
|
||||||
if first {
|
|
||||||
pgStr += fmt.Sprintf(`receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
|
|
||||||
first = false
|
|
||||||
} else {
|
|
||||||
pgStr += fmt.Sprintf(` AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
|
|
||||||
}
|
|
||||||
args = append(args, pq.Array(topicSet))
|
|
||||||
id++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pgStr += ")"
|
|
||||||
}
|
|
||||||
pgStr += ")"
|
|
||||||
// Filter on txIDs if there are any and we are matching txs
|
|
||||||
if rctFilter.MatchTxs && len(trxIds) > 0 {
|
|
||||||
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
|
|
||||||
args = append(args, pq.Array(trxIds))
|
|
||||||
}
|
|
||||||
pgStr += ")"
|
|
||||||
} else { // If there are no contract addresses to filter on
|
|
||||||
// Filter on topics if there are any
|
|
||||||
if hasTopics(rctFilter.Topics) {
|
|
||||||
pgStr += " AND (("
|
|
||||||
first := true
|
|
||||||
for i, topicSet := range rctFilter.Topics {
|
|
||||||
if i < 4 && len(topicSet) > 0 {
|
|
||||||
if first {
|
|
||||||
pgStr += fmt.Sprintf(`receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
|
|
||||||
first = false
|
|
||||||
} else {
|
|
||||||
pgStr += fmt.Sprintf(` AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
|
|
||||||
}
|
|
||||||
args = append(args, pq.Array(topicSet))
|
|
||||||
id++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pgStr += ")"
|
|
||||||
// Filter on txIDs if there are any and we are matching txs
|
|
||||||
if rctFilter.MatchTxs && len(trxIds) > 0 {
|
|
||||||
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
|
|
||||||
args = append(args, pq.Array(trxIds))
|
|
||||||
}
|
|
||||||
pgStr += ")"
|
|
||||||
} else if rctFilter.MatchTxs && len(trxIds) > 0 {
|
|
||||||
// If there are no contract addresses or topics to filter on,
|
|
||||||
// Filter on txIDs if there are any and we are matching txs
|
|
||||||
pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
|
|
||||||
args = append(args, pq.Array(trxIds))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return logCIDs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetrieveRctCIDs retrieves and returns all of the rct cids at the provided blockheight or block hash that conform to the provided
|
||||||
|
// filter parameters and correspond to the provided tx ids
|
||||||
|
func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, txHashes []string) ([]models.ReceiptModel, error) {
|
||||||
|
log.Debug("retrieving receipt cids for block ", blockNumber)
|
||||||
|
args := make([]interface{}, 0, 5)
|
||||||
|
pgStr := `SELECT receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key, receipt_cids.tx_id
|
||||||
|
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
||||||
|
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
|
||||||
|
AND transaction_cids.header_id = header_cids.block_hash`
|
||||||
|
id := 1
|
||||||
|
if blockNumber > 0 {
|
||||||
|
pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
|
||||||
|
args = append(args, blockNumber)
|
||||||
|
id++
|
||||||
|
}
|
||||||
|
if blockHash != nil {
|
||||||
|
pgStr += fmt.Sprintf(` AND header_cids.block_hash = $%d`, id)
|
||||||
|
args = append(args, blockHash.String())
|
||||||
|
id++
|
||||||
|
}
|
||||||
|
|
||||||
|
pgStr, args = receiptFilterConditions(&id, pgStr, args, rctFilter, txHashes)
|
||||||
|
|
||||||
pgStr += ` ORDER BY transaction_cids.index`
|
pgStr += ` ORDER BY transaction_cids.index`
|
||||||
receiptCids := make([]eth.ReceiptModel, 0)
|
receiptCIDs := make([]models.ReceiptModel, 0)
|
||||||
return receiptCids, tx.Select(&receiptCids, pgStr, args...)
|
return receiptCIDs, tx.Select(&receiptCIDs, pgStr, args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func hasTopics(topics [][]string) bool {
|
func hasTopics(topics [][]string) bool {
|
||||||
@ -386,13 +476,13 @@ func hasTopics(topics [][]string) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// RetrieveStateCIDs retrieves and returns all of the state node cids at the provided header ID that conform to the provided filter parameters
|
// RetrieveStateCIDs retrieves and returns all of the state node cids at the provided header ID that conform to the provided filter parameters
|
||||||
func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, headerID int64) ([]eth.StateNodeModel, error) {
|
 func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, headerID string) ([]models.StateNodeModel, error) {
 	log.Debug("retrieving state cids for header id ", headerID)
 	args := make([]interface{}, 0, 2)
-	pgStr := `SELECT state_cids.id, state_cids.header_id,
+	pgStr := `SELECT state_cids.header_id,
 			state_cids.state_leaf_key, state_cids.node_type, state_cids.cid, state_cids.mh_key, state_cids.state_path
-			FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
-			WHERE header_cids.id = $1`
+			FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
+			WHERE header_cids.block_hash = $1`
 	args = append(args, headerID)
 	addrLen := len(stateFilter.Addresses)
 	if addrLen > 0 {
@@ -406,20 +496,20 @@ func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter,
 	if !stateFilter.IntermediateNodes {
 		pgStr += ` AND state_cids.node_type = 2`
 	}
-	stateNodeCIDs := make([]eth.StateNodeModel, 0)
+	stateNodeCIDs := make([]models.StateNodeModel, 0)
 	return stateNodeCIDs, tx.Select(&stateNodeCIDs, pgStr, args...)
 }

 // RetrieveStorageCIDs retrieves and returns all of the storage node cids at the provided header id that conform to the provided filter parameters
-func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, headerID int64) ([]eth.StorageNodeWithStateKeyModel, error) {
+func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, headerID string) ([]models.StorageNodeWithStateKeyModel, error) {
 	log.Debug("retrieving storage cids for header id ", headerID)
 	args := make([]interface{}, 0, 3)
-	pgStr := `SELECT storage_cids.id, storage_cids.state_id, storage_cids.storage_leaf_key, storage_cids.node_type,
-			storage_cids.cid, storage_cids.mh_key, storage_cids.storage_path, state_cids.state_leaf_key
+	pgStr := `SELECT storage_cids.header_id, storage_cids.storage_leaf_key, storage_cids.node_type,
+			storage_cids.cid, storage_cids.mh_key, storage_cids.storage_path, storage_cids.state_path, state_cids.state_leaf_key
 			FROM eth.storage_cids, eth.state_cids, eth.header_cids
-			WHERE storage_cids.state_id = state_cids.id
-			AND state_cids.header_id = header_cids.id
-			AND header_cids.id = $1`
+			WHERE storage_cids.header_id = state_cids.header_id AND storage_cids.state_path = state_cids.state_path
+			AND state_cids.header_id = header_cids.block_hash
+			AND header_cids.block_hash = $1`
 	args = append(args, headerID)
 	id := 2
 	addrLen := len(storageFilter.Addresses)
@@ -439,18 +529,18 @@ func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageF
 	if !storageFilter.IntermediateNodes {
 		pgStr += ` AND storage_cids.node_type = 2`
 	}
-	storageNodeCIDs := make([]eth.StorageNodeWithStateKeyModel, 0)
+	storageNodeCIDs := make([]models.StorageNodeWithStateKeyModel, 0)
 	return storageNodeCIDs, tx.Select(&storageNodeCIDs, pgStr, args...)
 }

 // RetrieveBlockByHash returns all of the CIDs needed to compose an entire block, for a given block hash
-func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (eth.HeaderModel, []eth.UncleModel, []eth.TxModel, []eth.ReceiptModel, error) {
+func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (models.HeaderModel, []models.UncleModel, []models.TxModel, []models.ReceiptModel, error) {
 	log.Debug("retrieving block cids for block hash ", blockHash.String())

 	// Begin new db tx
 	tx, err := ecr.db.Beginx()
 	if err != nil {
-		return eth.HeaderModel{}, nil, nil, nil, err
+		return models.HeaderModel{}, nil, nil, nil, err
 	}
 	defer func() {
 		if p := recover(); p != nil {
@@ -463,26 +553,30 @@ func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (eth.HeaderM
 		}
 	}()

-	headerCID, err := ecr.RetrieveHeaderCIDByHash(tx, blockHash)
+	var headerCID models.HeaderModel
+	headerCID, err = ecr.RetrieveHeaderCIDByHash(tx, blockHash)
 	if err != nil {
 		log.Error("header cid retrieval error")
-		return eth.HeaderModel{}, nil, nil, nil, err
+		return models.HeaderModel{}, nil, nil, nil, err
 	}
-	uncleCIDs, err := ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID.ID)
+	var uncleCIDs []models.UncleModel
+	uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID.BlockHash)
 	if err != nil {
 		log.Error("uncle cid retrieval error")
-		return eth.HeaderModel{}, nil, nil, nil, err
+		return models.HeaderModel{}, nil, nil, nil, err
 	}
-	txCIDs, err := ecr.RetrieveTxCIDsByHeaderID(tx, headerCID.ID)
+	var txCIDs []models.TxModel
+	txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID.BlockHash)
 	if err != nil {
 		log.Error("tx cid retrieval error")
-		return eth.HeaderModel{}, nil, nil, nil, err
+		return models.HeaderModel{}, nil, nil, nil, err
 	}
-	txIDs := make([]int64, len(txCIDs))
+	txHashes := make([]string, len(txCIDs))
 	for i, txCID := range txCIDs {
-		txIDs[i] = txCID.ID
+		txHashes[i] = txCID.TxHash
 	}
-	rctCIDs, err := ecr.RetrieveReceiptCIDsByTxIDs(tx, txIDs)
+	var rctCIDs []models.ReceiptModel
+	rctCIDs, err = ecr.RetrieveReceiptCIDsByTxIDs(tx, txHashes)
 	if err != nil {
 		log.Error("rct cid retrieval error")
 	}
@@ -490,13 +584,13 @@ func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (eth.HeaderM
 }

 // RetrieveBlockByNumber returns all of the CIDs needed to compose an entire block, for a given block number
-func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (eth.HeaderModel, []eth.UncleModel, []eth.TxModel, []eth.ReceiptModel, error) {
+func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (models.HeaderModel, []models.UncleModel, []models.TxModel, []models.ReceiptModel, error) {
 	log.Debug("retrieving block cids for block number ", blockNumber)

 	// Begin new db tx
 	tx, err := ecr.db.Beginx()
 	if err != nil {
-		return eth.HeaderModel{}, nil, nil, nil, err
+		return models.HeaderModel{}, nil, nil, nil, err
 	}
 	defer func() {
 		if p := recover(); p != nil {
@@ -509,29 +603,33 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (eth.HeaderMod
 		}
 	}()

-	headerCID, err := ecr.RetrieveHeaderCIDs(tx, blockNumber)
+	var headerCID []models.HeaderModel
+	headerCID, err = ecr.RetrieveHeaderCIDs(tx, blockNumber)
 	if err != nil {
 		log.Error("header cid retrieval error")
-		return eth.HeaderModel{}, nil, nil, nil, err
+		return models.HeaderModel{}, nil, nil, nil, err
 	}
 	if len(headerCID) < 1 {
-		return eth.HeaderModel{}, nil, nil, nil, fmt.Errorf("header cid retrieval error, no header CIDs found at block %d", blockNumber)
+		return models.HeaderModel{}, nil, nil, nil, fmt.Errorf("header cid retrieval error, no header CIDs found at block %d", blockNumber)
 	}
-	uncleCIDs, err := ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID[0].ID)
+	var uncleCIDs []models.UncleModel
+	uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID[0].BlockHash)
 	if err != nil {
 		log.Error("uncle cid retrieval error")
-		return eth.HeaderModel{}, nil, nil, nil, err
+		return models.HeaderModel{}, nil, nil, nil, err
 	}
-	txCIDs, err := ecr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].ID)
+	var txCIDs []models.TxModel
+	txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].BlockHash)
 	if err != nil {
 		log.Error("tx cid retrieval error")
-		return eth.HeaderModel{}, nil, nil, nil, err
+		return models.HeaderModel{}, nil, nil, nil, err
 	}
-	txIDs := make([]int64, len(txCIDs))
+	txHashes := make([]string, len(txCIDs))
 	for i, txCID := range txCIDs {
-		txIDs[i] = txCID.ID
+		txHashes[i] = txCID.TxHash
 	}
-	rctCIDs, err := ecr.RetrieveReceiptCIDsByTxIDs(tx, txIDs)
+	var rctCIDs []models.ReceiptModel
+	rctCIDs, err = ecr.RetrieveReceiptCIDsByTxIDs(tx, txHashes)
 	if err != nil {
 		log.Error("rct cid retrieval error")
 	}
@@ -539,34 +637,89 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (eth.HeaderMod
 }

 // RetrieveHeaderCIDByHash returns the header for the given block hash
-func (ecr *CIDRetriever) RetrieveHeaderCIDByHash(tx *sqlx.Tx, blockHash common.Hash) (eth.HeaderModel, error) {
+func (ecr *CIDRetriever) RetrieveHeaderCIDByHash(tx *sqlx.Tx, blockHash common.Hash) (models.HeaderModel, error) {
 	log.Debug("retrieving header cids for block hash ", blockHash.String())
-	pgStr := `SELECT * FROM eth.header_cids
+	pgStr := `SELECT block_hash, CAST(block_number as Text), parent_hash, cid, mh_key, CAST(td as Text),
+			state_root, uncle_root, tx_root, receipt_root, bloom, timestamp FROM eth.header_cids
 			WHERE block_hash = $1`
-	var headerCID eth.HeaderModel
+	var headerCID models.HeaderModel
 	return headerCID, tx.Get(&headerCID, pgStr, blockHash.String())
 }

 // RetrieveTxCIDsByHeaderID retrieves all tx CIDs for the given header id
-func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ([]eth.TxModel, error) {
+func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID string) ([]models.TxModel, error) {
 	log.Debug("retrieving tx cids for block id ", headerID)
 	pgStr := `SELECT * FROM eth.transaction_cids
 			WHERE header_id = $1
 			ORDER BY index`
-	var txCIDs []eth.TxModel
+	var txCIDs []models.TxModel
 	return txCIDs, tx.Select(&txCIDs, pgStr, headerID)
 }

 // RetrieveReceiptCIDsByTxIDs retrieves receipt CIDs by their associated tx IDs
-func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txIDs []int64) ([]eth.ReceiptModel, error) {
+func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txHashes []string) ([]models.ReceiptModel, error) {
-	log.Debugf("retrieving receipt cids for tx ids %v", txIDs)
+	log.Debugf("retrieving receipt cids for tx hashes %v", txHashes)
-	pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.mh_key,
-			receipt_cids.contract, receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s,
-			receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts
+	pgStr := `SELECT receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key,
+			receipt_cids.contract, receipt_cids.contract_hash
 			FROM eth.receipt_cids, eth.transaction_cids
-			WHERE tx_id = ANY($1::INTEGER[])
-			AND receipt_cids.tx_id = transaction_cids.id
+			WHERE tx_id = ANY($1)
+			AND receipt_cids.tx_id = transaction_cids.tx_hash
 			ORDER BY transaction_cids.index`
-	var rctCIDs []eth.ReceiptModel
+	var rctCIDs []models.ReceiptModel
-	return rctCIDs, tx.Select(&rctCIDs, pgStr, pq.Array(txIDs))
+	return rctCIDs, tx.Select(&rctCIDs, pgStr, pq.Array(txHashes))
+}
+
+// RetrieveHeaderAndTxCIDsByBlockNumber retrieves header CIDs and their associated tx CIDs by block number
+func (ecr *CIDRetriever) RetrieveHeaderAndTxCIDsByBlockNumber(blockNumber int64) ([]HeaderCIDRecord, error) {
+	log.Debug("retrieving header cids and tx cids for block number ", blockNumber)
+
+	var headerCIDs []HeaderCIDRecord
+
+	// https://github.com/go-gorm/gorm/issues/4083#issuecomment-778883283
+	// Will use join for TransactionCIDs once preload for 1:N is supported.
+	err := ecr.gormDB.Preload("TransactionCIDs", func(tx *gorm.DB) *gorm.DB {
+		return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id")
+	}).Joins("IPLD").Find(&headerCIDs, "block_number = ?", blockNumber).Error
+
+	if err != nil {
+		log.Error("header cid retrieval error")
+		return nil, err
+	}
+
+	return headerCIDs, nil
+}
+
+// RetrieveHeaderAndTxCIDsByBlockHash retrieves header CID and their associated tx CIDs by block hash
+func (ecr *CIDRetriever) RetrieveHeaderAndTxCIDsByBlockHash(blockHash common.Hash) (HeaderCIDRecord, error) {
+	log.Debug("retrieving header cid and tx cids for block hash ", blockHash.String())
+
+	var headerCID HeaderCIDRecord
+
+	// https://github.com/go-gorm/gorm/issues/4083#issuecomment-778883283
+	// Will use join for TransactionCIDs once preload for 1:N is supported.
+	err := ecr.gormDB.Preload("TransactionCIDs", func(tx *gorm.DB) *gorm.DB {
+		return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id")
+	}).Joins("IPLD").First(&headerCID, "block_hash = ?", blockHash.String()).Error
+
+	if err != nil {
+		log.Error("header cid retrieval error")
+		return headerCID, err
+	}
+
+	return headerCID, nil
+}
+
+// RetrieveTxCIDByHash returns the tx for the given tx hash
+func (ecr *CIDRetriever) RetrieveTxCIDByHash(txHash string) (TransactionCIDRecord, error) {
+	log.Debug("retrieving tx cid for tx hash ", txHash)
+
+	var txCID TransactionCIDRecord
+
+	err := ecr.gormDB.Joins("IPLD").First(&txCID, "tx_hash = ?", txHash).Error
+	if err != nil {
+		log.Error("header cid retrieval error")
+		return txCID, err
+	}
+
+	return txCID, nil
 }
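The retriever changes above move every lookup from integer surrogate keys (header_cids.id, transaction_cids.id, state_cids.id) to the natural keys of the v3 schema (block_hash, tx_hash, header_id/state_path). A minimal usage sketch of the updated API, with variable names and error handling that are illustrative only (not taken from the diff):

	// db is a *sqlx.DB connected to a v3 eth schema; blockHash is a common.Hash
	retriever := eth.NewCIDRetriever(db)
	header, uncles, txs, rcts, err := retriever.RetrieveBlockByHash(blockHash)
	if err != nil {
		log.Fatal(err)
	}
	// header.BlockHash and txs[i].TxHash are now the values used to join
	// uncles, transactions and receipts back to their header.
	fmt.Println(header.BlockHash, len(uncles), len(txs), len(rcts))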
@@ -19,19 +19,19 @@ package eth_test
 import (
 	"math/big"

-	"github.com/ethereum/go-ethereum/trie"
-	"github.com/vulcanize/ipld-eth-indexer/pkg/shared"

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+	"github.com/ethereum/go-ethereum/statediff/indexer/models"
+	"github.com/ethereum/go-ethereum/trie"
+	"github.com/jmoiron/sqlx"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"

-	eth2 "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
-	"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers"
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
-	"github.com/vulcanize/ipld-eth-server/pkg/eth"
-	"github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers"
 )

 var (
@@ -211,45 +211,64 @@ var (

 var _ = Describe("Retriever", func() {
 	var (
-		db        *postgres.DB
-		repo      *eth2.IPLDPublisher
+		db          *sqlx.DB
+		diffIndexer interfaces.StateDiffIndexer
 		retriever *eth.CIDRetriever
 	)
 	BeforeEach(func() {
-		var err error
-		db, err = shared.SetupDB()
-		Expect(err).ToNot(HaveOccurred())
-		repo = eth2.NewIPLDPublisher(db)
+		db = shared.SetupDB()
+		diffIndexer = shared.SetupTestStateDiffIndexer(ctx, params.TestChainConfig, test_helpers.Genesis.Hash())
 		retriever = eth.NewCIDRetriever(db)
 	})
 	AfterEach(func() {
-		eth.TearDownDB(db)
+		shared.TearDownDB(db)
 	})

 	Describe("Retrieve", func() {
 		BeforeEach(func() {
-			err := repo.Publish(test_helpers.MockConvertedPayload)
+			tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
+			Expect(err).ToNot(HaveOccurred())
+			for _, node := range test_helpers.MockStateNodes {
+				err = diffIndexer.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
+				Expect(err).ToNot(HaveOccurred())
+			}
+
+			err = tx.Submit(err)
 			Expect(err).ToNot(HaveOccurred())
 		})
 		It("Retrieves all CIDs for the given blocknumber when provided an open filter", func() {
+			type rctCIDAndMHKeyResult struct {
+				LeafCID   string `db:"leaf_cid"`
+				LeafMhKey string `db:"leaf_mh_key"`
+			}
+			expectedRctCIDsAndLeafNodes := make([]rctCIDAndMHKeyResult, 0)
+			pgStr := `SELECT receipt_cids.leaf_cid, receipt_cids.leaf_mh_key FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
+				WHERE receipt_cids.tx_id = transaction_cids.tx_hash
+				AND transaction_cids.header_id = header_cids.block_hash
+				AND header_cids.block_number = $1
+				ORDER BY transaction_cids.index`
+			err := db.Select(&expectedRctCIDsAndLeafNodes, pgStr, test_helpers.BlockNumber.Uint64())
 			cids, empty, err := retriever.Retrieve(openFilter, 1)
 			Expect(err).ToNot(HaveOccurred())
 			Expect(empty).ToNot(BeTrue())
 			Expect(len(cids)).To(Equal(1))
 			Expect(cids[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))

 			expectedHeaderCID := test_helpers.MockCIDWrapper.Header
-			expectedHeaderCID.ID = cids[0].Header.ID
+			expectedHeaderCID.BlockHash = cids[0].Header.BlockHash
 			expectedHeaderCID.NodeID = cids[0].Header.NodeID
 			Expect(cids[0].Header).To(Equal(expectedHeaderCID))
-			Expect(len(cids[0].Transactions)).To(Equal(3))
+			Expect(len(cids[0].Transactions)).To(Equal(4))
 			Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[0].CID)).To(BeTrue())
 			Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[1].CID)).To(BeTrue())
 			Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[2].CID)).To(BeTrue())
-			Expect(len(cids[0].Receipts)).To(Equal(3))
+			Expect(len(cids[0].Receipts)).To(Equal(4))
-			Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, test_helpers.MockCIDWrapper.Receipts[0].CID)).To(BeTrue())
+			Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, expectedRctCIDsAndLeafNodes[0].LeafCID)).To(BeTrue())
-			Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, test_helpers.MockCIDWrapper.Receipts[1].CID)).To(BeTrue())
+			Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, expectedRctCIDsAndLeafNodes[1].LeafCID)).To(BeTrue())
-			Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, test_helpers.MockCIDWrapper.Receipts[2].CID)).To(BeTrue())
+			Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, expectedRctCIDsAndLeafNodes[2].LeafCID)).To(BeTrue())
 			Expect(len(cids[0].StateNodes)).To(Equal(2))

 			for _, stateNode := range cids[0].StateNodes {
 				if stateNode.CID == test_helpers.State1CID.String() {
 					Expect(stateNode.StateKey).To(Equal(common.BytesToHash(test_helpers.ContractLeafKey).Hex()))
@@ -264,25 +283,37 @@ var _ = Describe("Retriever", func() {
 			}
 			Expect(len(cids[0].StorageNodes)).To(Equal(1))
 			expectedStorageNodeCIDs := test_helpers.MockCIDWrapper.StorageNodes
-			expectedStorageNodeCIDs[0].ID = cids[0].StorageNodes[0].ID
+			expectedStorageNodeCIDs[0].HeaderID = cids[0].StorageNodes[0].HeaderID
-			expectedStorageNodeCIDs[0].StateID = cids[0].StorageNodes[0].StateID
+			expectedStorageNodeCIDs[0].StatePath = cids[0].StorageNodes[0].StatePath
 			Expect(cids[0].StorageNodes).To(Equal(expectedStorageNodeCIDs))
 		})

 		It("Applies filters from the provided config.Subscription", func() {
+			type rctCIDAndMHKeyResult struct {
+				LeafCID   string `db:"leaf_cid"`
+				LeafMhKey string `db:"leaf_mh_key"`
+			}
+			expectedRctCIDsAndLeafNodes := make([]rctCIDAndMHKeyResult, 0)
+			pgStr := `SELECT receipt_cids.leaf_cid, receipt_cids.leaf_mh_key FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
+				WHERE receipt_cids.tx_id = transaction_cids.tx_hash
+				AND transaction_cids.header_id = header_cids.block_hash
+				AND header_cids.block_number = $1
+				ORDER BY transaction_cids.index`
+			err := db.Select(&expectedRctCIDsAndLeafNodes, pgStr, test_helpers.BlockNumber.Uint64())
 			cids1, empty, err := retriever.Retrieve(rctAddressFilter, 1)
 			Expect(err).ToNot(HaveOccurred())
 			Expect(empty).ToNot(BeTrue())
 			Expect(len(cids1)).To(Equal(1))
 			Expect(cids1[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
-			Expect(cids1[0].Header).To(Equal(eth2.HeaderModel{}))
+			Expect(cids1[0].Header).To(Equal(models.HeaderModel{}))
 			Expect(len(cids1[0].Transactions)).To(Equal(0))
 			Expect(len(cids1[0].StateNodes)).To(Equal(0))
 			Expect(len(cids1[0].StorageNodes)).To(Equal(0))
 			Expect(len(cids1[0].Receipts)).To(Equal(1))
 			expectedReceiptCID := test_helpers.MockCIDWrapper.Receipts[0]
-			expectedReceiptCID.ID = cids1[0].Receipts[0].ID
 			expectedReceiptCID.TxID = cids1[0].Receipts[0].TxID
+			expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[0].LeafCID
+			expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[0].LeafMhKey
 			Expect(cids1[0].Receipts[0]).To(Equal(expectedReceiptCID))

 			cids2, empty, err := retriever.Retrieve(rctTopicsFilter, 1)
@@ -290,14 +321,15 @@ var _ = Describe("Retriever", func() {
 			Expect(empty).ToNot(BeTrue())
 			Expect(len(cids2)).To(Equal(1))
 			Expect(cids2[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
-			Expect(cids2[0].Header).To(Equal(eth2.HeaderModel{}))
+			Expect(cids2[0].Header).To(Equal(models.HeaderModel{}))
 			Expect(len(cids2[0].Transactions)).To(Equal(0))
 			Expect(len(cids2[0].StateNodes)).To(Equal(0))
 			Expect(len(cids2[0].StorageNodes)).To(Equal(0))
 			Expect(len(cids2[0].Receipts)).To(Equal(1))
 			expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[0]
-			expectedReceiptCID.ID = cids2[0].Receipts[0].ID
 			expectedReceiptCID.TxID = cids2[0].Receipts[0].TxID
+			expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[0].LeafCID
+			expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[0].LeafMhKey
 			Expect(cids2[0].Receipts[0]).To(Equal(expectedReceiptCID))

 			cids3, empty, err := retriever.Retrieve(rctTopicsAndAddressFilter, 1)
@@ -305,14 +337,15 @@ var _ = Describe("Retriever", func() {
 			Expect(empty).ToNot(BeTrue())
 			Expect(len(cids3)).To(Equal(1))
 			Expect(cids3[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
-			Expect(cids3[0].Header).To(Equal(eth2.HeaderModel{}))
+			Expect(cids3[0].Header).To(Equal(models.HeaderModel{}))
 			Expect(len(cids3[0].Transactions)).To(Equal(0))
 			Expect(len(cids3[0].StateNodes)).To(Equal(0))
 			Expect(len(cids3[0].StorageNodes)).To(Equal(0))
 			Expect(len(cids3[0].Receipts)).To(Equal(1))
 			expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[0]
-			expectedReceiptCID.ID = cids3[0].Receipts[0].ID
 			expectedReceiptCID.TxID = cids3[0].Receipts[0].TxID
+			expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[0].LeafCID
+			expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[0].LeafMhKey
 			Expect(cids3[0].Receipts[0]).To(Equal(expectedReceiptCID))

 			cids4, empty, err := retriever.Retrieve(rctAddressesAndTopicFilter, 1)
@@ -320,14 +353,15 @@ var _ = Describe("Retriever", func() {
 			Expect(empty).ToNot(BeTrue())
 			Expect(len(cids4)).To(Equal(1))
 			Expect(cids4[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
-			Expect(cids4[0].Header).To(Equal(eth2.HeaderModel{}))
+			Expect(cids4[0].Header).To(Equal(models.HeaderModel{}))
 			Expect(len(cids4[0].Transactions)).To(Equal(0))
 			Expect(len(cids4[0].StateNodes)).To(Equal(0))
 			Expect(len(cids4[0].StorageNodes)).To(Equal(0))
 			Expect(len(cids4[0].Receipts)).To(Equal(1))
 			expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[1]
-			expectedReceiptCID.ID = cids4[0].Receipts[0].ID
 			expectedReceiptCID.TxID = cids4[0].Receipts[0].TxID
+			expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[1].LeafCID
+			expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[1].LeafMhKey
 			Expect(cids4[0].Receipts[0]).To(Equal(expectedReceiptCID))

 			cids5, empty, err := retriever.Retrieve(rctsForAllCollectedTrxs, 1)
@@ -335,35 +369,36 @@ var _ = Describe("Retriever", func() {
 			Expect(empty).ToNot(BeTrue())
 			Expect(len(cids5)).To(Equal(1))
 			Expect(cids5[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
-			Expect(cids5[0].Header).To(Equal(eth2.HeaderModel{}))
+			Expect(cids5[0].Header).To(Equal(models.HeaderModel{}))
-			Expect(len(cids5[0].Transactions)).To(Equal(3))
+			Expect(len(cids5[0].Transactions)).To(Equal(4))
 			Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx1CID.String())).To(BeTrue())
 			Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx2CID.String())).To(BeTrue())
 			Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx3CID.String())).To(BeTrue())
 			Expect(len(cids5[0].StateNodes)).To(Equal(0))
 			Expect(len(cids5[0].StorageNodes)).To(Equal(0))
-			Expect(len(cids5[0].Receipts)).To(Equal(3))
+			Expect(len(cids5[0].Receipts)).To(Equal(4))
-			Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, test_helpers.Rct1CID.String())).To(BeTrue())
+			Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, expectedRctCIDsAndLeafNodes[0].LeafCID)).To(BeTrue())
-			Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, test_helpers.Rct2CID.String())).To(BeTrue())
+			Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, expectedRctCIDsAndLeafNodes[1].LeafCID)).To(BeTrue())
-			Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, test_helpers.Rct3CID.String())).To(BeTrue())
+			Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, expectedRctCIDsAndLeafNodes[2].LeafCID)).To(BeTrue())

 			cids6, empty, err := retriever.Retrieve(rctsForSelectCollectedTrxs, 1)
 			Expect(err).ToNot(HaveOccurred())
 			Expect(empty).ToNot(BeTrue())
 			Expect(len(cids6)).To(Equal(1))
 			Expect(cids6[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
-			Expect(cids6[0].Header).To(Equal(eth2.HeaderModel{}))
+			Expect(cids6[0].Header).To(Equal(models.HeaderModel{}))
 			Expect(len(cids6[0].Transactions)).To(Equal(1))
 			expectedTxCID := test_helpers.MockCIDWrapper.Transactions[1]
-			expectedTxCID.ID = cids6[0].Transactions[0].ID
+			expectedTxCID.TxHash = cids6[0].Transactions[0].TxHash
 			expectedTxCID.HeaderID = cids6[0].Transactions[0].HeaderID
 			Expect(cids6[0].Transactions[0]).To(Equal(expectedTxCID))
 			Expect(len(cids6[0].StateNodes)).To(Equal(0))
 			Expect(len(cids6[0].StorageNodes)).To(Equal(0))
 			Expect(len(cids6[0].Receipts)).To(Equal(1))
 			expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[1]
-			expectedReceiptCID.ID = cids6[0].Receipts[0].ID
 			expectedReceiptCID.TxID = cids6[0].Receipts[0].TxID
+			expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[1].LeafCID
+			expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[1].LeafMhKey
 			Expect(cids6[0].Receipts[0]).To(Equal(expectedReceiptCID))

 			cids7, empty, err := retriever.Retrieve(stateFilter, 1)
@@ -371,13 +406,12 @@ var _ = Describe("Retriever", func() {
 			Expect(empty).ToNot(BeTrue())
 			Expect(len(cids7)).To(Equal(1))
 			Expect(cids7[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
-			Expect(cids7[0].Header).To(Equal(eth2.HeaderModel{}))
+			Expect(cids7[0].Header).To(Equal(models.HeaderModel{}))
 			Expect(len(cids7[0].Transactions)).To(Equal(0))
 			Expect(len(cids7[0].Receipts)).To(Equal(0))
 			Expect(len(cids7[0].StorageNodes)).To(Equal(0))
 			Expect(len(cids7[0].StateNodes)).To(Equal(1))
-			Expect(cids7[0].StateNodes[0]).To(Equal(eth2.StateNodeModel{
+			Expect(cids7[0].StateNodes[0]).To(Equal(models.StateNodeModel{
-				ID:       cids7[0].StateNodes[0].ID,
 				HeaderID: cids7[0].StateNodes[0].HeaderID,
 				NodeType: 2,
 				StateKey: common.BytesToHash(test_helpers.AccountLeafKey).Hex(),
@@ -398,8 +432,12 @@ var _ = Describe("Retriever", func() {
 			Expect(err).To(HaveOccurred())
 		})
 		It("Gets the number of the first block that has data in the database", func() {
-			err := repo.Publish(test_helpers.MockConvertedPayload)
+			tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
 			Expect(err).ToNot(HaveOccurred())

+			err = tx.Submit(err)
+			Expect(err).ToNot(HaveOccurred())

 			num, err := retriever.RetrieveFirstBlockNumber()
 			Expect(err).ToNot(HaveOccurred())
 			Expect(num).To(Equal(int64(1)))
@@ -408,8 +446,12 @@ var _ = Describe("Retriever", func() {
 		It("Gets the number of the first block that has data in the database", func() {
 			payload := test_helpers.MockConvertedPayload
 			payload.Block = newMockBlock(1010101)
-			err := repo.Publish(payload)
+			tx, err := diffIndexer.PushBlock(payload.Block, payload.Receipts, payload.Block.Difficulty())
 			Expect(err).ToNot(HaveOccurred())

+			err = tx.Submit(err)
+			Expect(err).ToNot(HaveOccurred())

 			num, err := retriever.RetrieveFirstBlockNumber()
 			Expect(err).ToNot(HaveOccurred())
 			Expect(num).To(Equal(int64(1010101)))
@@ -420,10 +462,16 @@ var _ = Describe("Retriever", func() {
 			payload1.Block = newMockBlock(1010101)
 			payload2 := payload1
 			payload2.Block = newMockBlock(5)
-			err := repo.Publish(payload1)
+			tx, err := diffIndexer.PushBlock(payload1.Block, payload1.Receipts, payload1.Block.Difficulty())
 			Expect(err).ToNot(HaveOccurred())
-			err = repo.Publish(payload2)
+			err = tx.Submit(err)
 			Expect(err).ToNot(HaveOccurred())

+			tx, err = diffIndexer.PushBlock(payload2.Block, payload2.Receipts, payload2.Block.Difficulty())
+			Expect(err).ToNot(HaveOccurred())
+			err = tx.Submit(err)
+			Expect(err).ToNot(HaveOccurred())

 			num, err := retriever.RetrieveFirstBlockNumber()
 			Expect(err).ToNot(HaveOccurred())
 			Expect(num).To(Equal(int64(5)))
@@ -436,8 +484,11 @@ var _ = Describe("Retriever", func() {
 			Expect(err).To(HaveOccurred())
 		})
 		It("Gets the number of the latest block that has data in the database", func() {
-			err := repo.Publish(test_helpers.MockConvertedPayload)
+			tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
 			Expect(err).ToNot(HaveOccurred())
+			err = tx.Submit(err)
+			Expect(err).ToNot(HaveOccurred())

 			num, err := retriever.RetrieveLastBlockNumber()
 			Expect(err).ToNot(HaveOccurred())
 			Expect(num).To(Equal(int64(1)))
@@ -446,8 +497,12 @@ var _ = Describe("Retriever", func() {
 		It("Gets the number of the latest block that has data in the database", func() {
 			payload := test_helpers.MockConvertedPayload
 			payload.Block = newMockBlock(1010101)
-			err := repo.Publish(payload)
+			tx, err := diffIndexer.PushBlock(payload.Block, payload.Receipts, payload.Block.Difficulty())
 			Expect(err).ToNot(HaveOccurred())

+			err = tx.Submit(err)
+			Expect(err).ToNot(HaveOccurred())

 			num, err := retriever.RetrieveLastBlockNumber()
 			Expect(err).ToNot(HaveOccurred())
 			Expect(num).To(Equal(int64(1010101)))
@@ -458,10 +513,16 @@ var _ = Describe("Retriever", func() {
 			payload1.Block = newMockBlock(1010101)
 			payload2 := payload1
 			payload2.Block = newMockBlock(5)
-			err := repo.Publish(payload1)
+			tx, err := diffIndexer.PushBlock(payload1.Block, payload1.Receipts, payload1.Block.Difficulty())
 			Expect(err).ToNot(HaveOccurred())
-			err = repo.Publish(payload2)
+			err = tx.Submit(err)
 			Expect(err).ToNot(HaveOccurred())

+			tx, err = diffIndexer.PushBlock(payload2.Block, payload2.Receipts, payload2.Block.Difficulty())
+			Expect(err).ToNot(HaveOccurred())
+			err = tx.Submit(err)
+			Expect(err).ToNot(HaveOccurred())

 			num, err := retriever.RetrieveLastBlockNumber()
 			Expect(err).ToNot(HaveOccurred())
 			Expect(num).To(Equal(int64(1010101)))
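Across the updated tests, fixture data is seeded through the go-ethereum statediff indexer rather than the old IPLDPublisher. The recurring pattern, collected in one place for reference (the rollback remark in the comment is an assumption on my part, not something stated in the diff):

	diffIndexer := shared.SetupTestStateDiffIndexer(ctx, params.TestChainConfig, test_helpers.Genesis.Hash())
	tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
	Expect(err).ToNot(HaveOccurred())
	for _, node := range test_helpers.MockStateNodes {
		err = diffIndexer.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
		Expect(err).ToNot(HaveOccurred())
	}
	// Submit finalizes the batch; passing err in presumably lets a failed push abort the write.
	err = tx.Submit(err)
	Expect(err).ToNot(HaveOccurred())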
@@ -21,6 +21,7 @@ import (
 	"context"
 	"io/ioutil"
 	"math/big"
+	"time"

 	"github.com/ethereum/go-ethereum/accounts/abi"
 	"github.com/ethereum/go-ethereum/common"
@@ -29,18 +30,17 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/rpc"
 	"github.com/ethereum/go-ethereum/statediff"
+	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
+	"github.com/jmoiron/sqlx"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"

-	eth2 "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
-	"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers"
-	"github.com/vulcanize/ipld-eth-indexer/pkg/shared"
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
+	ethServerShared "github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
-	"github.com/vulcanize/ipld-eth-server/pkg/eth"
-	"github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers"
 )

 var (
@@ -60,11 +60,12 @@ func init() {
 }

 var _ = Describe("eth state reading tests", func() {
+	const chainLength = 5
 	var (
 		blocks      []*types.Block
 		receipts    []types.Receipts
 		chain       *core.BlockChain
-		db          *postgres.DB
+		db          *sqlx.DB
 		api         *eth.PublicEthAPI
 		backend     *eth.Backend
 		chainConfig = params.TestChainConfig
@@ -74,19 +75,27 @@ var _ = Describe("eth state reading tests", func() {
 	It("test init", func() {
 		// db and type initializations
 		var err error
-		db, err = shared.SetupDB()
+		db = shared.SetupDB()
-		Expect(err).ToNot(HaveOccurred())
+		transformer := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
-		transformer := eth2.NewStateDiffTransformer(chainConfig, db)
 		backend, err = eth.NewEthBackend(db, &eth.Config{
 			ChainConfig: chainConfig,
-			VmConfig:    vm.Config{},
+			VMConfig:    vm.Config{},
-			RPCGasCap:   big.NewInt(10000000000),
+			RPCGasCap:   big.NewInt(10000000000), // Max gas capacity for a rpc call.
+			GroupCacheConfig: &ethServerShared.GroupCacheConfig{
+				StateDB: ethServerShared.GroupConfig{
+					Name:                   "eth_state_test",
+					CacheSizeInMB:          8,
+					CacheExpiryInMins:      60,
+					LogStatsIntervalInSecs: 0,
+				},
+			},
 		})
 		Expect(err).ToNot(HaveOccurred())
-		api = eth.NewPublicEthAPI(backend, nil, false)
+		api, _ = eth.NewPublicEthAPI(backend, nil, false, false, false)

 		// make the test blockchain (and state)
-		blocks, receipts, chain = test_helpers.MakeChain(5, test_helpers.Genesis, test_helpers.TestChainGen)
+		blocks, receipts, chain = test_helpers.MakeChain(chainLength, test_helpers.Genesis, test_helpers.TestChainGen)
 		params := statediff.Params{
 			IntermediateStateNodes:   true,
 			IntermediateStorageNodes: true,
@@ -135,35 +144,45 @@ var _ = Describe("eth state reading tests", func() {
 		}
 		diff, err := builder.BuildStateDiffObject(args, params)
 		Expect(err).ToNot(HaveOccurred())
-		diffRlp, err := rlp.EncodeToBytes(diff)
+		tx, err := transformer.PushBlock(block, rcts, mockTD)
 		Expect(err).ToNot(HaveOccurred())
-		blockRlp, err := rlp.EncodeToBytes(block)
-		Expect(err).ToNot(HaveOccurred())
+		for _, node := range diff.Nodes {
-		receiptsRlp, err := rlp.EncodeToBytes(rcts)
+			err = transformer.PushStateNode(tx, node, block.Hash().String())
 			Expect(err).ToNot(HaveOccurred())
-		payload := statediff.Payload{
-			StateObjectRlp:  diffRlp,
-			BlockRlp:        blockRlp,
-			ReceiptsRlp:     receiptsRlp,
-			TotalDifficulty: mockTD,
 		}
-		_, err = transformer.Transform(0, payload)
+		err = tx.Submit(err)
 		Expect(err).ToNot(HaveOccurred())
 		}

 		// Insert some non-canonical data into the database so that we test our ability to discern canonicity
-		indexAndPublisher := eth2.NewIPLDPublisher(db)
+		indexAndPublisher := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
-		api = eth.NewPublicEthAPI(backend, nil, false)
-		err = indexAndPublisher.Publish(test_helpers.MockConvertedPayload)
+		tx, err := indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
 		Expect(err).ToNot(HaveOccurred())

+		err = tx.Submit(err)
+		Expect(err).ToNot(HaveOccurred())

 		// The non-canonical header has a child
-		err = indexAndPublisher.Publish(test_helpers.MockConvertedPayloadForChild)
+		tx, err = indexAndPublisher.PushBlock(test_helpers.MockChild, test_helpers.MockReceipts, test_helpers.MockChild.Difficulty())
 		Expect(err).ToNot(HaveOccurred())
-		err = publishCode(db, test_helpers.ContractCodeHash, test_helpers.ContractCode)
+		hash := sdtypes.CodeAndCodeHash{
+			Hash: test_helpers.CodeHash,
+			Code: test_helpers.ContractCode,
+		}
+
+		err = indexAndPublisher.PushCodeAndCodeHash(tx, hash)
+		Expect(err).ToNot(HaveOccurred())
+
+		// wait for tx batch process to complete.
+		time.Sleep(10000 * time.Millisecond)
+		err = tx.Submit(err)
 		Expect(err).ToNot(HaveOccurred())
 	})
 	defer It("test teardown", func() {
-		eth.TearDownDB(db)
+		shared.TearDownDB(db)
 		chain.Stop()
 	})

@@ -234,12 +253,15 @@ var _ = Describe("eth state reading tests", func() {
 			bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(1))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
-			_, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(1))
-			Expect(err).To(HaveOccurred())
+			bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(1))
-			Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
+			Expect(err).ToNot(HaveOccurred())
-			_, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(1))
+			Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
-			Expect(err).To(HaveOccurred())
-			Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
+			bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(1))
+			Expect(err).ToNot(HaveOccurred())
+			Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))

 			bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(1))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedBankBalanceBlock1))
@@ -247,12 +269,15 @@ var _ = Describe("eth state reading tests", func() {
 			bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(2))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedAcct1BalanceBlock1))

 			bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(2))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedAcct2BalanceBlock2))

 			bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(2))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedContractBalance))

 			bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(2))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedBankBalanceBlock2))
@@ -260,12 +285,15 @@ var _ = Describe("eth state reading tests", func() {
 			bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(3))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedAcct1BalanceBlock1))

 			bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(3))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedAcct2BalanceBlock3))

 			bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(3))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedContractBalance))

 			bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(3))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedBankBalanceBlock2))
@@ -273,12 +301,15 @@ var _ = Describe("eth state reading tests", func() {
 			bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(4))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedAcct1BalanceBlock1))

 			bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(4))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedAcct2BalanceBlock4))

 			bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(4))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedContractBalance))

 			bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(4))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedBankBalanceBlock2))
@@ -286,12 +317,15 @@ var _ = Describe("eth state reading tests", func() {
 			bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(5))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedAcct1BalanceBlock5))

 			bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(5))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedAcct2BalanceBlock4))

 			bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(5))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedContractBalance))

 			bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(5))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedBankBalanceBlock2))
@@ -304,12 +338,15 @@ var _ = Describe("eth state reading tests", func() {
 			bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
-			_, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
-			Expect(err).To(HaveOccurred())
+			bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
-			Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
+			Expect(err).ToNot(HaveOccurred())
+			Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))

 			_, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
-			Expect(err).To(HaveOccurred())
+			Expect(err).ToNot(HaveOccurred())
-			Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
+			Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))

 			bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedBankBalanceBlock1))
@@ -317,12 +354,15 @@ var _ = Describe("eth state reading tests", func() {
 			bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedAcct1BalanceBlock1))

 			bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
 			Expect(err).ToNot(HaveOccurred())

 			Expect(bal).To(Equal(expectedAcct2BalanceBlock2))
 			bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
 			Expect(err).ToNot(HaveOccurred())
 			Expect(bal).To(Equal(expectedContractBalance))

 			bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
|
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
|
||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(bal).To(Equal(expectedBankBalanceBlock2))
|
Expect(bal).To(Equal(expectedBankBalanceBlock2))
|
||||||
@ -330,12 +370,15 @@ var _ = Describe("eth state reading tests", func() {
|
|||||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
|
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
|
||||||
|
|
||||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(bal).To(Equal(expectedAcct2BalanceBlock3))
|
Expect(bal).To(Equal(expectedAcct2BalanceBlock3))
|
||||||
|
|
||||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(bal).To(Equal(expectedContractBalance))
|
Expect(bal).To(Equal(expectedContractBalance))
|
||||||
|
|
||||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
|
||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(bal).To(Equal(expectedBankBalanceBlock2))
|
Expect(bal).To(Equal(expectedBankBalanceBlock2))
|
||||||
@ -343,12 +386,15 @@ var _ = Describe("eth state reading tests", func() {
|
|||||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
|
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
|
||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
|
Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
|
||||||
|
|
||||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
|
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
|
||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
|
Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
|
||||||
|
|
||||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
|
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
|
||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(bal).To(Equal(expectedContractBalance))
|
Expect(bal).To(Equal(expectedContractBalance))
|
||||||
|
|
||||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
|
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
|
||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(bal).To(Equal(expectedBankBalanceBlock2))
|
Expect(bal).To(Equal(expectedBankBalanceBlock2))
|
||||||
@ -356,37 +402,45 @@ var _ = Describe("eth state reading tests", func() {
|
|||||||
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(bal).To(Equal(expectedAcct1BalanceBlock5))
|
Expect(bal).To(Equal(expectedAcct1BalanceBlock5))
|
||||||
|
|
||||||
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
|
Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
|
||||||
|
|
||||||
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(bal).To(Equal(expectedContractBalance))
|
Expect(bal).To(Equal(expectedContractBalance))
|
||||||
|
|
||||||
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
|
||||||
Expect(err).ToNot(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(bal).To(Equal(expectedBankBalanceBlock2))
|
Expect(bal).To(Equal(expectedBankBalanceBlock2))
|
||||||
})
|
})
|
||||||
It("Throws an error for an account it cannot find the balance for an account at the provided block number", func() {
|
It("Returns `0` for an account it cannot find the balance for an account at the provided block number", func() {
|
||||||
_, err := api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(0))
|
bal, err := api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(0))
|
||||||
Expect(err).To(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
|
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||||
_, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(0))
|
|
||||||
Expect(err).To(HaveOccurred())
|
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(0))
|
||||||
Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
|
Expect(err).ToNot(HaveOccurred())
|
||||||
_, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(0))
|
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||||
Expect(err).To(HaveOccurred())
|
|
||||||
Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
|
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(0))
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||||
})
|
})
|
||||||
It("Throws an error for an account it cannot find the balance for an account at the provided block hash", func() {
|
It("Returns `0` for an error for an account it cannot find the balance for an account at the provided block hash", func() {
|
||||||
_, err := api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
|
bal, err := api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
|
||||||
Expect(err).To(HaveOccurred())
|
Expect(err).ToNot(HaveOccurred())
|
||||||
Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
|
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||||
_, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
|
|
||||||
Expect(err).To(HaveOccurred())
|
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
|
||||||
Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
|
Expect(err).ToNot(HaveOccurred())
|
||||||
_, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
|
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||||
Expect(err).To(HaveOccurred())
|
|
||||||
Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
|
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
|
||||||
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
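The hunks above flip eth_getBalance's behavior for accounts that are absent at the requested block: instead of surfacing a "sql: no rows in result set" error, the API now returns a zero balance with no error. A minimal caller-side sketch of what that contract implies, written against the same method shape these tests exercise; the `balanceAPI` interface and `isFunded` helper are illustrative, not part of this changeset.

```go
package ethexample

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

// balanceAPI captures just the method shape these tests exercise (illustrative).
type balanceAPI interface {
	GetBalance(ctx context.Context, addr common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Big, error)
}

// isFunded relies on the new semantics asserted above: an account that is absent
// at the requested block yields a zero balance and a nil error, so only genuine
// lookup failures (e.g. an unknown block) surface as errors.
func isFunded(ctx context.Context, api balanceAPI, addr common.Address, block rpc.BlockNumberOrHash) (bool, error) {
	bal, err := api.GetBalance(ctx, addr, block)
	if err != nil {
		return false, err
	}
	return bal.ToInt().Sign() > 0, nil
}
```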
@@ -409,51 +463,63 @@ var _ = Describe("eth state reading tests", func() {
Expect(err).ToNot(HaveOccurred())
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
})
-It("Throws an error for an account it cannot find the code for", func() {
-_, err := api.GetCode(ctx, randomAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
-Expect(err).To(HaveOccurred())
+It("Returns `nil` for an account it cannot find the code for", func() {
+code, err := api.GetCode(ctx, randomAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
+Expect(err).ToNot(HaveOccurred())
+Expect(code).To(BeEmpty())
})
-It("Throws an error for a contract that doesn't exist at this hieght", func() {
-_, err := api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(0))
-Expect(err).To(HaveOccurred())
+It("Returns `nil` for a contract that doesn't exist at this height", func() {
+code, err := api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(0))
+Expect(err).ToNot(HaveOccurred())
+Expect(code).To(BeEmpty())
})
})

Describe("eth_getStorageAt", func() {
-It("Throws an error if it tries to access a contract which does not exist", func() {
-_, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(0))
-Expect(err).To(HaveOccurred())
-Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
+It("Returns empty slice if it tries to access a contract which does not exist", func() {
+storage, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(0))
+Expect(err).NotTo(HaveOccurred())
+Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))

-_, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(1))
-Expect(err).To(HaveOccurred())
-Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
+storage, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(1))
+Expect(err).NotTo(HaveOccurred())
+Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
})
-It("Throws an error if it tries to access a contract slot which does not exist", func() {
-_, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, randomHash.Hex(), rpc.BlockNumberOrHashWithNumber(2))
-Expect(err).To(HaveOccurred())
-Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
+It("Returns empty slice if it tries to access a contract slot which does not exist", func() {
+storage, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, randomHash.Hex(), rpc.BlockNumberOrHashWithNumber(2))
+Expect(err).NotTo(HaveOccurred())
+Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
})
It("Retrieves the storage value at the provided contract address and storage leaf key at the block with the provided hash or number", func() {
// After deployment
-val, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(2))
+val, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(2))
Expect(err).ToNot(HaveOccurred())
-expectedRes := hexutil.Bytes(common.Hex2Bytes("01"))
+expectedRes := hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
Expect(val).To(Equal(expectedRes))

-val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(3))
+val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred())
-expectedRes = hexutil.Bytes(common.Hex2Bytes("03"))
+expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003"))
Expect(val).To(Equal(expectedRes))

-val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(4))
+val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(4))
Expect(err).ToNot(HaveOccurred())
-expectedRes = hexutil.Bytes(common.Hex2Bytes("09"))
+expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000009"))
Expect(val).To(Equal(expectedRes))

-val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(5))
+val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred())
-Expect(val).To(Equal(hexutil.Bytes{}))
+Expect(val).To(Equal(hexutil.Bytes(eth.EmptyNodeValue)))
+})
+It("Throws an error for a non-existing block hash", func() {
+_, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithHash(randomHash, true))
+Expect(err).To(HaveOccurred())
+Expect(err).To(MatchError("header for hash not found"))
+})
+It("Throws an error for a non-existing block number", func() {
+_, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(chainLength+1))
+Expect(err).To(HaveOccurred())
+Expect(err).To(MatchError("header not found"))
})
})

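With the hunk above, eth_getStorageAt now returns the full 32-byte, left-padded slot value (and eth.EmptyNodeValue for missing contracts or slots) rather than a trimmed byte string or a database error. A small sketch of decoding such a value, again against an illustrative interface rather than the concrete API type; the `storageAPI` and `slotAsInt` names are assumptions.

```go
package ethexample

import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

// storageAPI mirrors the eth_getStorageAt call shape used by the tests (illustrative).
type storageAPI interface {
	GetStorageAt(ctx context.Context, addr common.Address, key string, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error)
}

// slotAsInt decodes the 32-byte, left-padded slot value the updated tests expect
// (e.g. 0x00...01); an empty or all-zero value simply decodes to 0.
func slotAsInt(ctx context.Context, api storageAPI, addr common.Address, slot string, block rpc.BlockNumberOrHash) (*big.Int, error) {
	val, err := api.GetStorageAt(ctx, addr, slot, block)
	if err != nil {
		return nil, err
	}
	return new(big.Int).SetBytes(val), nil
}
```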
@@ -25,7 +25,7 @@ import (
"github.com/sirupsen/logrus"
)

-func TestETHWatcher(t *testing.T) {
+func TestETHSuite(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "eth ipld server eth suite test")
}
@@ -19,22 +19,20 @@ package eth
import (
"bytes"

-sdtypes "github.com/ethereum/go-ethereum/statediff/types"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
+"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+"github.com/ethereum/go-ethereum/statediff/indexer/models"
+sdtypes "github.com/ethereum/go-ethereum/statediff/types"
+"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"

-"github.com/vulcanize/ipld-eth-indexer/pkg/eth"
-"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
-"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs/ipld"
)

// Filterer interface for substituing mocks in tests
type Filterer interface {
-Filter(filter SubscriptionSettings, payload eth.ConvertedPayload) (*IPLDs, error)
+Filter(filter SubscriptionSettings, payload ConvertedPayload) (*IPLDs, error)
}

// ResponseFilterer satisfies the ResponseFilterer interface for ethereum
@@ -46,7 +44,7 @@ func NewResponseFilterer() *ResponseFilterer {
}

// Filter is used to filter through eth data to extract and package requested data into a Payload
-func (s *ResponseFilterer) Filter(filter SubscriptionSettings, payload eth.ConvertedPayload) (*IPLDs, error) {
+func (s *ResponseFilterer) Filter(filter SubscriptionSettings, payload ConvertedPayload) (*IPLDs, error) {
if checkRange(filter.Start.Int64(), filter.End.Int64(), payload.Block.Number().Int64()) {
response := new(IPLDs)
response.TotalDifficulty = payload.TotalDifficulty
@@ -73,7 +71,7 @@ func (s *ResponseFilterer) Filter(filter SubscriptionSettings, payload eth.Conve
return nil, nil
}

-func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IPLDs, payload eth.ConvertedPayload) error {
+func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IPLDs, payload ConvertedPayload) error {
if !headerFilter.Off {
headerRLP, err := rlp.EncodeToBytes(payload.Block.Header())
if err != nil {
@@ -83,12 +81,12 @@ func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IP
if err != nil {
return err
}
-response.Header = ipfs.BlockModel{
+response.Header = models.IPLDModel{
Data: headerRLP,
-CID: cid.String(),
+Key: cid.String(),
}
if headerFilter.Uncles {
-response.Uncles = make([]ipfs.BlockModel, len(payload.Block.Body().Uncles))
+response.Uncles = make([]models.IPLDModel, len(payload.Block.Body().Uncles))
for i, uncle := range payload.Block.Body().Uncles {
uncleRlp, err := rlp.EncodeToBytes(uncle)
if err != nil {
@@ -98,9 +96,9 @@ func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IP
if err != nil {
return err
}
-response.Uncles[i] = ipfs.BlockModel{
+response.Uncles[i] = models.IPLDModel{
Data: uncleRlp,
-CID: cid.String(),
+Key: cid.String(),
}
}
}
@@ -115,12 +113,12 @@ func checkRange(start, end, actual int64) bool {
return false
}

-func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLDs, payload eth.ConvertedPayload) ([]common.Hash, error) {
+func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLDs, payload ConvertedPayload) ([]common.Hash, error) {
var trxHashes []common.Hash
if !trxFilter.Off {
trxLen := len(payload.Block.Body().Transactions)
trxHashes = make([]common.Hash, 0, trxLen)
-response.Transactions = make([]ipfs.BlockModel, 0, trxLen)
+response.Transactions = make([]models.IPLDModel, 0, trxLen)
for i, trx := range payload.Block.Body().Transactions {
// TODO: check if want corresponding receipt and if we do we must include this transaction
if checkTransactionAddrs(trxFilter.Src, trxFilter.Dst, payload.TxMetaData[i].Src, payload.TxMetaData[i].Dst) {
@@ -133,9 +131,9 @@ func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLD
if err != nil {
return nil, err
}
-response.Transactions = append(response.Transactions, ipfs.BlockModel{
+response.Transactions = append(response.Transactions, models.IPLDModel{
Data: data,
-CID: cid.String(),
+Key: cid.String(),
})
trxHashes = append(trxHashes, trx.Hash())
}
@@ -163,25 +161,30 @@ func checkTransactionAddrs(wantedSrc, wantedDst []string, actualSrc, actualDst s
return false
}

-func (s *ResponseFilterer) filerReceipts(receiptFilter ReceiptFilter, response *IPLDs, payload eth.ConvertedPayload, trxHashes []common.Hash) error {
+func (s *ResponseFilterer) filerReceipts(receiptFilter ReceiptFilter, response *IPLDs, payload ConvertedPayload, trxHashes []common.Hash) error {
if !receiptFilter.Off {
-response.Receipts = make([]ipfs.BlockModel, 0, len(payload.Receipts))
-for i, receipt := range payload.Receipts {
+response.Receipts = make([]models.IPLDModel, 0, len(payload.Receipts))
+rctLeafCID, rctIPLDData, err := GetRctLeafNodeData(payload.Receipts)
+if err != nil {
+return err
+}
+
+for idx, receipt := range payload.Receipts {
// topics is always length 4
-topics := [][]string{payload.ReceiptMetaData[i].Topic0s, payload.ReceiptMetaData[i].Topic1s, payload.ReceiptMetaData[i].Topic2s, payload.ReceiptMetaData[i].Topic3s}
-if checkReceipts(receipt, receiptFilter.Topics, topics, receiptFilter.LogAddresses, payload.ReceiptMetaData[i].LogContracts, trxHashes) {
-receiptBuffer := new(bytes.Buffer)
-if err := receipt.EncodeRLP(receiptBuffer); err != nil {
-return err
-}
-data := receiptBuffer.Bytes()
-cid, err := ipld.RawdataToCid(ipld.MEthTxReceipt, data, multihash.KECCAK_256)
-if err != nil {
-return err
-}
-response.Receipts = append(response.Receipts, ipfs.BlockModel{
-Data: data,
-CID: cid.String(),
+topics := make([][]string, 4)
+contracts := make([]string, len(receipt.Logs))
+for _, l := range receipt.Logs {
+contracts = append(contracts, l.Address.String())
+for idx, t := range l.Topics {
+topics[idx] = append(topics[idx], t.String())
+}
+}
+// TODO: Verify this filter logic.
+if checkReceipts(receipt, receiptFilter.Topics, topics, receiptFilter.LogAddresses, contracts, trxHashes) {
+response.Receipts = append(response.Receipts, models.IPLDModel{
+Data: rctIPLDData[idx],
+Key: rctLeafCID[idx].String(),
})
}
}
@@ -253,7 +256,7 @@ func slicesShareString(slice1, slice2 []string) int {
}

// filterStateAndStorage filters state and storage nodes into the response according to the provided filters
-func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storageFilter StorageFilter, response *IPLDs, payload eth.ConvertedPayload) error {
+func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storageFilter StorageFilter, response *IPLDs, payload ConvertedPayload) error {
response.StateNodes = make([]StateNode, 0, len(payload.StateNodes))
response.StorageNodes = make([]StorageNode, 0)
stateAddressFilters := make([]common.Hash, len(stateFilter.Addresses))
@@ -270,37 +273,37 @@ func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storag
}
for _, stateNode := range payload.StateNodes {
if !stateFilter.Off && checkNodeKeys(stateAddressFilters, stateNode.LeafKey) {
-if stateNode.Type == sdtypes.Leaf || stateFilter.IntermediateNodes {
-cid, err := ipld.RawdataToCid(ipld.MEthStateTrie, stateNode.Value, multihash.KECCAK_256)
+if stateNode.NodeType == sdtypes.Leaf || stateFilter.IntermediateNodes {
+cid, err := ipld.RawdataToCid(ipld.MEthStateTrie, stateNode.NodeValue, multihash.KECCAK_256)
if err != nil {
return err
}
response.StateNodes = append(response.StateNodes, StateNode{
-StateLeafKey: stateNode.LeafKey,
+StateLeafKey: common.BytesToHash(stateNode.LeafKey),
Path: stateNode.Path,
-IPLD: ipfs.BlockModel{
-Data: stateNode.Value,
-CID: cid.String(),
+IPLD: models.IPLDModel{
+Data: stateNode.NodeValue,
+Key: cid.String(),
},
-Type: stateNode.Type,
+Type: stateNode.NodeType,
})
}
}
if !storageFilter.Off && checkNodeKeys(storageAddressFilters, stateNode.LeafKey) {
for _, storageNode := range payload.StorageNodes[common.Bytes2Hex(stateNode.Path)] {
if checkNodeKeys(storageKeyFilters, storageNode.LeafKey) {
-cid, err := ipld.RawdataToCid(ipld.MEthStorageTrie, storageNode.Value, multihash.KECCAK_256)
+cid, err := ipld.RawdataToCid(ipld.MEthStorageTrie, storageNode.NodeValue, multihash.KECCAK_256)
if err != nil {
return err
}
response.StorageNodes = append(response.StorageNodes, StorageNode{
-StateLeafKey: stateNode.LeafKey,
-StorageLeafKey: storageNode.LeafKey,
-IPLD: ipfs.BlockModel{
-Data: storageNode.Value,
-CID: cid.String(),
+StateLeafKey: common.BytesToHash(stateNode.LeafKey),
+StorageLeafKey: common.BytesToHash(storageNode.LeafKey),
+IPLD: models.IPLDModel{
+Data: storageNode.NodeValue,
+Key: cid.String(),
},
-Type: storageNode.Type,
+Type: storageNode.NodeType,
Path: storageNode.Path,
})
}
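The replacements in this hunk follow one mechanical rule: the indexer's `ipfs.BlockModel{Data, CID}` becomes the statediff indexer's `models.IPLDModel{Data, Key}`, with the CID string carried in `Key`. A tiny sketch of that mapping; the `toIPLDModel` helper is hypothetical, but the fields match how the diff constructs the struct.

```go
package ethexample

import "github.com/ethereum/go-ethereum/statediff/indexer/models"

// toIPLDModel shows the mechanical field mapping applied throughout this diff:
// the CID string that used to live in BlockModel.CID is carried in IPLDModel.Key.
func toIPLDModel(cidStr string, data []byte) models.IPLDModel {
	return models.IPLDModel{
		Data: data,
		Key:  cidStr,
	}
}
```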
@@ -310,15 +313,52 @@ func (s *ResponseFilterer) filterStateAndStorag
return nil
}

-func checkNodeKeys(wantedKeys []common.Hash, actualKey common.Hash) bool {
+func checkNodeKeys(wantedKeys []common.Hash, actualKey []byte) bool {
// If we aren't filtering for any specific keys, all nodes are a go
if len(wantedKeys) == 0 {
return true
}
for _, key := range wantedKeys {
-if bytes.Equal(key.Bytes(), actualKey.Bytes()) {
+if bytes.Equal(key.Bytes(), actualKey) {
return true
}
}
return false
}

+// GetRctLeafNodeData converts the receipts to receipt trie and returns the receipt leaf node IPLD data and
+// corresponding CIDs
+func GetRctLeafNodeData(rcts types.Receipts) ([]cid.Cid, [][]byte, error) {
+receiptTrie := ipld.NewRctTrie()
+for idx, rct := range rcts {
+ethRct, err := ipld.NewReceipt(rct)
+if err != nil {
+return nil, nil, err
+}
+if err = receiptTrie.Add(idx, ethRct.RawData()); err != nil {
+return nil, nil, err
+}
+}
+
+rctLeafNodes, keys, err := receiptTrie.GetLeafNodes()
+if err != nil {
+return nil, nil, err
+}
+
+ethRctleafNodeCids := make([]cid.Cid, len(rctLeafNodes))
+ethRctleafNodeData := make([][]byte, len(rctLeafNodes))
+for i, rln := range rctLeafNodes {
+var idx uint
+
+r := bytes.NewReader(keys[i].TrieKey)
+err = rlp.Decode(r, &idx)
+if err != nil {
+return nil, nil, err
+}
+
+ethRctleafNodeCids[idx] = rln.Cid()
+ethRctleafNodeData[idx] = rln.RawData()
+}
+
+return ethRctleafNodeCids, ethRctleafNodeData, nil
+}
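The new `GetRctLeafNodeData` helper above rebuilds the receipt trie for a block and hands back the leaf-node IPLD data with matching CIDs, indexed by receipt position. A usage sketch follows; the `receiptIPLDs` wrapper is hypothetical, while the function signature and the v3 import path are taken from this diff.

```go
package ethexample

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/statediff/indexer/models"

	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
)

// receiptIPLDs pairs each receipt's trie-leaf IPLD data with its CID using the
// GetRctLeafNodeData helper introduced above (usage sketch only).
func receiptIPLDs(rcts types.Receipts) ([]models.IPLDModel, error) {
	leafCIDs, leafData, err := eth.GetRctLeafNodeData(rcts)
	if err != nil {
		return nil, err
	}
	out := make([]models.IPLDModel, len(leafCIDs))
	for i := range leafCIDs {
		out[i] = models.IPLDModel{Data: leafData[i], Key: leafCIDs[i].String()}
	}
	return out, nil
}
```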
@@ -19,16 +19,15 @@ package eth_test
import (
"bytes"

+"github.com/ethereum/go-ethereum/statediff/indexer/models"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

-"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
-"github.com/vulcanize/ipld-eth-server/pkg/eth"
-"github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers"
-"github.com/vulcanize/ipld-eth-server/pkg/shared"
+"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
+"github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers"
+"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
)

var (
@@ -47,29 +46,29 @@ var _ = Describe("Filterer", func() {
Expect(iplds).ToNot(BeNil())
Expect(iplds.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds.Header).To(Equal(test_helpers.MockIPLDs.Header))
-var expectedEmptyUncles []ipfs.BlockModel
+var expectedEmptyUncles []models.IPLDModel
Expect(iplds.Uncles).To(Equal(expectedEmptyUncles))
-Expect(len(iplds.Transactions)).To(Equal(3))
-Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.MockTransactions.GetRlp(0))).To(BeTrue())
-Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.MockTransactions.GetRlp(1))).To(BeTrue())
-Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.MockTransactions.GetRlp(2))).To(BeTrue())
-Expect(len(iplds.Receipts)).To(Equal(3))
-Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.MockReceipts.GetRlp(0))).To(BeTrue())
-Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.MockReceipts.GetRlp(1))).To(BeTrue())
-Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.MockReceipts.GetRlp(2))).To(BeTrue())
+Expect(len(iplds.Transactions)).To(Equal(4))
+Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.Tx1)).To(BeTrue())
+Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.Tx2)).To(BeTrue())
+Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.Tx3)).To(BeTrue())
+Expect(len(iplds.Receipts)).To(Equal(4))
+Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct1IPLD)).To(BeTrue())
+Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct2IPLD)).To(BeTrue())
+Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct3IPLD)).To(BeTrue())
Expect(len(iplds.StateNodes)).To(Equal(2))
for _, stateNode := range iplds.StateNodes {
Expect(stateNode.Type).To(Equal(sdtypes.Leaf))
if bytes.Equal(stateNode.StateLeafKey.Bytes(), test_helpers.AccountLeafKey) {
-Expect(stateNode.IPLD).To(Equal(ipfs.BlockModel{
+Expect(stateNode.IPLD).To(Equal(models.IPLDModel{
Data: test_helpers.State2IPLD.RawData(),
-CID: test_helpers.State2IPLD.Cid().String(),
+Key: test_helpers.State2IPLD.Cid().String(),
}))
}
if bytes.Equal(stateNode.StateLeafKey.Bytes(), test_helpers.ContractLeafKey) {
-Expect(stateNode.IPLD).To(Equal(ipfs.BlockModel{
+Expect(stateNode.IPLD).To(Equal(models.IPLDModel{
Data: test_helpers.State1IPLD.RawData(),
-CID: test_helpers.State1IPLD.Cid().String(),
+Key: test_helpers.State1IPLD.Cid().String(),
}))
}
}
@@ -81,116 +80,116 @@ var _ = Describe("Filterer", func() {
Expect(err).ToNot(HaveOccurred())
Expect(iplds1).ToNot(BeNil())
Expect(iplds1.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
-Expect(iplds1.Header).To(Equal(ipfs.BlockModel{}))
+Expect(iplds1.Header).To(Equal(models.IPLDModel{}))
Expect(len(iplds1.Uncles)).To(Equal(0))
Expect(len(iplds1.Transactions)).To(Equal(0))
Expect(len(iplds1.StorageNodes)).To(Equal(0))
Expect(len(iplds1.StateNodes)).To(Equal(0))
Expect(len(iplds1.Receipts)).To(Equal(1))
-Expect(iplds1.Receipts[0]).To(Equal(ipfs.BlockModel{
-Data: test_helpers.Rct1IPLD.RawData(),
-CID: test_helpers.Rct1IPLD.Cid().String(),
+Expect(iplds1.Receipts[0]).To(Equal(models.IPLDModel{
+Data: test_helpers.Rct1IPLD,
+Key: test_helpers.Rct1CID.String(),
}))

iplds2, err := filterer.Filter(rctTopicsFilter, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
Expect(iplds2).ToNot(BeNil())
Expect(iplds2.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
-Expect(iplds2.Header).To(Equal(ipfs.BlockModel{}))
+Expect(iplds2.Header).To(Equal(models.IPLDModel{}))
Expect(len(iplds2.Uncles)).To(Equal(0))
Expect(len(iplds2.Transactions)).To(Equal(0))
Expect(len(iplds2.StorageNodes)).To(Equal(0))
Expect(len(iplds2.StateNodes)).To(Equal(0))
Expect(len(iplds2.Receipts)).To(Equal(1))
-Expect(iplds2.Receipts[0]).To(Equal(ipfs.BlockModel{
-Data: test_helpers.Rct1IPLD.RawData(),
-CID: test_helpers.Rct1IPLD.Cid().String(),
+Expect(iplds2.Receipts[0]).To(Equal(models.IPLDModel{
+Data: test_helpers.Rct1IPLD,
+Key: test_helpers.Rct1CID.String(),
}))

iplds3, err := filterer.Filter(rctTopicsAndAddressFilter, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
Expect(iplds3).ToNot(BeNil())
Expect(iplds3.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
-Expect(iplds3.Header).To(Equal(ipfs.BlockModel{}))
+Expect(iplds3.Header).To(Equal(models.IPLDModel{}))
Expect(len(iplds3.Uncles)).To(Equal(0))
Expect(len(iplds3.Transactions)).To(Equal(0))
Expect(len(iplds3.StorageNodes)).To(Equal(0))
Expect(len(iplds3.StateNodes)).To(Equal(0))
Expect(len(iplds3.Receipts)).To(Equal(1))
-Expect(iplds3.Receipts[0]).To(Equal(ipfs.BlockModel{
-Data: test_helpers.Rct1IPLD.RawData(),
-CID: test_helpers.Rct1IPLD.Cid().String(),
+Expect(iplds3.Receipts[0]).To(Equal(models.IPLDModel{
+Data: test_helpers.Rct1IPLD,
+Key: test_helpers.Rct1CID.String(),
}))

iplds4, err := filterer.Filter(rctAddressesAndTopicFilter, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
Expect(iplds4).ToNot(BeNil())
Expect(iplds4.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
-Expect(iplds4.Header).To(Equal(ipfs.BlockModel{}))
+Expect(iplds4.Header).To(Equal(models.IPLDModel{}))
Expect(len(iplds4.Uncles)).To(Equal(0))
Expect(len(iplds4.Transactions)).To(Equal(0))
Expect(len(iplds4.StorageNodes)).To(Equal(0))
Expect(len(iplds4.StateNodes)).To(Equal(0))
Expect(len(iplds4.Receipts)).To(Equal(1))
-Expect(iplds4.Receipts[0]).To(Equal(ipfs.BlockModel{
-Data: test_helpers.Rct2IPLD.RawData(),
-CID: test_helpers.Rct2IPLD.Cid().String(),
+Expect(iplds4.Receipts[0]).To(Equal(models.IPLDModel{
+Data: test_helpers.Rct2IPLD,
+Key: test_helpers.Rct2CID.String(),
}))

iplds5, err := filterer.Filter(rctsForAllCollectedTrxs, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
Expect(iplds5).ToNot(BeNil())
Expect(iplds5.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
-Expect(iplds5.Header).To(Equal(ipfs.BlockModel{}))
+Expect(iplds5.Header).To(Equal(models.IPLDModel{}))
Expect(len(iplds5.Uncles)).To(Equal(0))
-Expect(len(iplds5.Transactions)).To(Equal(3))
-Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.MockTransactions.GetRlp(0))).To(BeTrue())
-Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.MockTransactions.GetRlp(1))).To(BeTrue())
-Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.MockTransactions.GetRlp(2))).To(BeTrue())
+Expect(len(iplds5.Transactions)).To(Equal(4))
+Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx1)).To(BeTrue())
+Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx2)).To(BeTrue())
+Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx3)).To(BeTrue())
Expect(len(iplds5.StorageNodes)).To(Equal(0))
Expect(len(iplds5.StateNodes)).To(Equal(0))
-Expect(len(iplds5.Receipts)).To(Equal(3))
-Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.MockReceipts.GetRlp(0))).To(BeTrue())
-Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.MockReceipts.GetRlp(1))).To(BeTrue())
-Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.MockReceipts.GetRlp(2))).To(BeTrue())
+Expect(len(iplds5.Receipts)).To(Equal(4))
+Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct1IPLD)).To(BeTrue())
+Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct2IPLD)).To(BeTrue())
+Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct3IPLD)).To(BeTrue())

iplds6, err := filterer.Filter(rctsForSelectCollectedTrxs, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
Expect(iplds6).ToNot(BeNil())
Expect(iplds6.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
-Expect(iplds6.Header).To(Equal(ipfs.BlockModel{}))
+Expect(iplds6.Header).To(Equal(models.IPLDModel{}))
Expect(len(iplds6.Uncles)).To(Equal(0))
Expect(len(iplds6.Transactions)).To(Equal(1))
-Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.MockTransactions.GetRlp(1))).To(BeTrue())
+Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx2)).To(BeTrue())
Expect(len(iplds6.StorageNodes)).To(Equal(0))
Expect(len(iplds6.StateNodes)).To(Equal(0))
Expect(len(iplds6.Receipts)).To(Equal(1))
-Expect(iplds4.Receipts[0]).To(Equal(ipfs.BlockModel{
-Data: test_helpers.Rct2IPLD.RawData(),
-CID: test_helpers.Rct2IPLD.Cid().String(),
+Expect(iplds4.Receipts[0]).To(Equal(models.IPLDModel{
+Data: test_helpers.Rct2IPLD,
+Key: test_helpers.Rct2CID.String(),
}))

iplds7, err := filterer.Filter(stateFilter, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
Expect(iplds7).ToNot(BeNil())
Expect(iplds7.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
-Expect(iplds7.Header).To(Equal(ipfs.BlockModel{}))
+Expect(iplds7.Header).To(Equal(models.IPLDModel{}))
Expect(len(iplds7.Uncles)).To(Equal(0))
Expect(len(iplds7.Transactions)).To(Equal(0))
Expect(len(iplds7.StorageNodes)).To(Equal(0))
Expect(len(iplds7.Receipts)).To(Equal(0))
Expect(len(iplds7.StateNodes)).To(Equal(1))
Expect(iplds7.StateNodes[0].StateLeafKey.Bytes()).To(Equal(test_helpers.AccountLeafKey))
-Expect(iplds7.StateNodes[0].IPLD).To(Equal(ipfs.BlockModel{
+Expect(iplds7.StateNodes[0].IPLD).To(Equal(models.IPLDModel{
Data: test_helpers.State2IPLD.RawData(),
-CID: test_helpers.State2IPLD.Cid().String(),
+Key: test_helpers.State2IPLD.Cid().String(),
}))

iplds8, err := filterer.Filter(rctTopicsAndAddressFilterFail, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
Expect(iplds8).ToNot(BeNil())
Expect(iplds8.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
-Expect(iplds8.Header).To(Equal(ipfs.BlockModel{}))
+Expect(iplds8.Header).To(Equal(models.IPLDModel{}))
Expect(len(iplds8.Uncles)).To(Equal(0))
Expect(len(iplds8.Transactions)).To(Equal(0))
Expect(len(iplds8.StorageNodes)).To(Equal(0))
@@ -17,11 +17,7 @@
package eth

import (
-"fmt"

sdtypes "github.com/ethereum/go-ethereum/statediff/types"

-"github.com/ethereum/go-ethereum/params"
)

func ResolveToNodeType(nodeType int) sdtypes.NodeType {
@@ -38,19 +34,3 @@ func ResolveToNodeType(nodeType int) sdtypes.NodeType {
return sdtypes.Unknown
}
}

-// ChainConfig returns the appropriate ethereum chain config for the provided chain id
-func ChainConfig(chainID uint64) (*params.ChainConfig, error) {
-switch chainID {
-case 1:
-return params.MainnetChainConfig, nil
-case 3:
-return params.RopstenChainConfig, nil
-case 4:
-return params.RinkebyChainConfig, nil
-case 5:
-return params.GoerliChainConfig, nil
-default:
-return nil, fmt.Errorf("chain config for chainid %d not available", chainID)
-}
-}
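The second hunk above removes the package's `ChainConfig` helper along with its `fmt` and `params` imports. Callers that still need a chain-ID-to-config lookup can inline the same switch over go-ethereum's params package; the sketch below simply restates the deleted helper outside this package.

```go
package ethexample

import (
	"fmt"

	"github.com/ethereum/go-ethereum/params"
)

// chainConfig restates the deleted helper for callers that still need a
// chain-ID-to-config lookup against go-ethereum's params package.
func chainConfig(chainID uint64) (*params.ChainConfig, error) {
	switch chainID {
	case 1:
		return params.MainnetChainConfig, nil
	case 3:
		return params.RopstenChainConfig, nil
	case 4:
		return params.RinkebyChainConfig, nil
	case 5:
		return params.GoerliChainConfig, nil
	default:
		return nil, fmt.Errorf("chain config for chainid %d not available", chainID)
	}
}
```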
@@ -22,14 +22,10 @@ import (
"math/big"

"github.com/ethereum/go-ethereum/common"
+"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/jmoiron/sqlx"
log "github.com/sirupsen/logrus"
+"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
-"github.com/vulcanize/ipld-eth-indexer/pkg/eth"
-"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
-"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
-
-"github.com/vulcanize/ipld-eth-server/pkg/shared"
)

// Fetcher interface for substituting mocks in tests
@@ -40,11 +36,11 @@ type Fetcher interface {
// IPLDFetcher satisfies the IPLDFetcher interface for ethereum
// It interfaces directly with PG-IPFS
type IPLDFetcher struct {
-db *postgres.DB
+db *sqlx.DB
}

// NewIPLDFetcher creates a pointer to a new IPLDFetcher
-func NewIPLDFetcher(db *postgres.DB) *IPLDFetcher {
+func NewIPLDFetcher(db *sqlx.DB) *IPLDFetcher {
return &IPLDFetcher{
db: db,
}
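With these hunks the fetcher no longer depends on the indexer's postgres.DB wrapper; it is constructed from a plain *sqlx.DB. A wiring sketch, assuming a Postgres driver such as lib/pq; the `newFetcher` helper and DSN handling are illustrative, while the constructor signature comes from the diff.

```go
package ethexample

import (
	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // Postgres driver; an assumption, not shown in this diff

	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
)

// newFetcher wires the fetcher from a plain sqlx connection, matching the new
// NewIPLDFetcher(db *sqlx.DB) signature (the DSN handling here is illustrative).
func newFetcher(dsn string) (*eth.IPLDFetcher, error) {
	db, err := sqlx.Connect("postgres", dsn)
	if err != nil {
		return nil, err
	}
	return eth.NewIPLDFetcher(db), nil
}
```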
@@ -103,72 +99,73 @@ func (f *IPLDFetcher) Fetch(cids CIDWrapper) (*IPLDs, error) {
     return iplds, err
 }

-// FetchHeaders fetches headers
-func (f *IPLDFetcher) FetchHeader(tx *sqlx.Tx, c eth.HeaderModel) (ipfs.BlockModel, error) {
+// FetchHeader fetches header
+func (f *IPLDFetcher) FetchHeader(tx *sqlx.Tx, c models.HeaderModel) (models.IPLDModel, error) {
     log.Debug("fetching header ipld")
     headerBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
     if err != nil {
-        return ipfs.BlockModel{}, err
+        return models.IPLDModel{}, err
     }
-    return ipfs.BlockModel{
+    return models.IPLDModel{
         Data: headerBytes,
-        CID:  c.CID,
+        Key:  c.CID,
     }, nil
 }

 // FetchUncles fetches uncles
-func (f *IPLDFetcher) FetchUncles(tx *sqlx.Tx, cids []eth.UncleModel) ([]ipfs.BlockModel, error) {
+func (f *IPLDFetcher) FetchUncles(tx *sqlx.Tx, cids []models.UncleModel) ([]models.IPLDModel, error) {
     log.Debug("fetching uncle iplds")
-    uncleIPLDs := make([]ipfs.BlockModel, len(cids))
+    uncleIPLDs := make([]models.IPLDModel, len(cids))
     for i, c := range cids {
         uncleBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
         if err != nil {
             return nil, err
         }
-        uncleIPLDs[i] = ipfs.BlockModel{
+        uncleIPLDs[i] = models.IPLDModel{
             Data: uncleBytes,
-            CID:  c.CID,
+            Key:  c.CID,
         }
     }
     return uncleIPLDs, nil
 }

 // FetchTrxs fetches transactions
-func (f *IPLDFetcher) FetchTrxs(tx *sqlx.Tx, cids []eth.TxModel) ([]ipfs.BlockModel, error) {
+func (f *IPLDFetcher) FetchTrxs(tx *sqlx.Tx, cids []models.TxModel) ([]models.IPLDModel, error) {
     log.Debug("fetching transaction iplds")
-    trxIPLDs := make([]ipfs.BlockModel, len(cids))
+    trxIPLDs := make([]models.IPLDModel, len(cids))
     for i, c := range cids {
         txBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
         if err != nil {
             return nil, err
         }
-        trxIPLDs[i] = ipfs.BlockModel{
+        trxIPLDs[i] = models.IPLDModel{
             Data: txBytes,
-            CID:  c.CID,
+            Key:  c.CID,
         }
     }
     return trxIPLDs, nil
 }

 // FetchRcts fetches receipts
-func (f *IPLDFetcher) FetchRcts(tx *sqlx.Tx, cids []eth.ReceiptModel) ([]ipfs.BlockModel, error) {
+func (f *IPLDFetcher) FetchRcts(tx *sqlx.Tx, cids []models.ReceiptModel) ([]models.IPLDModel, error) {
     log.Debug("fetching receipt iplds")
-    rctIPLDs := make([]ipfs.BlockModel, len(cids))
+    rctIPLDs := make([]models.IPLDModel, len(cids))
     for i, c := range cids {
-        rctBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
+        rctBytes, err := shared.FetchIPLDByMhKey(tx, c.LeafMhKey)
         if err != nil {
             return nil, err
         }
-        rctIPLDs[i] = ipfs.BlockModel{
+        //nodeVal, err := DecodeLeafNode(rctBytes)
+        rctIPLDs[i] = models.IPLDModel{
             Data: rctBytes,
-            CID:  c.CID,
+            Key:  c.LeafCID,
         }
     }
     return rctIPLDs, nil
 }

 // FetchState fetches state nodes
-func (f *IPLDFetcher) FetchState(tx *sqlx.Tx, cids []eth.StateNodeModel) ([]StateNode, error) {
+func (f *IPLDFetcher) FetchState(tx *sqlx.Tx, cids []models.StateNodeModel) ([]StateNode, error) {
     log.Debug("fetching state iplds")
     stateNodes := make([]StateNode, 0, len(cids))
     for _, stateNode := range cids {
@@ -180,9 +177,9 @@ func (f *IPLDFetcher) FetchState(tx *sqlx.Tx, cids []eth.StateNodeModel) ([]Stat
             return nil, err
         }
         stateNodes = append(stateNodes, StateNode{
-            IPLD: ipfs.BlockModel{
+            IPLD: models.IPLDModel{
                 Data: stateBytes,
-                CID:  stateNode.CID,
+                Key:  stateNode.CID,
             },
             StateLeafKey: common.HexToHash(stateNode.StateKey),
             Type:         ResolveToNodeType(stateNode.NodeType),
@@ -193,7 +190,7 @@ func (f *IPLDFetcher) FetchState(tx *sqlx.Tx, cids []eth.StateNodeModel) ([]Stat
 }

 // FetchStorage fetches storage nodes
-func (f *IPLDFetcher) FetchStorage(tx *sqlx.Tx, cids []eth.StorageNodeWithStateKeyModel) ([]StorageNode, error) {
+func (f *IPLDFetcher) FetchStorage(tx *sqlx.Tx, cids []models.StorageNodeWithStateKeyModel) ([]StorageNode, error) {
     log.Debug("fetching storage iplds")
     storageNodes := make([]StorageNode, 0, len(cids))
     for _, storageNode := range cids {
@@ -205,9 +202,9 @@ func (f *IPLDFetcher) FetchStorage(tx *sqlx.Tx, cids []eth.StorageNodeWithStateK
             return nil, err
         }
         storageNodes = append(storageNodes, StorageNode{
-            IPLD: ipfs.BlockModel{
+            IPLD: models.IPLDModel{
                 Data: storageBytes,
-                CID:  storageNode.CID,
+                Key:  storageNode.CID,
             },
             StateLeafKey:   common.HexToHash(storageNode.StateKey),
             StorageLeafKey: common.HexToHash(storageNode.StorageKey),
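The return-type swap in this file (ipfs.BlockModel to models.IPLDModel) also renames the CID field: a fetched object now carries its CID in Key and the raw RLP in Data. A usage sketch under that assumption; the helper name and the header argument are illustrative only:

import (
    "github.com/ethereum/go-ethereum/statediff/indexer/models"
    "github.com/jmoiron/sqlx"

    "github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
)

// fetchHeaderIPLD runs FetchHeader inside a sqlx transaction and returns the
// models.IPLDModel whose Key field holds the header CID.
func fetchHeaderIPLD(db *sqlx.DB, header models.HeaderModel) (models.IPLDModel, error) {
    tx, err := db.Beginx()
    if err != nil {
        return models.IPLDModel{}, err
    }
    defer tx.Rollback()

    return eth.NewIPLDFetcher(db).FetchHeader(tx, header)
}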
@@ -17,35 +17,45 @@
 package eth_test

 import (
+    "github.com/ethereum/go-ethereum/params"
+    "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+    "github.com/jmoiron/sqlx"
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
-    "github.com/vulcanize/ipld-eth-indexer/pkg/shared"

-    eth2 "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
-    "github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
-    "github.com/vulcanize/ipld-eth-server/pkg/eth"
-    "github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers"
+    "github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
+    "github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers"
+    "github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
 )

 var _ = Describe("IPLDFetcher", func() {
     var (
-        db            *postgres.DB
-        pubAndIndexer *eth2.IPLDPublisher
+        db            *sqlx.DB
+        pubAndIndexer interfaces.StateDiffIndexer
         fetcher       *eth.IPLDFetcher
     )
     Describe("Fetch", func() {
         BeforeEach(func() {
-            var err error
-            db, err = shared.SetupDB()
-            Expect(err).ToNot(HaveOccurred())
-            pubAndIndexer = eth2.NewIPLDPublisher(db)
-            err = pubAndIndexer.Publish(test_helpers.MockConvertedPayload)
+            var (
+                err error
+                tx  interfaces.Batch
+            )
+            db = shared.SetupDB()
+            pubAndIndexer = shared.SetupTestStateDiffIndexer(ctx, params.TestChainConfig, test_helpers.Genesis.Hash())
+
+            tx, err = pubAndIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
+            for _, node := range test_helpers.MockStateNodes {
+                err = pubAndIndexer.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
+                Expect(err).ToNot(HaveOccurred())
+            }
+
+            err = tx.Submit(err)
             Expect(err).ToNot(HaveOccurred())
             fetcher = eth.NewIPLDFetcher(db)

         })
         AfterEach(func() {
-            eth.TearDownDB(db)
+            shared.TearDownDB(db)
         })

         It("Fetches and returns IPLDs for the CIDs provided in the CIDWrapper", func() {
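For readers following the reworked BeforeEach above: the old IPLDPublisher.Publish call is replaced by the statediff indexer's PushBlock / PushStateNode / Submit sequence. A condensed, illustrative helper built only from the calls shown in the diff (the suite's ctx is stood in for by context.Background(); the helper name is hypothetical):

package eth_test

import (
    "context"
    "log"

    "github.com/ethereum/go-ethereum/params"

    "github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
    "github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers"
    "github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
)

// setupFetcher indexes the mock block and hands back a fetcher over the same database,
// plus a teardown func to call after the test.
func setupFetcher() (*eth.IPLDFetcher, func()) {
    db := shared.SetupDB()
    indexer := shared.SetupTestStateDiffIndexer(context.Background(), params.TestChainConfig, test_helpers.Genesis.Hash())

    tx, err := indexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
    if err != nil {
        log.Fatal(err)
    }
    for _, node := range test_helpers.MockStateNodes {
        if err := indexer.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String()); err != nil {
            log.Fatal(err)
        }
    }
    if err := tx.Submit(err); err != nil {
        log.Fatal(err)
    }

    return eth.NewIPLDFetcher(db), func() { shared.TearDownDB(db) }
}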
@@ -19,25 +19,31 @@ package eth
 import (
     "fmt"

+    "github.com/ethereum/go-ethereum/statediff/trie_helpers"
+    sdtypes "github.com/ethereum/go-ethereum/statediff/types"
+    "github.com/jmoiron/sqlx"
+
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/lib/pq"

-    "github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
 )

 const (
+    // node type removed value.
+    // https://github.com/vulcanize/go-ethereum/blob/271f4d01e7e2767ffd8e0cd469bf545be96f2a84/statediff/indexer/helpers.go#L34
+    removedNode = 3
+
     RetrieveHeadersByHashesPgStr = `SELECT cid, data
                 FROM eth.header_cids
                 INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
                 WHERE block_hash = ANY($1::VARCHAR(66)[])`
     RetrieveHeadersByBlockNumberPgStr = `SELECT cid, data
                 FROM eth.header_cids
                 INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
                 WHERE block_number = $1`
     RetrieveHeaderByHashPgStr = `SELECT cid, data
                 FROM eth.header_cids
                 INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
                 WHERE block_hash = $1`
     RetrieveUnclesByHashesPgStr = `SELECT cid, data
@@ -46,12 +52,12 @@ const (
                 WHERE block_hash = ANY($1::VARCHAR(66)[])`
     RetrieveUnclesByBlockHashPgStr = `SELECT uncle_cids.cid, data
                 FROM eth.uncle_cids
-                INNER JOIN eth.header_cids ON (uncle_cids.header_id = header_cids.id)
+                INNER JOIN eth.header_cids ON (uncle_cids.header_id = header_cids.block_hash)
                 INNER JOIN public.blocks ON (uncle_cids.mh_key = blocks.key)
                 WHERE block_hash = $1`
     RetrieveUnclesByBlockNumberPgStr = `SELECT uncle_cids.cid, data
                 FROM eth.uncle_cids
-                INNER JOIN eth.header_cids ON (uncle_cids.header_id = header_cids.id)
+                INNER JOIN eth.header_cids ON (uncle_cids.header_id = header_cids.block_hash)
                 INNER JOIN public.blocks ON (uncle_cids.mh_key = blocks.key)
                 WHERE block_number = $1`
     RetrieveUncleByHashPgStr = `SELECT cid, data
@@ -64,13 +70,13 @@ const (
                 WHERE tx_hash = ANY($1::VARCHAR(66)[])`
     RetrieveTransactionsByBlockHashPgStr = `SELECT transaction_cids.cid, data
                 FROM eth.transaction_cids
-                INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
+                INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
                 INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key)
                 WHERE block_hash = $1
                 ORDER BY eth.transaction_cids.index ASC`
     RetrieveTransactionsByBlockNumberPgStr = `SELECT transaction_cids.cid, data
                 FROM eth.transaction_cids
-                INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
+                INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
                 INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key)
                 WHERE block_number = $1
                 ORDER BY eth.transaction_cids.index ASC`
@@ -78,97 +84,99 @@ const (
                 FROM eth.transaction_cids
                 INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key)
                 WHERE tx_hash = $1`
-    RetrieveReceiptsByTxHashesPgStr = `SELECT receipt_cids.cid, data
+    RetrieveReceiptsByTxHashesPgStr = `SELECT receipt_cids.leaf_cid, data
                 FROM eth.receipt_cids
-                INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.id)
-                INNER JOIN public.blocks ON (receipt_cids.mh_key = blocks.key)
+                INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash)
+                INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key)
                 WHERE tx_hash = ANY($1::VARCHAR(66)[])`
-    RetrieveReceiptsByBlockHashPgStr = `SELECT receipt_cids.cid, data
+    RetrieveReceiptsByBlockHashPgStr = `SELECT receipt_cids.leaf_cid, data, eth.transaction_cids.tx_hash
                 FROM eth.receipt_cids
-                INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.id)
-                INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
-                INNER JOIN public.blocks ON (receipt_cids.mh_key = blocks.key)
+                INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash)
+                INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
+                INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key)
                 WHERE block_hash = $1
                 ORDER BY eth.transaction_cids.index ASC`
-    RetrieveReceiptsByBlockNumberPgStr = `SELECT receipt_cids.cid, data
+    RetrieveReceiptsByBlockNumberPgStr = `SELECT receipt_cids.leaf_cid, data
                 FROM eth.receipt_cids
-                INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.id)
-                INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
-                INNER JOIN public.blocks ON (receipt_cids.mh_key = blocks.key)
+                INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash)
+                INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
+                INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key)
                 WHERE block_number = $1
                 ORDER BY eth.transaction_cids.index ASC`
-    RetrieveReceiptByTxHashPgStr = `SELECT receipt_cids.cid, data
+    RetrieveReceiptByTxHashPgStr = `SELECT receipt_cids.leaf_cid, data
                 FROM eth.receipt_cids
-                INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.id)
-                INNER JOIN public.blocks ON (receipt_cids.mh_key = blocks.key)
+                INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash)
+                INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key)
                 WHERE tx_hash = $1`
-    RetrieveAccountByLeafKeyAndBlockHashPgStr = `SELECT state_cids.cid,
-                data,
-                was_state_removed(state_path, block_number, $2) AS removed
+    RetrieveAccountByLeafKeyAndBlockHashPgStr = `SELECT state_cids.cid, data, state_cids.node_type
                 FROM eth.state_cids
-                INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
+                INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
                 INNER JOIN public.blocks ON (state_cids.mh_key = blocks.key)
                 WHERE state_leaf_key = $1
                 AND block_number <= (SELECT block_number
                                     FROM eth.header_cids
                                     WHERE block_hash = $2)
-                AND header_cids.id = (SELECT canonical_header_id(block_number))
+                AND header_cids.block_hash = (SELECT canonical_header_hash(block_number))
                 ORDER BY block_number DESC
                 LIMIT 1`
-    RetrieveAccountByLeafKeyAndBlockNumberPgStr = `SELECT state_cids.cid,
-                data,
-                was_state_removed(state_path, block_number, (SELECT block_hash
-                                                            FROM eth.header_cids
-                                                            WHERE block_number = $2
-                                                            LIMIT 1)) AS removed
+    RetrieveAccountByLeafKeyAndBlockNumberPgStr = `SELECT state_cids.cid, data, state_cids.node_type
                 FROM eth.state_cids
-                INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
+                INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
                 INNER JOIN public.blocks ON (state_cids.mh_key = blocks.key)
                 WHERE state_leaf_key = $1
                 AND block_number <= $2
                 ORDER BY block_number DESC
                 LIMIT 1`
-    RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr = `SELECT storage_cids.cid,
-                data,
-                was_storage_removed(storage_path, block_number, (SELECT block_hash
-                                                                FROM eth.header_cids
-                                                                WHERE block_number = $3
-                                                                LIMIT 1)) AS removed
+    RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
                 FROM eth.storage_cids
-                INNER JOIN eth.state_cids ON (storage_cids.state_id = state_cids.id)
-                INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
+                INNER JOIN eth.state_cids ON (
+                    storage_cids.header_id = state_cids.header_id
+                    AND storage_cids.state_path = state_cids.state_path
+                )
+                INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
                 INNER JOIN public.blocks ON (storage_cids.mh_key = blocks.key)
                 WHERE state_leaf_key = $1
                 AND storage_leaf_key = $2
                 AND block_number <= $3
                 ORDER BY block_number DESC
                 LIMIT 1`
-    RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `SELECT storage_cids.cid,
-                data,
-                was_storage_removed(storage_path, block_number, $3) AS removed
+    RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed
                 FROM eth.storage_cids
-                INNER JOIN eth.state_cids ON (storage_cids.state_id = state_cids.id)
-                INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
+                INNER JOIN eth.state_cids ON (
+                    storage_cids.header_id = state_cids.header_id
+                    AND storage_cids.state_path = state_cids.state_path
+                )
+                INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
                 INNER JOIN public.blocks ON (storage_cids.mh_key = blocks.key)
                 WHERE state_leaf_key = $1
                 AND storage_leaf_key = $2
                 AND block_number <= (SELECT block_number
                                     FROM eth.header_cids
                                     WHERE block_hash = $3)
-                AND header_cids.id = (SELECT canonical_header_id(block_number))
+                AND header_cids.block_hash = (SELECT canonical_header_hash(block_number))
                 ORDER BY block_number DESC
                 LIMIT 1`
 )
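Two things change in the constants above: receipts are now addressed by leaf_cid / leaf_mh_key (the trie leaf node that wraps the receipt), and the joins use natural keys (block_hash, tx_hash, header_id plus state_path) instead of the old surrogate id columns. A hedged sketch of running one of these queries directly; the row struct and function are illustrative, not part of this change:

import (
    "github.com/jmoiron/sqlx"

    "github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
)

type rctRow struct {
    LeafCID string `db:"leaf_cid"`
    Data    []byte `db:"data"`
}

// receiptsAtHeight selects the receipt leaf CIDs and leaf node RLP at a block height.
func receiptsAtHeight(db *sqlx.DB, number uint64) ([]rctRow, error) {
    var rows []rctRow
    err := db.Select(&rows, eth.RetrieveReceiptsByBlockNumberPgStr, number)
    return rows, err
}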

-type ipldResult struct {
-    CID  string `db:"cid"`
-    Data []byte `db:"data"`
-}
-type IPLDRetriever struct {
-    db *postgres.DB
-}
-
-func NewIPLDRetriever(db *postgres.DB) *IPLDRetriever {
+var EmptyNodeValue = make([]byte, common.HashLength)
+
+type rctIpldResult struct {
+    LeafCID string `db:"leaf_cid"`
+    Data    []byte `db:"data"`
+    TxHash  string `db:"tx_hash"`
+}
+
+type ipldResult struct {
+    CID    string `db:"cid"`
+    Data   []byte `db:"data"`
+    TxHash string `db:"tx_hash"`
+}
+
+type IPLDRetriever struct {
+    db *sqlx.DB
+}
+
+func NewIPLDRetriever(db *sqlx.DB) *IPLDRetriever {
     return &IPLDRetriever{
         db: db,
     }
@@ -325,9 +333,26 @@ func (r *IPLDRetriever) RetrieveTransactionByTxHash(hash common.Hash) (string, [
     return txResult.CID, txResult.Data, r.db.Get(txResult, RetrieveTransactionByHashPgStr, hash.Hex())
 }

+// DecodeLeafNode decodes the leaf node data
+func DecodeLeafNode(node []byte) ([]byte, error) {
+    var nodeElements []interface{}
+    if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
+        return nil, err
+    }
+    ty, err := trie_helpers.CheckKeyType(nodeElements)
+    if err != nil {
+        return nil, err
+    }
+
+    if ty != sdtypes.Leaf {
+        return nil, fmt.Errorf("expected leaf node but found %s", ty)
+    }
+    return nodeElements[1].([]byte), nil
+}
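DecodeLeafNode, added above, assumes the stored value is a two-element [partialPath, value] trie leaf and returns the second element. A small round trip under that assumption (the path bytes below are illustrative; the leading 0x2 nibble marks a leaf in hex-prefix encoding):

package main

import (
    "fmt"
    "log"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/rlp"

    "github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
)

func main() {
    partialPath := common.Hex2Bytes("20290decd9548b62a8d60345a988386fc84ba6bc9548")
    value := []byte{1, 2, 3}

    // Encode a [partialPath, value] leaf the same way the test fixtures do.
    leaf, err := rlp.EncodeToBytes(&[]interface{}{partialPath, value})
    if err != nil {
        log.Fatal(err)
    }

    got, err := eth.DecodeLeafNode(leaf)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%x\n", got) // prints 010203: the second element of the decoded list
}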
 // RetrieveReceiptsByTxHashes returns the cids and rlp bytes for the receipts corresponding to the provided tx hashes
 func (r *IPLDRetriever) RetrieveReceiptsByTxHashes(hashes []common.Hash) ([]string, [][]byte, error) {
-    rctResults := make([]ipldResult, 0)
+    rctResults := make([]rctIpldResult, 0)
     hashStrs := make([]string, len(hashes))
     for i, hash := range hashes {
         hashStrs[i] = hash.Hex()
@@ -338,52 +363,80 @@ func (r *IPLDRetriever) RetrieveReceiptsByTxHashes(hashes []common.Hash) ([]stri
     cids := make([]string, len(rctResults))
     rcts := make([][]byte, len(rctResults))
     for i, res := range rctResults {
-        cids[i] = res.CID
-        rcts[i] = res.Data
+        cids[i] = res.LeafCID
+        nodeVal, err := DecodeLeafNode(res.Data)
+        if err != nil {
+            return nil, nil, err
+        }
+        rcts[i] = nodeVal
     }
     return cids, rcts, nil
 }

-// RetrieveReceiptsByBlockHash returns the cids and rlp bytes for the receipts corresponding to the provided block hash
-func (r *IPLDRetriever) RetrieveReceiptsByBlockHash(hash common.Hash) ([]string, [][]byte, error) {
-    rctResults := make([]ipldResult, 0)
+// RetrieveReceiptsByBlockHash returns the cids and rlp bytes for the receipts corresponding to the provided block hash.
+// cid returned corresponds to the leaf node data which contains the receipt.
+func (r *IPLDRetriever) RetrieveReceiptsByBlockHash(hash common.Hash) ([]string, [][]byte, []common.Hash, error) {
+    rctResults := make([]rctIpldResult, 0)
     if err := r.db.Select(&rctResults, RetrieveReceiptsByBlockHashPgStr, hash.Hex()); err != nil {
-        return nil, nil, err
+        return nil, nil, nil, err
     }
     cids := make([]string, len(rctResults))
     rcts := make([][]byte, len(rctResults))
+    txs := make([]common.Hash, len(rctResults))
+
     for i, res := range rctResults {
-        cids[i] = res.CID
-        rcts[i] = res.Data
+        cids[i] = res.LeafCID
+        nodeVal, err := DecodeLeafNode(res.Data)
+        if err != nil {
+            return nil, nil, nil, err
+        }
+        rcts[i] = nodeVal
+        txs[i] = common.HexToHash(res.TxHash)
     }
-    return cids, rcts, nil
+
+    return cids, rcts, txs, nil
 }

-// RetrieveReceiptsByBlockNumber returns the cids and rlp bytes for the receipts corresponding to the provided block hash
+// RetrieveReceiptsByBlockNumber returns the cids and rlp bytes for the receipts corresponding to the provided block hash.
+// cid returned corresponds to the leaf node data which contains the receipt.
 func (r *IPLDRetriever) RetrieveReceiptsByBlockNumber(number uint64) ([]string, [][]byte, error) {
-    rctResults := make([]ipldResult, 0)
+    rctResults := make([]rctIpldResult, 0)
     if err := r.db.Select(&rctResults, RetrieveReceiptsByBlockNumberPgStr, number); err != nil {
         return nil, nil, err
     }
     cids := make([]string, len(rctResults))
     rcts := make([][]byte, len(rctResults))
     for i, res := range rctResults {
-        cids[i] = res.CID
-        rcts[i] = res.Data
+        cids[i] = res.LeafCID
+        nodeVal, err := DecodeLeafNode(res.Data)
+        if err != nil {
+            return nil, nil, err
+        }
+        rcts[i] = nodeVal
     }
     return cids, rcts, nil
 }

-// RetrieveReceiptByHash returns the cid and rlp bytes for the receipt corresponding to the provided tx hash
+// RetrieveReceiptByHash returns the cid and rlp bytes for the receipt corresponding to the provided tx hash.
+// cid returned corresponds to the leaf node data which contains the receipt.
 func (r *IPLDRetriever) RetrieveReceiptByHash(hash common.Hash) (string, []byte, error) {
-    rctResult := new(ipldResult)
-    return rctResult.CID, rctResult.Data, r.db.Get(rctResult, RetrieveReceiptByTxHashPgStr, hash.Hex())
+    rctResult := new(rctIpldResult)
+    if err := r.db.Select(&rctResult, RetrieveReceiptByTxHashPgStr, hash.Hex()); err != nil {
+        return "", nil, err
+    }
+
+    nodeVal, err := DecodeLeafNode(rctResult.Data)
+    if err != nil {
+        return "", nil, err
+    }
+    return rctResult.LeafCID, nodeVal, nil
 }

 type nodeInfo struct {
     CID  string `db:"cid"`
     Data []byte `db:"data"`
-    Removed bool `db:"removed"`
+    NodeType         int  `db:"node_type"`
+    StateLeafRemoved bool `db:"state_leaf_removed"`
 }

 // RetrieveAccountByAddressAndBlockHash returns the cid and rlp bytes for the account corresponding to the provided address and block hash
@@ -394,9 +447,11 @@ func (r *IPLDRetriever) RetrieveAccountByAddressAndBlockHash(address common.Addr
     if err := r.db.Get(accountResult, RetrieveAccountByLeafKeyAndBlockHashPgStr, leafKey.Hex(), hash.Hex()); err != nil {
         return "", nil, err
     }
-    if accountResult.Removed {
-        return "", []byte{}, nil
+    if accountResult.NodeType == removedNode {
+        return "", EmptyNodeValue, nil
     }
     var i []interface{}
     if err := rlp.DecodeBytes(accountResult.Data, &i); err != nil {
         return "", nil, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
@@ -415,9 +470,11 @@ func (r *IPLDRetriever) RetrieveAccountByAddressAndBlockNumber(address common.Ad
     if err := r.db.Get(accountResult, RetrieveAccountByLeafKeyAndBlockNumberPgStr, leafKey.Hex(), number); err != nil {
         return "", nil, err
     }
-    if accountResult.Removed {
-        return "", []byte{}, nil
+    if accountResult.NodeType == removedNode {
+        return "", EmptyNodeValue, nil
     }
     var i []interface{}
     if err := rlp.DecodeBytes(accountResult.Data, &i); err != nil {
         return "", nil, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
@@ -428,25 +485,26 @@ func (r *IPLDRetriever) RetrieveAccountByAddressAndBlockNumber(address common.Ad
     return accountResult.CID, i[1].([]byte), nil
 }

-// RetrieveStorageAtByAddressAndStorageKeyAndBlockHash returns the cid and rlp bytes for the storage value corresponding to the provided address, storage key, and block hash
-func (r *IPLDRetriever) RetrieveStorageAtByAddressAndStorageKeyAndBlockHash(address common.Address, storageLeafKey, hash common.Hash) (string, []byte, error) {
+// RetrieveStorageAtByAddressAndStorageSlotAndBlockHash returns the cid and rlp bytes for the storage value corresponding to the provided address, storage slot, and block hash
+func (r *IPLDRetriever) RetrieveStorageAtByAddressAndStorageSlotAndBlockHash(address common.Address, key, hash common.Hash) (string, []byte, []byte, error) {
     storageResult := new(nodeInfo)
     stateLeafKey := crypto.Keccak256Hash(address.Bytes())
-    if err := r.db.Get(storageResult, RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr, stateLeafKey.Hex(), storageLeafKey.Hex(), hash.Hex()); err != nil {
-        return "", nil, err
+    storageHash := crypto.Keccak256Hash(key.Bytes())
+    if err := r.db.Get(storageResult, RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr, stateLeafKey.Hex(), storageHash.Hex(), hash.Hex()); err != nil {
+        return "", nil, nil, err
     }
-    if storageResult.Removed {
-        return "", []byte{}, nil
+    if storageResult.StateLeafRemoved || storageResult.NodeType == removedNode {
+        return "", EmptyNodeValue, EmptyNodeValue, nil
     }
     var i []interface{}
     if err := rlp.DecodeBytes(storageResult.Data, &i); err != nil {
         err = fmt.Errorf("error decoding storage leaf node rlp: %s", err.Error())
-        return "", nil, err
+        return "", nil, nil, err
     }
     if len(i) != 2 {
-        return "", nil, fmt.Errorf("eth IPLDRetriever expected storage leaf node rlp to decode into two elements")
+        return "", nil, nil, fmt.Errorf("eth IPLDRetriever expected storage leaf node rlp to decode into two elements")
     }
-    return storageResult.CID, i[1].([]byte), nil
+    return storageResult.CID, storageResult.Data, i[1].([]byte), nil
 }
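The removed-node handling above replaces the old was_state_removed / was_storage_removed SQL helpers with a node_type check in Go: a leaf recorded with node_type equal to removedNode (3) is reported as present-but-empty via EmptyNodeValue rather than a nil slice. A sketch of that convention as it would look inside this package; the helper itself is hypothetical:

// valueOrEmpty mirrors the checks used by the retriever methods above.
func valueOrEmpty(res nodeInfo) []byte {
    if res.StateLeafRemoved || res.NodeType == removedNode {
        return EmptyNodeValue // 32 zero bytes, defined earlier in this file
    }
    return res.Data
}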
 // RetrieveStorageAtByAddressAndStorageKeyAndBlockNumber returns the cid and rlp bytes for the storage value corresponding to the provided address, storage key, and block number
@@ -457,8 +515,9 @@ func (r *IPLDRetriever) RetrieveStorageAtByAddressAndStorageKeyAndBlockNumber(ad
     if err := r.db.Get(storageResult, RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr, stateLeafKey.Hex(), storageLeafKey.Hex(), number); err != nil {
         return "", nil, err
     }
-    if storageResult.Removed {
-        return "", []byte{}, nil
+    if storageResult.StateLeafRemoved || storageResult.NodeType == removedNode {
+        return "", EmptyNodeValue, nil
     }
     var i []interface{}
     if err := rlp.DecodeBytes(storageResult.Data, &i); err != nil {
@@ -17,36 +17,11 @@
 package eth

 import (
-    . "github.com/onsi/gomega"
-
-    "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
-    "github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
+    "github.com/ethereum/go-ethereum/statediff/indexer/models"
 )

-// TearDownDB is used to tear down the watcher dbs after tests
-func TearDownDB(db *postgres.DB) {
-    tx, err := db.Beginx()
-    Expect(err).NotTo(HaveOccurred())
-
-    _, err = tx.Exec(`DELETE FROM eth.header_cids`)
-    Expect(err).NotTo(HaveOccurred())
-    _, err = tx.Exec(`DELETE FROM eth.transaction_cids`)
-    Expect(err).NotTo(HaveOccurred())
-    _, err = tx.Exec(`DELETE FROM eth.receipt_cids`)
-    Expect(err).NotTo(HaveOccurred())
-    _, err = tx.Exec(`DELETE FROM eth.state_cids`)
-    Expect(err).NotTo(HaveOccurred())
-    _, err = tx.Exec(`DELETE FROM eth.storage_cids`)
-    Expect(err).NotTo(HaveOccurred())
-    _, err = tx.Exec(`DELETE FROM blocks`)
-    Expect(err).NotTo(HaveOccurred())
-
-    err = tx.Commit()
-    Expect(err).NotTo(HaveOccurred())
-}
-
 // TxModelsContainsCID used to check if a list of TxModels contains a specific cid string
-func TxModelsContainsCID(txs []eth.TxModel, cid string) bool {
+func TxModelsContainsCID(txs []models.TxModel, cid string) bool {
     for _, tx := range txs {
         if tx.CID == cid {
             return true
@@ -55,10 +30,10 @@ func TxModelsContainsCID(txs []eth.TxModel, cid string) bool {
     return false
 }

-// ListContainsBytes used to check if a list of byte arrays contains a particular byte array
-func ReceiptModelsContainsCID(rcts []eth.ReceiptModel, cid string) bool {
+// ReceiptModelsContainsCID used to check if a list of ReceiptModel contains a specific cid string
+func ReceiptModelsContainsCID(rcts []models.ReceiptModel, cid string) bool {
     for _, rct := range rcts {
-        if rct.CID == cid {
+        if rct.LeafCID == cid {
             return true
         }
     }
@@ -62,6 +62,7 @@ data function sig: 73d4a13a
 // the returned hash chain is ordered head->parent.
 func MakeChain(n int, parent *types.Block, chainGen func(int, *core.BlockGen)) ([]*types.Block, []types.Receipts, *core.BlockChain) {
     config := params.TestChainConfig
+    config.LondonBlock = big.NewInt(100)
     blocks, receipts := core.GenerateChain(config, parent, ethash.NewFaker(), Testdb, n, chainGen)
     chain, _ := core.NewBlockChain(Testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
     return append([]*types.Block{parent}, blocks...), receipts, chain
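One caveat worth noting on the LondonBlock line added above: params.TestChainConfig is a package-level pointer in go-ethereum, so assigning it to config and then setting LondonBlock mutates the shared config for every other user of that variable. If that side effect is not intended, a copy keeps the global untouched. An alternative sketch using MakeChain's own parameters, not what the diff does:

cfg := *params.TestChainConfig // work on a shallow copy instead of the shared pointer
cfg.LondonBlock = big.NewInt(100)
blocks, receipts := core.GenerateChain(&cfg, parent, ethash.NewFaker(), Testdb, n, chainGen)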
@@ -17,32 +17,28 @@
 package test_helpers

 import (
+    "bytes"
     "crypto/ecdsa"
     "crypto/elliptic"
     "crypto/rand"
     "math/big"

-    sdtypes "github.com/ethereum/go-ethereum/statediff/types"
-    "github.com/ethereum/go-ethereum/trie"
-
-    "github.com/vulcanize/ipld-eth-indexer/pkg/shared"
-
     "github.com/ethereum/go-ethereum/common"
-    "github.com/ethereum/go-ethereum/core/state"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethereum/go-ethereum/params"
     "github.com/ethereum/go-ethereum/rlp"
-    "github.com/ethereum/go-ethereum/statediff/testhelpers"
-    "github.com/ipfs/go-block-format"
+    "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
+    "github.com/ethereum/go-ethereum/statediff/indexer/models"
+    "github.com/ethereum/go-ethereum/statediff/indexer/shared"
+    testhelpers "github.com/ethereum/go-ethereum/statediff/test_helpers"
+    sdtypes "github.com/ethereum/go-ethereum/statediff/types"
+    "github.com/ethereum/go-ethereum/trie"
+    blocks "github.com/ipfs/go-block-format"
     "github.com/multiformats/go-multihash"
     log "github.com/sirupsen/logrus"

-    "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
-    "github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
-    "github.com/vulcanize/ipld-eth-indexer/pkg/ipfs/ipld"
-
-    eth2 "github.com/vulcanize/ipld-eth-server/pkg/eth"
+    "github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
 )

 // Test variables
@@ -58,7 +54,7 @@ var (
         Difficulty: big.NewInt(5000000),
         Extra:      []byte{},
     }
-    MockTransactions, MockReceipts, SenderAddr = createTransactionsAndReceipts()
+    MockTransactions, MockReceipts, SenderAddr = createLegacyTransactionsAndReceipts()
     MockUncles = []*types.Header{
         {
             Time: 1,
@@ -80,7 +76,7 @@ var (
         },
     }
     ReceiptsRlp, _ = rlp.EncodeToBytes(MockReceipts)
-    MockBlock        = types.NewBlock(&MockHeader, MockTransactions, MockUncles, MockReceipts, new(trie.Trie))
+    MockBlock        = createNewBlock(&MockHeader, MockTransactions, MockUncles, MockReceipts, new(trie.Trie))
     MockHeaderRlp, _ = rlp.EncodeToBytes(MockBlock.Header())
     MockChildHeader  = types.Header{
         Time: 0,
@@ -96,6 +92,8 @@ var (
     MockChildRlp, _ = rlp.EncodeToBytes(MockChild.Header())
     Address         = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
     AnotherAddress  = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
+    AnotherAddress1 = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476594")
+    AnotherAddress2 = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476596")
     ContractAddress = crypto.CreateAddress(SenderAddr, MockTransactions[2].Nonce())
     ContractHash    = crypto.Keccak256Hash(ContractAddress.Bytes()).String()
     MockContractByteCode = []byte{0, 1, 2, 3, 4, 5}
@@ -103,37 +101,97 @@ var (
     mockTopic12 = common.HexToHash("0x06")
     mockTopic21 = common.HexToHash("0x05")
     mockTopic22 = common.HexToHash("0x07")
+    mockTopic31 = common.HexToHash("0x08")
+    mockTopic41 = common.HexToHash("0x09")
+    mockTopic42 = common.HexToHash("0x0a")
+    mockTopic43 = common.HexToHash("0x0b")
+    mockTopic51 = common.HexToHash("0x0c")
+    mockTopic61 = common.HexToHash("0x0d")
     MockLog1    = &types.Log{
         Address: Address,
         Topics:  []common.Hash{mockTopic11, mockTopic12},
         Data:    []byte{},
+        BlockNumber: BlockNumber.Uint64(),
+        TxIndex:     0,
+        Index:       0,
     }
     MockLog2 = &types.Log{
         Address: AnotherAddress,
         Topics:  []common.Hash{mockTopic21, mockTopic22},
         Data:    []byte{},
+        BlockNumber: BlockNumber.Uint64(),
+        TxIndex:     1,
+        Index:       1,
     }
-    HeaderCID, _  = ipld.RawdataToCid(ipld.MEthHeader, MockHeaderRlp, multihash.KECCAK_256)
-    HeaderMhKey   = shared.MultihashKeyFromCID(HeaderCID)
-    Trx1CID, _    = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(0), multihash.KECCAK_256)
-    Trx1MhKey     = shared.MultihashKeyFromCID(Trx1CID)
-    Trx2CID, _    = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(1), multihash.KECCAK_256)
-    Trx2MhKey     = shared.MultihashKeyFromCID(Trx2CID)
-    Trx3CID, _    = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(2), multihash.KECCAK_256)
-    Trx3MhKey     = shared.MultihashKeyFromCID(Trx3CID)
-    Rct1CID, _    = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(0), multihash.KECCAK_256)
-    Rct1MhKey     = shared.MultihashKeyFromCID(Rct1CID)
-    Rct2CID, _    = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(1), multihash.KECCAK_256)
-    Rct2MhKey     = shared.MultihashKeyFromCID(Rct2CID)
-    Rct3CID, _    = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(2), multihash.KECCAK_256)
-    Rct3MhKey     = shared.MultihashKeyFromCID(Rct3CID)
-    State1CID, _  = ipld.RawdataToCid(ipld.MEthStateTrie, ContractLeafNode, multihash.KECCAK_256)
-    State1MhKey   = shared.MultihashKeyFromCID(State1CID)
-    State2CID, _  = ipld.RawdataToCid(ipld.MEthStateTrie, AccountLeafNode, multihash.KECCAK_256)
-    State2MhKey   = shared.MultihashKeyFromCID(State2CID)
-    StorageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, StorageLeafNode, multihash.KECCAK_256)
-    StorageMhKey  = shared.MultihashKeyFromCID(StorageCID)
-    MockTrxMeta   = []eth.TxModel{
+    MockLog3 = &types.Log{
+        Address:     AnotherAddress1,
+        Topics:      []common.Hash{mockTopic31},
+        Data:        []byte{},
+        BlockNumber: BlockNumber.Uint64(),
+        TxIndex:     2,
+        Index:       2,
+    }
+
+    MockLog4 = &types.Log{
+        Address:     AnotherAddress1,
+        Topics:      []common.Hash{mockTopic41, mockTopic42, mockTopic43},
+        Data:        []byte{},
+        BlockNumber: BlockNumber.Uint64(),
+        TxIndex:     2,
+        Index:       3,
+    }
+    MockLog5 = &types.Log{
+        Address:     AnotherAddress1,
+        Topics:      []common.Hash{mockTopic51},
+        Data:        []byte{},
+        BlockNumber: BlockNumber.Uint64(),
+        TxIndex:     2,
+        Index:       4,
+    }
+    MockLog6 = &types.Log{
+        Address:     AnotherAddress2,
+        Topics:      []common.Hash{mockTopic61},
+        Data:        []byte{},
+        BlockNumber: BlockNumber.Uint64(),
+        TxIndex:     3,
+        Index:       5,
+    }
+
+    Tx1 = GetTxnRlp(0, MockTransactions)
+    Tx2 = GetTxnRlp(1, MockTransactions)
+    Tx3 = GetTxnRlp(2, MockTransactions)
+    Tx4 = GetTxnRlp(3, MockTransactions)
+
+    rctCIDs, rctIPLDData, _ = eth.GetRctLeafNodeData(MockReceipts)
+    HeaderCID, _  = ipld.RawdataToCid(ipld.MEthHeader, MockHeaderRlp, multihash.KECCAK_256)
+    HeaderMhKey   = shared.MultihashKeyFromCID(HeaderCID)
+    Trx1CID, _    = ipld.RawdataToCid(ipld.MEthTx, Tx1, multihash.KECCAK_256)
+    Trx1MhKey     = shared.MultihashKeyFromCID(Trx1CID)
+    Trx2CID, _    = ipld.RawdataToCid(ipld.MEthTx, Tx2, multihash.KECCAK_256)
+    Trx2MhKey     = shared.MultihashKeyFromCID(Trx2CID)
+    Trx3CID, _    = ipld.RawdataToCid(ipld.MEthTx, Tx3, multihash.KECCAK_256)
+    Trx3MhKey     = shared.MultihashKeyFromCID(Trx3CID)
+    Trx4CID, _    = ipld.RawdataToCid(ipld.MEthTx, Tx4, multihash.KECCAK_256)
+    Trx4MhKey     = shared.MultihashKeyFromCID(Trx4CID)
+    Rct1CID       = rctCIDs[0]
+    Rct1MhKey     = shared.MultihashKeyFromCID(Rct1CID)
+    Rct2CID       = rctCIDs[1]
+    Rct2MhKey     = shared.MultihashKeyFromCID(Rct2CID)
+    Rct3CID       = rctCIDs[2]
+    Rct3MhKey     = shared.MultihashKeyFromCID(Rct3CID)
+    Rct4CID       = rctCIDs[3]
+    Rct4MhKey     = shared.MultihashKeyFromCID(Rct4CID)
+    State1CID, _  = ipld.RawdataToCid(ipld.MEthStateTrie, ContractLeafNode, multihash.KECCAK_256)
+    State1MhKey   = shared.MultihashKeyFromCID(State1CID)
+    State2CID, _  = ipld.RawdataToCid(ipld.MEthStateTrie, AccountLeafNode, multihash.KECCAK_256)
+    State2MhKey   = shared.MultihashKeyFromCID(State2CID)
+    StorageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, StorageLeafNode, multihash.KECCAK_256)
+    StorageMhKey  = shared.MultihashKeyFromCID(StorageCID)
+    Rct1IPLD      = rctIPLDData[0]
+    Rct2IPLD      = rctIPLDData[1]
+    Rct3IPLD      = rctIPLDData[2]
+    Rct4IPLD      = rctIPLDData[3]
+    MockTrxMeta   = []models.TxModel{
         {
             CID:   "", // This is empty until we go to publish to ipfs
             MhKey: "",
@@ -161,8 +219,17 @@ var (
             TxHash: MockTransactions[2].Hash().String(),
             Data:   MockContractByteCode,
         },
+        {
+            CID:    "",
+            MhKey:  "",
+            Src:    SenderAddr.Hex(),
+            Dst:    "",
+            Index:  3,
+            TxHash: MockTransactions[3].Hash().String(),
+            Data:   []byte{},
+        },
     }
-    MockTrxMetaPostPublsh = []eth.TxModel{
+    MockTrxMetaPostPublsh = []models.TxModel{
         {
             CID:   Trx1CID.String(), // This is empty until we go to publish to ipfs
             MhKey: Trx1MhKey,
@@ -190,83 +257,67 @@ var (
             TxHash: MockTransactions[2].Hash().String(),
             Data:   MockContractByteCode,
         },
-    }
-    MockRctMeta = []eth.ReceiptModel{
-        {
-            CID:   "",
-            MhKey: "",
-            Topic0s: []string{
-                mockTopic11.String(),
-            },
-            Topic1s: []string{
-                mockTopic12.String(),
-            },
-            Contract:     "",
-            ContractHash: "",
-            LogContracts: []string{
-                Address.String(),
-            },
-        },
-        {
-            CID:   "",
-            MhKey: "",
-            Topic0s: []string{
-                mockTopic21.String(),
-            },
-            Topic1s: []string{
-                mockTopic22.String(),
-            },
-            Contract:     "",
-            ContractHash: "",
-            LogContracts: []string{
-                AnotherAddress.String(),
-            },
-        },
-        {
-            CID:          "",
-            MhKey:        "",
-            Contract:     ContractAddress.String(),
-            ContractHash: ContractHash,
-            LogContracts: []string{},
-        },
-    }
-    MockRctMetaPostPublish = []eth.ReceiptModel{
-        {
-            CID:   Rct1CID.String(),
-            MhKey: Rct1MhKey,
-            Topic0s: []string{
-                mockTopic11.String(),
-            },
-            Topic1s: []string{
-                mockTopic12.String(),
-            },
-            Contract:     "",
-            ContractHash: "",
-            LogContracts: []string{
-                Address.String(),
-            },
-        },
-        {
-            CID:   Rct2CID.String(),
-            MhKey: Rct2MhKey,
-            Topic0s: []string{
-                mockTopic21.String(),
-            },
-            Topic1s: []string{
-                mockTopic22.String(),
-            },
-            Contract:     "",
-            ContractHash: "",
-            LogContracts: []string{
-                AnotherAddress.String(),
-            },
-        },
-        {
-            CID:          Rct3CID.String(),
-            MhKey:        Rct3MhKey,
-            Contract:     ContractAddress.String(),
-            ContractHash: ContractHash,
-            LogContracts: []string{},
+        {
+            CID:    Trx4CID.String(),
+            MhKey:  Trx4MhKey,
+            Src:    SenderAddr.Hex(),
+            Dst:    AnotherAddress1.String(),
+            Index:  3,
+            TxHash: MockTransactions[3].Hash().String(),
+            Data:   []byte{},
+        },
+    }
+    MockRctMeta = []models.ReceiptModel{
+        {
+            LeafCID:      "",
+            LeafMhKey:    "",
+            Contract:     "",
+            ContractHash: "",
+        },
+        {
+            LeafCID:      "",
+            LeafMhKey:    "",
+            Contract:     "",
+            ContractHash: "",
+        },
+        {
+            LeafCID:      "",
+            LeafMhKey:    "",
+            Contract:     ContractAddress.String(),
+            ContractHash: ContractHash,
+        },
+        {
+            LeafCID:      "",
+            LeafMhKey:    "",
+            Contract:     "",
+            ContractHash: "",
+        },
+    }
+
+    MockRctMetaPostPublish = []models.ReceiptModel{
+        {
+            LeafCID:      Rct1CID.String(),
+            LeafMhKey:    Rct1MhKey,
+            Contract:     "",
+            ContractHash: "",
+        },
+        {
+            LeafCID:      Rct2CID.String(),
+            LeafMhKey:    Rct2MhKey,
+            Contract:     "",
+            ContractHash: "",
+        },
+        {
+            LeafCID:      Rct3CID.String(),
+            LeafMhKey:    Rct3MhKey,
+            Contract:     ContractAddress.String(),
+            ContractHash: ContractHash,
+        },
+        {
+            LeafCID:      Rct4CID.String(),
+            LeafMhKey:    Rct4MhKey,
+            Contract:     "",
+            ContractHash: "",
         },
     }

@@ -275,7 +326,7 @@ var (
     StorageLeafKey     = crypto.Keccak256Hash(storageLocation[:]).Bytes()
     StorageValue       = crypto.Keccak256([]byte{1, 2, 3, 4, 5})
     StoragePartialPath = common.Hex2Bytes("20290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
-    StorageLeafNode, _ = rlp.EncodeToBytes([]interface{}{
+    StorageLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
         StoragePartialPath,
         StorageValue,
     })
@@ -285,14 +336,14 @@ var (
     ContractCodeHash = crypto.Keccak256Hash(MockContractByteCode)
     contractPath     = common.Bytes2Hex([]byte{'\x06'})
     ContractLeafKey  = testhelpers.AddressToLeafKey(ContractAddress)
-    ContractAccount, _ = rlp.EncodeToBytes(state.Account{
+    ContractAccount, _ = rlp.EncodeToBytes(&types.StateAccount{
         Nonce:    nonce1,
         Balance:  big.NewInt(0),
         CodeHash: ContractCodeHash.Bytes(),
         Root:     common.HexToHash(ContractRoot),
     })
     ContractPartialPath = common.Hex2Bytes("3114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45")
-    ContractLeafNode, _ = rlp.EncodeToBytes([]interface{}{
+    ContractLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
         ContractPartialPath,
         ContractAccount,
     })
@@ -303,33 +354,42 @@ var (
     AccountCodeHash = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
     AccountAddresss = common.HexToAddress("0x0D3ab14BBaD3D99F4203bd7a11aCB94882050E7e")
     AccountLeafKey  = testhelpers.Account2LeafKey
-    Account, _ = rlp.EncodeToBytes(state.Account{
+    Account, _ = rlp.EncodeToBytes(&types.StateAccount{
         Nonce:    nonce0,
         Balance:  AccountBalance,
        CodeHash: AccountCodeHash.Bytes(),
         Root:     common.HexToHash(AccountRoot),
     })
     AccountPartialPath = common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45")
-    AccountLeafNode, _ = rlp.EncodeToBytes([]interface{}{
+    AccountLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{
         AccountPartialPath,
         Account,
     })

-    MockStateNodes = []eth.TrieNode{
+    MockStateNodes = []sdtypes.StateNode{
         {
-            LeafKey: common.BytesToHash(ContractLeafKey),
-            Path:    []byte{'\x06'},
-            Value:   ContractLeafNode,
-            Type:    sdtypes.Leaf,
+            LeafKey:   ContractLeafKey,
+            Path:      []byte{'\x06'},
+            NodeValue: ContractLeafNode,
+            NodeType:  sdtypes.Leaf,
+            StorageNodes: []sdtypes.StorageNode{
+                {
+                    Path:      []byte{},
+                    NodeType:  sdtypes.Leaf,
+                    LeafKey:   StorageLeafKey,
+                    NodeValue: StorageLeafNode,
+                },
+            },
         },
         {
-            LeafKey: common.BytesToHash(AccountLeafKey),
-            Path:    []byte{'\x0c'},
-            Value:   AccountLeafNode,
-            Type:    sdtypes.Leaf,
+            LeafKey:      AccountLeafKey,
+            Path:         []byte{'\x0c'},
+            NodeValue:    AccountLeafNode,
+            NodeType:     sdtypes.Leaf,
+            StorageNodes: []sdtypes.StorageNode{},
         },
     }
-    MockStateMetaPostPublish = []eth.StateNodeModel{
+    MockStateMetaPostPublish = []models.StateNodeModel{
         {
             CID:   State1CID.String(),
             MhKey: State1MhKey,
@@ -345,13 +405,13 @@ var (
             StateKey: common.BytesToHash(AccountLeafKey).Hex(),
         },
     }
-    MockStorageNodes = map[string][]eth.TrieNode{
+    MockStorageNodes = map[string][]sdtypes.StorageNode{
         contractPath: {
             {
-                LeafKey: common.BytesToHash(StorageLeafKey),
-                Value:   StorageLeafNode,
-                Type:    sdtypes.Leaf,
-                Path:    []byte{},
+                LeafKey:   StorageLeafKey,
+                NodeValue: StorageLeafNode,
+                NodeType:  sdtypes.Leaf,
+                Path:      []byte{},
             },
         },
     }
@@ -375,16 +435,17 @@ var (
         StateNodes: MockStateNodes,
     }

-    MockCIDWrapper = &eth2.CIDWrapper{
+    Reward         = shared.CalcEthBlockReward(MockBlock.Header(), MockBlock.Uncles(), MockBlock.Transactions(), MockReceipts)
+    MockCIDWrapper = &eth.CIDWrapper{
         BlockNumber: new(big.Int).Set(BlockNumber),
-        Header: eth.HeaderModel{
+        Header: models.HeaderModel{
             BlockNumber:     "1",
             BlockHash:       MockBlock.Hash().String(),
             ParentHash:      "0x0000000000000000000000000000000000000000000000000000000000000000",
             CID:             HeaderCID.String(),
             MhKey:           HeaderMhKey,
             TotalDifficulty: MockBlock.Difficulty().String(),
-            Reward:          "5312500000000000000",
+            Reward:          Reward.String(),
             StateRoot:       MockBlock.Root().String(),
             RctRoot:         MockBlock.ReceiptHash().String(),
             TxRoot:          MockBlock.TxHash().String(),
@@ -392,12 +453,13 @@ var (
             Bloom:          MockBlock.Bloom().Bytes(),
             Timestamp:      MockBlock.Time(),
             TimesValidated: 1,
+            Coinbase:       "0x0000000000000000000000000000000000000000",
         },
         Transactions: MockTrxMetaPostPublsh,
         Receipts:     MockRctMetaPostPublish,
-        Uncles:       []eth.UncleModel{},
+        Uncles:       []models.UncleModel{},
         StateNodes:   MockStateMetaPostPublish,
-        StorageNodes: []eth.StorageNodeWithStateKeyModel{
+        StorageNodes: []models.StorageNodeWithStateKeyModel{
             {
                 Path: []byte{},
                 CID:  StorageCID.String(),
@@ -410,91 +472,169 @@ var (
     }

     HeaderIPLD, _ = blocks.NewBlockWithCid(MockHeaderRlp, HeaderCID)
-    Trx1IPLD, _   = blocks.NewBlockWithCid(MockTransactions.GetRlp(0), Trx1CID)
-    Trx2IPLD, _   = blocks.NewBlockWithCid(MockTransactions.GetRlp(1), Trx2CID)
-    Trx3IPLD, _   = blocks.NewBlockWithCid(MockTransactions.GetRlp(2), Trx3CID)
-    Rct1IPLD, _   = blocks.NewBlockWithCid(MockReceipts.GetRlp(0), Rct1CID)
-    Rct2IPLD, _   = blocks.NewBlockWithCid(MockReceipts.GetRlp(1), Rct2CID)
-    Rct3IPLD, _   = blocks.NewBlockWithCid(MockReceipts.GetRlp(2), Rct3CID)
+    Trx1IPLD, _   = blocks.NewBlockWithCid(Tx1, Trx1CID)
+    Trx2IPLD, _   = blocks.NewBlockWithCid(Tx2, Trx2CID)
+    Trx3IPLD, _   = blocks.NewBlockWithCid(Tx3, Trx3CID)
+    Trx4IPLD, _   = blocks.NewBlockWithCid(Tx4, Trx4CID)
|
|
||||||
State1IPLD, _ = blocks.NewBlockWithCid(ContractLeafNode, State1CID)
|
State1IPLD, _ = blocks.NewBlockWithCid(ContractLeafNode, State1CID)
|
||||||
State2IPLD, _ = blocks.NewBlockWithCid(AccountLeafNode, State2CID)
|
State2IPLD, _ = blocks.NewBlockWithCid(AccountLeafNode, State2CID)
|
||||||
StorageIPLD, _ = blocks.NewBlockWithCid(StorageLeafNode, StorageCID)
|
StorageIPLD, _ = blocks.NewBlockWithCid(StorageLeafNode, StorageCID)
|
||||||
|
|
||||||
MockIPLDs = eth2.IPLDs{
|
MockIPLDs = eth.IPLDs{
|
||||||
BlockNumber: new(big.Int).Set(BlockNumber),
|
BlockNumber: new(big.Int).Set(BlockNumber),
|
||||||
Header: ipfs.BlockModel{
|
Header: models.IPLDModel{
|
||||||
Data: HeaderIPLD.RawData(),
|
Data: HeaderIPLD.RawData(),
|
||||||
CID: HeaderIPLD.Cid().String(),
|
Key: HeaderIPLD.Cid().String(),
|
||||||
},
|
},
|
||||||
Transactions: []ipfs.BlockModel{
|
Transactions: []models.IPLDModel{
|
||||||
{
|
{
|
||||||
Data: Trx1IPLD.RawData(),
|
Data: Trx1IPLD.RawData(),
|
||||||
CID: Trx1IPLD.Cid().String(),
|
Key: Trx1IPLD.Cid().String(),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Data: Trx2IPLD.RawData(),
|
Data: Trx2IPLD.RawData(),
|
||||||
CID: Trx2IPLD.Cid().String(),
|
Key: Trx2IPLD.Cid().String(),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Data: Trx3IPLD.RawData(),
|
Data: Trx3IPLD.RawData(),
|
||||||
CID: Trx3IPLD.Cid().String(),
|
Key: Trx3IPLD.Cid().String(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Data: Trx4IPLD.RawData(),
|
||||||
|
Key: Trx4IPLD.Cid().String(),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Receipts: []ipfs.BlockModel{
|
Receipts: []models.IPLDModel{
|
||||||
{
|
{
|
||||||
Data: Rct1IPLD.RawData(),
|
Data: Rct1IPLD,
|
||||||
CID: Rct1IPLD.Cid().String(),
|
Key: Rct1CID.String(),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Data: Rct2IPLD.RawData(),
|
Data: Rct2IPLD,
|
||||||
CID: Rct2IPLD.Cid().String(),
|
Key: Rct2CID.String(),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Data: Rct3IPLD.RawData(),
|
Data: Rct3IPLD,
|
||||||
CID: Rct3IPLD.Cid().String(),
|
Key: Rct3CID.String(),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Data: Rct4IPLD,
|
||||||
|
Key: Rct4CID.String(),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
StateNodes: []eth2.StateNode{
|
StateNodes: []eth.StateNode{
|
||||||
{
|
{
|
||||||
StateLeafKey: common.BytesToHash(ContractLeafKey),
|
StateLeafKey: common.BytesToHash(ContractLeafKey),
|
||||||
Type: sdtypes.Leaf,
|
Type: sdtypes.Leaf,
|
||||||
IPLD: ipfs.BlockModel{
|
IPLD: models.IPLDModel{
|
||||||
Data: State1IPLD.RawData(),
|
Data: State1IPLD.RawData(),
|
||||||
CID: State1IPLD.Cid().String(),
|
Key: State1IPLD.Cid().String(),
|
||||||
},
|
},
|
||||||
Path: []byte{'\x06'},
|
Path: []byte{'\x06'},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
StateLeafKey: common.BytesToHash(AccountLeafKey),
|
StateLeafKey: common.BytesToHash(AccountLeafKey),
|
||||||
Type: sdtypes.Leaf,
|
Type: sdtypes.Leaf,
|
||||||
IPLD: ipfs.BlockModel{
|
IPLD: models.IPLDModel{
|
||||||
Data: State2IPLD.RawData(),
|
Data: State2IPLD.RawData(),
|
||||||
CID: State2IPLD.Cid().String(),
|
Key: State2IPLD.Cid().String(),
|
||||||
},
|
},
|
||||||
Path: []byte{'\x0c'},
|
Path: []byte{'\x0c'},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
StorageNodes: []eth2.StorageNode{
|
StorageNodes: []eth.StorageNode{
|
||||||
{
|
{
|
||||||
StateLeafKey: common.BytesToHash(ContractLeafKey),
|
StateLeafKey: common.BytesToHash(ContractLeafKey),
|
||||||
StorageLeafKey: common.BytesToHash(StorageLeafKey),
|
StorageLeafKey: common.BytesToHash(StorageLeafKey),
|
||||||
Type: sdtypes.Leaf,
|
Type: sdtypes.Leaf,
|
||||||
IPLD: ipfs.BlockModel{
|
IPLD: models.IPLDModel{
|
||||||
Data: StorageIPLD.RawData(),
|
Data: StorageIPLD.RawData(),
|
||||||
CID: StorageIPLD.Cid().String(),
|
Key: StorageIPLD.Cid().String(),
|
||||||
},
|
},
|
||||||
Path: []byte{},
|
Path: []byte{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
LondonBlockNum = new(big.Int).Add(BlockNumber, big.NewInt(2))
|
||||||
|
MockLondonHeader = types.Header{
|
||||||
|
Time: 0,
|
||||||
|
Number: LondonBlockNum,
|
||||||
|
Root: common.HexToHash("0x00"),
|
||||||
|
Difficulty: big.NewInt(5000000),
|
||||||
|
Extra: []byte{},
|
||||||
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
||||||
|
}
|
||||||
|
|
||||||
|
MockLondonTransactions, MockLondonReceipts, _ = createDynamicTransactionsAndReceipts(LondonBlockNum)
|
||||||
|
MockLondonBlock = createNewBlock(&MockLondonHeader, MockLondonTransactions, nil, MockLondonReceipts, new(trie.Trie))
|
||||||
)
|
)
|
||||||
|
|
||||||
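The London mocks above only make sense if the test chain config actually activates EIP-1559 at that height; a small hedged sanity check of that assumption (the config mutation mirrors what createDynamicTransactionsAndReceipts below does, and the identifiers are the fixtures from this file):

config := params.TestChainConfig
config.LondonBlock = LondonBlockNum
if config.IsLondon(MockLondonHeader.Number) {
	// dynamic-fee (type 0x02) transactions and a non-nil header BaseFee are valid here
}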
// createTransactionsAndReceipts is a helper function to generate signed mock transactions and mock receipts with mock logs
|
func createNewBlock(header *types.Header, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt, hasher types.TrieHasher) *types.Block {
|
||||||
func createTransactionsAndReceipts() (types.Transactions, types.Receipts, common.Address) {
|
block := types.NewBlock(header, txs, uncles, receipts, hasher)
|
||||||
|
bHash := block.Hash()
|
||||||
|
for _, r := range receipts {
|
||||||
|
for _, l := range r.Logs {
|
||||||
|
l.BlockHash = bHash
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return block
|
||||||
|
}
|
||||||
|
|
||||||
|
// createDynamicTransactionsAndReceipts is a helper function to generate signed mock transactions and mock receipts with mock logs
|
||||||
|
func createDynamicTransactionsAndReceipts(blockNumber *big.Int) (types.Transactions, types.Receipts, common.Address) {
|
||||||
|
// make transactions
|
||||||
|
config := params.TestChainConfig
|
||||||
|
config.LondonBlock = blockNumber
|
||||||
|
trx1 := types.NewTx(&types.DynamicFeeTx{
|
||||||
|
ChainID: config.ChainID,
|
||||||
|
Nonce: 1,
|
||||||
|
GasTipCap: big.NewInt(50),
|
||||||
|
GasFeeCap: big.NewInt(100),
|
||||||
|
Gas: 50,
|
||||||
|
To: &Address,
|
||||||
|
Value: big.NewInt(1000),
|
||||||
|
Data: []byte{},
|
||||||
|
})
|
||||||
|
|
||||||
|
transactionSigner := types.MakeSigner(config, blockNumber)
|
||||||
|
mockCurve := elliptic.P256()
|
||||||
|
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err.Error())
|
||||||
|
}
|
||||||
|
signedTrx1, err := types.SignTx(trx1, transactionSigner, mockPrvKey)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
senderAddr, err := types.Sender(transactionSigner, signedTrx1) // same for both trx
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
// make receipts
|
||||||
|
// TODO: Change the receipt type to DynamicFeeTxType once this PR is merged.
|
||||||
|
// https://github.com/ethereum/go-ethereum/pull/22806
|
||||||
|
mockReceipt1 := &types.Receipt{
|
||||||
|
Type: types.DynamicFeeTxType,
|
||||||
|
PostState: common.HexToHash("0x0").Bytes(),
|
||||||
|
Status: types.ReceiptStatusSuccessful,
|
||||||
|
CumulativeGasUsed: 50,
|
||||||
|
Logs: []*types.Log{},
|
||||||
|
TxHash: signedTrx1.Hash(),
|
||||||
|
}
|
||||||
|
|
||||||
|
return types.Transactions{signedTrx1}, types.Receipts{mockReceipt1}, senderAddr
|
||||||
|
}
|
||||||
|
|
||||||
|
// createLegacyTransactionsAndReceipts is a helper function to generate signed mock transactions and mock receipts with mock logs
|
||||||
|
func createLegacyTransactionsAndReceipts() (types.Transactions, types.Receipts, common.Address) {
|
||||||
// make transactions
|
// make transactions
|
||||||
trx1 := types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), []byte{})
|
trx1 := types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), []byte{})
|
||||||
trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), []byte{})
|
trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), []byte{})
|
||||||
trx3 := types.NewContractCreation(2, big.NewInt(1500), 75, big.NewInt(150), MockContractByteCode)
|
trx3 := types.NewContractCreation(2, big.NewInt(1500), 75, big.NewInt(150), MockContractByteCode)
|
||||||
|
trx4 := types.NewTransaction(3, AnotherAddress1, big.NewInt(2000), 100, big.NewInt(200), []byte{})
|
||||||
transactionSigner := types.MakeSigner(params.MainnetChainConfig, new(big.Int).Set(BlockNumber))
|
transactionSigner := types.MakeSigner(params.MainnetChainConfig, new(big.Int).Set(BlockNumber))
|
||||||
mockCurve := elliptic.P256()
|
mockCurve := elliptic.P256()
|
||||||
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
|
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
|
||||||
@ -513,19 +653,60 @@ func createTransactionsAndReceipts() (types.Transactions, types.Receipts, common
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
signedTrx4, err := types.SignTx(trx4, transactionSigner, mockPrvKey)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
SenderAddr, err := types.Sender(transactionSigner, signedTrx1) // same for both trx
|
SenderAddr, err := types.Sender(transactionSigner, signedTrx1) // same for both trx
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
// make receipts
|
// make receipts
|
||||||
mockReceipt1 := types.NewReceipt(common.HexToHash("0x0").Bytes(), false, 50)
|
mockReceipt1 := types.NewReceipt(nil, false, 50)
|
||||||
|
|
||||||
|
hash1 := signedTrx1.Hash()
|
||||||
|
MockLog1.TxHash = hash1
|
||||||
|
|
||||||
mockReceipt1.Logs = []*types.Log{MockLog1}
|
mockReceipt1.Logs = []*types.Log{MockLog1}
|
||||||
mockReceipt1.TxHash = signedTrx1.Hash()
|
mockReceipt1.TxHash = hash1
|
||||||
|
mockReceipt1.GasUsed = mockReceipt1.CumulativeGasUsed
|
||||||
|
|
||||||
mockReceipt2 := types.NewReceipt(common.HexToHash("0x1").Bytes(), false, 100)
|
mockReceipt2 := types.NewReceipt(common.HexToHash("0x1").Bytes(), false, 100)
|
||||||
|
hash2 := signedTrx2.Hash()
|
||||||
|
MockLog2.TxHash = hash2
|
||||||
|
|
||||||
mockReceipt2.Logs = []*types.Log{MockLog2}
|
mockReceipt2.Logs = []*types.Log{MockLog2}
|
||||||
mockReceipt2.TxHash = signedTrx2.Hash()
|
mockReceipt2.TxHash = hash2
|
||||||
mockReceipt3 := types.NewReceipt(common.HexToHash("0x2").Bytes(), false, 75)
|
mockReceipt2.GasUsed = mockReceipt2.CumulativeGasUsed - mockReceipt1.CumulativeGasUsed
|
||||||
mockReceipt3.Logs = []*types.Log{}
|
|
||||||
|
mockReceipt3 := types.NewReceipt(common.HexToHash("0x2").Bytes(), false, 175)
|
||||||
|
mockReceipt3.Logs = []*types.Log{MockLog3, MockLog4, MockLog5}
|
||||||
mockReceipt3.TxHash = signedTrx3.Hash()
|
mockReceipt3.TxHash = signedTrx3.Hash()
|
||||||
return types.Transactions{signedTrx1, signedTrx2, signedTrx3}, types.Receipts{mockReceipt1, mockReceipt2, mockReceipt3}, SenderAddr
|
mockReceipt3.GasUsed = mockReceipt3.CumulativeGasUsed - mockReceipt2.CumulativeGasUsed
|
||||||
|
|
||||||
|
// Receipt with failed status.
|
||||||
|
mockReceipt4 := types.NewReceipt(nil, true, 250)
|
||||||
|
mockReceipt4.Logs = []*types.Log{MockLog6}
|
||||||
|
mockReceipt4.TxHash = signedTrx4.Hash()
|
||||||
|
mockReceipt4.GasUsed = mockReceipt4.CumulativeGasUsed - mockReceipt3.CumulativeGasUsed
|
||||||
|
|
||||||
|
return types.Transactions{signedTrx1, signedTrx2, signedTrx3, signedTrx4}, types.Receipts{mockReceipt1, mockReceipt2, mockReceipt3, mockReceipt4}, SenderAddr
|
||||||
|
}
|
||||||
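The GasUsed backfilling above follows from receipts storing cumulative gas: each transaction's own gas is the delta between consecutive cumulative values. A minimal sketch of that relation (illustrative only, assuming an ordered types.Receipts slice):

prev := uint64(0)
for _, r := range receipts {
	// the first receipt's GasUsed equals its own CumulativeGasUsed
	r.GasUsed = r.CumulativeGasUsed - prev
	prev = r.CumulativeGasUsed
}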
|
|
||||||
|
func GetTxnRlp(num int, txs types.Transactions) []byte {
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
txs.EncodeIndex(num, buf)
|
||||||
|
tx := make([]byte, buf.Len())
|
||||||
|
copy(tx, buf.Bytes())
|
||||||
|
buf.Reset()
|
||||||
|
return tx
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetRctRlp(num int, rcts types.Receipts) []byte {
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
rcts.EncodeIndex(num, buf)
|
||||||
|
rct := make([]byte, buf.Len())
|
||||||
|
copy(rct, buf.Bytes())
|
||||||
|
buf.Reset()
|
||||||
|
return rct
|
||||||
}
|
}
|
||||||
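GetTxnRlp and GetRctRlp wrap Transactions.EncodeIndex / Receipts.EncodeIndex, which replaced the removed GetRlp(i) accessors in newer go-ethereum releases. A hedged usage sketch against the mock fixtures defined earlier in this file:

tx1RLP := GetTxnRlp(0, MockTransactions)  // raw consensus encoding of the first mock tx
rct1RLP := GetRctRlp(0, MockReceipts)     // raw consensus encoding of the first mock receipt
_, _ = tx1RLP, rct1RLP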
|
212 pkg/eth/types.go
@ -17,32 +17,39 @@
|
|||||||
package eth
|
package eth
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
|
"github.com/ethereum/go-ethereum/common/math"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
||||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
||||||
"github.com/vulcanize/ipld-eth-indexer/pkg/eth"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction
|
// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction
|
||||||
type RPCTransaction struct {
|
type RPCTransaction struct {
|
||||||
BlockHash *common.Hash `json:"blockHash"`
|
BlockHash *common.Hash `json:"blockHash"`
|
||||||
BlockNumber *hexutil.Big `json:"blockNumber"`
|
BlockNumber *hexutil.Big `json:"blockNumber"`
|
||||||
From common.Address `json:"from"`
|
From common.Address `json:"from"`
|
||||||
Gas hexutil.Uint64 `json:"gas"`
|
Gas hexutil.Uint64 `json:"gas"`
|
||||||
GasPrice *hexutil.Big `json:"gasPrice"`
|
GasPrice *hexutil.Big `json:"gasPrice"`
|
||||||
Hash common.Hash `json:"hash"`
|
GasFeeCap *hexutil.Big `json:"maxFeePerGas,omitempty"`
|
||||||
Input hexutil.Bytes `json:"input"`
|
GasTipCap *hexutil.Big `json:"maxPriorityFeePerGas,omitempty"`
|
||||||
Nonce hexutil.Uint64 `json:"nonce"`
|
Hash common.Hash `json:"hash"`
|
||||||
To *common.Address `json:"to"`
|
Input hexutil.Bytes `json:"input"`
|
||||||
TransactionIndex *hexutil.Uint64 `json:"transactionIndex"`
|
Nonce hexutil.Uint64 `json:"nonce"`
|
||||||
Value *hexutil.Big `json:"value"`
|
To *common.Address `json:"to"`
|
||||||
V *hexutil.Big `json:"v"`
|
TransactionIndex *hexutil.Uint64 `json:"transactionIndex"`
|
||||||
R *hexutil.Big `json:"r"`
|
Value *hexutil.Big `json:"value"`
|
||||||
S *hexutil.Big `json:"s"`
|
Type hexutil.Uint64 `json:"type"`
|
||||||
|
Accesses *types.AccessList `json:"accessList,omitempty"`
|
||||||
|
ChainID *hexutil.Big `json:"chainId,omitempty"`
|
||||||
|
V *hexutil.Big `json:"v"`
|
||||||
|
R *hexutil.Big `json:"r"`
|
||||||
|
S *hexutil.Big `json:"s"`
|
||||||
}
|
}
|
||||||
|
|
||||||
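A minimal, hypothetical sketch of filling the new typed-transaction fields from a *types.Transaction; this is illustrative only, not the marshalling code the package actually uses (tx is assumed to be a signed dynamic-fee transaction):

v, r, s := tx.RawSignatureValues()
rpcTx := RPCTransaction{
	Hash:      tx.Hash(),
	Nonce:     hexutil.Uint64(tx.Nonce()),
	Gas:       hexutil.Uint64(tx.Gas()),
	GasPrice:  (*hexutil.Big)(tx.GasPrice()),
	GasFeeCap: (*hexutil.Big)(tx.GasFeeCap()),
	GasTipCap: (*hexutil.Big)(tx.GasTipCap()),
	To:        tx.To(),
	Value:     (*hexutil.Big)(tx.Value()),
	Input:     tx.Data(),
	Type:      hexutil.Uint64(tx.Type()),
	ChainID:   (*hexutil.Big)(tx.ChainId()),
	V:         (*hexutil.Big)(v),
	R:         (*hexutil.Big)(r),
	S:         (*hexutil.Big)(s),
}
_ = rpcTx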
// RPCReceipt represents a receipt that will serialize to the RPC representation of a receipt
|
// RPCReceipt represents a receipt that will serialize to the RPC representation of a receipt
|
||||||
@ -82,26 +89,106 @@ type StorageResult struct {
|
|||||||
|
|
||||||
// CallArgs represents the arguments for a call.
|
// CallArgs represents the arguments for a call.
|
||||||
type CallArgs struct {
|
type CallArgs struct {
|
||||||
From *common.Address `json:"from"`
|
From *common.Address `json:"from"`
|
||||||
To *common.Address `json:"to"`
|
To *common.Address `json:"to"`
|
||||||
Gas *hexutil.Uint64 `json:"gas"`
|
Gas *hexutil.Uint64 `json:"gas"`
|
||||||
GasPrice *hexutil.Big `json:"gasPrice"`
|
GasPrice *hexutil.Big `json:"gasPrice"`
|
||||||
Value *hexutil.Big `json:"value"`
|
MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"`
|
||||||
Data *hexutil.Bytes `json:"data"`
|
MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"`
|
||||||
|
Value *hexutil.Big `json:"value"`
|
||||||
|
Data *hexutil.Bytes `json:"data"`
|
||||||
|
AccessList *types.AccessList `json:"accessList,omitempty"`
|
||||||
|
Input *hexutil.Bytes `json:"input"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// account indicates the overriding fields of account during the execution of
|
// from retrieves the transaction sender address.
|
||||||
// a message call.
|
func (arg *CallArgs) from() common.Address {
|
||||||
// Note, state and stateDiff can't be specified at the same time. If state is
|
if arg.From == nil {
|
||||||
// set, message execution will only use the data in the given state. Otherwise
|
return common.Address{}
|
||||||
// if statDiff is set, all diff will be applied first and then execute the call
|
}
|
||||||
// message.
|
return *arg.From
|
||||||
type account struct {
|
}
|
||||||
Nonce *hexutil.Uint64 `json:"nonce"`
|
|
||||||
Code *hexutil.Bytes `json:"code"`
|
// data retrieves the transaction calldata. Input field is preferred.
|
||||||
Balance **hexutil.Big `json:"balance"`
|
func (arg *CallArgs) data() []byte {
|
||||||
State *map[common.Hash]common.Hash `json:"state"`
|
if arg.Input != nil {
|
||||||
StateDiff *map[common.Hash]common.Hash `json:"stateDiff"`
|
return *arg.Input
|
||||||
|
}
|
||||||
|
if arg.Data != nil {
|
||||||
|
return *arg.Data
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToMessage converts the transaction arguments to the Message type used by the
|
||||||
|
// core evm. This method is used in calls and traces that do not require a real
|
||||||
|
// live transaction.
|
||||||
|
func (arg *CallArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (types.Message, error) {
|
||||||
|
// Reject invalid combinations of pre- and post-1559 fee styles
|
||||||
|
if arg.GasPrice != nil && (arg.MaxFeePerGas != nil || arg.MaxPriorityFeePerGas != nil) {
|
||||||
|
return types.Message{}, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
|
||||||
|
}
|
||||||
|
// Set sender address or use zero address if none specified.
|
||||||
|
addr := arg.from()
|
||||||
|
|
||||||
|
// Set default gas & gas price if none were set
|
||||||
|
gas := globalGasCap
|
||||||
|
if gas == 0 {
|
||||||
|
gas = uint64(math.MaxUint64 / 2)
|
||||||
|
}
|
||||||
|
if arg.Gas != nil {
|
||||||
|
gas = uint64(*arg.Gas)
|
||||||
|
}
|
||||||
|
if globalGasCap != 0 && globalGasCap < gas {
|
||||||
|
logrus.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap)
|
||||||
|
gas = globalGasCap
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
gasPrice *big.Int
|
||||||
|
gasFeeCap *big.Int
|
||||||
|
gasTipCap *big.Int
|
||||||
|
)
|
||||||
|
if baseFee == nil {
|
||||||
|
// If there's no basefee, then it must be a non-1559 execution
|
||||||
|
gasPrice = new(big.Int)
|
||||||
|
if arg.GasPrice != nil {
|
||||||
|
gasPrice = arg.GasPrice.ToInt()
|
||||||
|
}
|
||||||
|
gasFeeCap, gasTipCap = gasPrice, gasPrice
|
||||||
|
} else {
|
||||||
|
// A basefee is provided, necessitating 1559-type execution
|
||||||
|
if arg.GasPrice != nil {
|
||||||
|
// User specified the legacy gas field, convert to 1559 gas typing
|
||||||
|
gasPrice = arg.GasPrice.ToInt()
|
||||||
|
gasFeeCap, gasTipCap = gasPrice, gasPrice
|
||||||
|
} else {
|
||||||
|
// User specified 1559 gas fields (or none), use those
|
||||||
|
gasFeeCap = new(big.Int)
|
||||||
|
if arg.MaxFeePerGas != nil {
|
||||||
|
gasFeeCap = arg.MaxFeePerGas.ToInt()
|
||||||
|
}
|
||||||
|
gasTipCap = new(big.Int)
|
||||||
|
if arg.MaxPriorityFeePerGas != nil {
|
||||||
|
gasTipCap = arg.MaxPriorityFeePerGas.ToInt()
|
||||||
|
}
|
||||||
|
// Backfill the legacy gasPrice for EVM execution, unless we're all zeroes
|
||||||
|
gasPrice = new(big.Int)
|
||||||
|
if gasFeeCap.BitLen() > 0 || gasTipCap.BitLen() > 0 {
|
||||||
|
gasPrice = math.BigMin(new(big.Int).Add(gasTipCap, baseFee), gasFeeCap)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
value := new(big.Int)
|
||||||
|
if arg.Value != nil {
|
||||||
|
value = arg.Value.ToInt()
|
||||||
|
}
|
||||||
|
data := arg.data()
|
||||||
|
var accessList types.AccessList
|
||||||
|
if arg.AccessList != nil {
|
||||||
|
accessList = *arg.AccessList
|
||||||
|
}
|
||||||
|
msg := types.NewMessage(addr, arg.To, 0, value, gas, gasPrice, gasFeeCap, gasTipCap, data, accessList, true)
|
||||||
|
return msg, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
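A small worked example of the fee backfill performed in ToMessage: when 1559 fields are in play, the legacy gasPrice used for EVM execution is min(gasTipCap + baseFee, gasFeeCap). Hypothetical numbers:

baseFee := big.NewInt(1_000_000_000)   // 1 gwei
gasTipCap := big.NewInt(2_000_000_000) // maxPriorityFeePerGas: 2 gwei
gasFeeCap := big.NewInt(4_000_000_000) // maxFeePerGas: 4 gwei
// min(2 + 1, 4) gwei = 3 gwei
gasPrice := math.BigMin(new(big.Int).Add(gasTipCap, baseFee), gasFeeCap)
_ = gasPrice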
// IPLDs is used to package raw IPLD block data fetched from IPFS and returned by the server
|
// IPLDs is used to package raw IPLD block data fetched from IPFS and returned by the server
|
||||||
@ -109,10 +196,10 @@ type account struct {
|
|||||||
type IPLDs struct {
|
type IPLDs struct {
|
||||||
BlockNumber *big.Int
|
BlockNumber *big.Int
|
||||||
TotalDifficulty *big.Int
|
TotalDifficulty *big.Int
|
||||||
Header ipfs.BlockModel
|
Header models.IPLDModel
|
||||||
Uncles []ipfs.BlockModel
|
Uncles []models.IPLDModel
|
||||||
Transactions []ipfs.BlockModel
|
Transactions []models.IPLDModel
|
||||||
Receipts []ipfs.BlockModel
|
Receipts []models.IPLDModel
|
||||||
StateNodes []StateNode
|
StateNodes []StateNode
|
||||||
StorageNodes []StorageNode
|
StorageNodes []StorageNode
|
||||||
}
|
}
|
||||||
@ -121,7 +208,7 @@ type StateNode struct {
|
|||||||
Type sdtypes.NodeType
|
Type sdtypes.NodeType
|
||||||
StateLeafKey common.Hash
|
StateLeafKey common.Hash
|
||||||
Path []byte
|
Path []byte
|
||||||
IPLD ipfs.BlockModel
|
IPLD models.IPLDModel
|
||||||
}
|
}
|
||||||
|
|
||||||
type StorageNode struct {
|
type StorageNode struct {
|
||||||
@ -129,7 +216,7 @@ type StorageNode struct {
|
|||||||
StateLeafKey common.Hash
|
StateLeafKey common.Hash
|
||||||
StorageLeafKey common.Hash
|
StorageLeafKey common.Hash
|
||||||
Path []byte
|
Path []byte
|
||||||
IPLD ipfs.BlockModel
|
IPLD models.IPLDModel
|
||||||
}
|
}
|
||||||
|
|
||||||
// CIDWrapper is used to direct fetching of IPLDs from IPFS
|
// CIDWrapper is used to direct fetching of IPLDs from IPFS
|
||||||
@ -137,10 +224,43 @@ type StorageNode struct {
|
|||||||
// Passed to IPLDFetcher
|
// Passed to IPLDFetcher
|
||||||
type CIDWrapper struct {
|
type CIDWrapper struct {
|
||||||
BlockNumber *big.Int
|
BlockNumber *big.Int
|
||||||
Header eth.HeaderModel
|
Header models.HeaderModel
|
||||||
Uncles []eth.UncleModel
|
Uncles []models.UncleModel
|
||||||
Transactions []eth.TxModel
|
Transactions []models.TxModel
|
||||||
Receipts []eth.ReceiptModel
|
Receipts []models.ReceiptModel
|
||||||
StateNodes []eth.StateNodeModel
|
StateNodes []models.StateNodeModel
|
||||||
StorageNodes []eth.StorageNodeWithStateKeyModel
|
StorageNodes []models.StorageNodeWithStateKeyModel
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConvertedPayload is a custom type which packages raw ETH data for publishing to IPFS and filtering to subscribers
|
||||||
|
// Returned by PayloadConverter
|
||||||
|
// Passed to IPLDPublisher and ResponseFilterer
|
||||||
|
type ConvertedPayload struct {
|
||||||
|
TotalDifficulty *big.Int
|
||||||
|
Block *types.Block
|
||||||
|
TxMetaData []models.TxModel
|
||||||
|
Receipts types.Receipts
|
||||||
|
ReceiptMetaData []models.ReceiptModel
|
||||||
|
StateNodes []sdtypes.StateNode
|
||||||
|
StorageNodes map[string][]sdtypes.StorageNode
|
||||||
|
}
|
||||||
|
|
||||||
|
// LogResult represent a log.
|
||||||
|
type LogResult struct {
|
||||||
|
LeafCID string `db:"leaf_cid"`
|
||||||
|
ReceiptID string `db:"rct_id"`
|
||||||
|
Address string `db:"address"`
|
||||||
|
Index int64 `db:"index"`
|
||||||
|
Data []byte `db:"log_data"`
|
||||||
|
Topic0 string `db:"topic0"`
|
||||||
|
Topic1 string `db:"topic1"`
|
||||||
|
Topic2 string `db:"topic2"`
|
||||||
|
Topic3 string `db:"topic3"`
|
||||||
|
LogLeafData []byte `db:"data"`
|
||||||
|
RctCID string `db:"cid"`
|
||||||
|
RctStatus uint64 `db:"post_status"`
|
||||||
|
BlockNumber string `db:"block_number"`
|
||||||
|
BlockHash string `db:"block_hash"`
|
||||||
|
TxnIndex int64 `db:"txn_index"`
|
||||||
|
TxHash string `db:"tx_hash"`
|
||||||
}
|
}
|
||||||
|
267 pkg/graphql/client.go Normal file
@ -0,0 +1,267 @@
|
|||||||
|
package graphql
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
|
gqlclient "github.com/machinebox/graphql"
|
||||||
|
)
|
||||||
|
|
||||||
|
type StorageResponse struct {
|
||||||
|
CID string `json:"cid"`
|
||||||
|
Value common.Hash `json:"value"`
|
||||||
|
IpldBlock hexutil.Bytes `json:"ipldBlock"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type GetStorageAt struct {
|
||||||
|
Response StorageResponse `json:"getStorageAt"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type LogResponse struct {
|
||||||
|
Topics []common.Hash `json:"topics"`
|
||||||
|
Data hexutil.Bytes `json:"data"`
|
||||||
|
Transaction TransactionResponse `json:"transaction"`
|
||||||
|
ReceiptCID string `json:"receiptCID"`
|
||||||
|
Status int32 `json:"status"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type TransactionResponse struct {
|
||||||
|
Hash common.Hash `json:"hash"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type GetLogs struct {
|
||||||
|
Responses []LogResponse `json:"getLogs"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type IPFSBlockResponse struct {
|
||||||
|
Key string `json:"key"`
|
||||||
|
Data string `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type EthTransactionCIDResponse struct {
|
||||||
|
CID string `json:"cid"`
|
||||||
|
TxHash string `json:"txHash"`
|
||||||
|
Index int32 `json:"index"`
|
||||||
|
Src string `json:"src"`
|
||||||
|
Dst string `json:"dst"`
|
||||||
|
BlockByMhKey IPFSBlockResponse `json:"blockByMhKey"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type EthTransactionCIDByTxHash struct {
|
||||||
|
Response EthTransactionCIDResponse `json:"ethTransactionCidByTxHash"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type EthTransactionCIDsByHeaderIdResponse struct {
|
||||||
|
Nodes []EthTransactionCIDResponse `json:"nodes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type EthHeaderCIDResponse struct {
|
||||||
|
CID string `json:"cid"`
|
||||||
|
BlockNumber BigInt `json:"blockNumber"`
|
||||||
|
BlockHash string `json:"blockHash"`
|
||||||
|
ParentHash string `json:"parentHash"`
|
||||||
|
Timestamp BigInt `json:"timestamp"`
|
||||||
|
StateRoot string `json:"stateRoot"`
|
||||||
|
Td BigInt `json:"td"`
|
||||||
|
TxRoot string `json:"txRoot"`
|
||||||
|
ReceiptRoot string `json:"receiptRoot"`
|
||||||
|
UncleRoot string `json:"uncleRoot"`
|
||||||
|
Bloom string `json:"bloom"`
|
||||||
|
EthTransactionCIDsByHeaderId EthTransactionCIDsByHeaderIdResponse `json:"ethTransactionCidsByHeaderId"`
|
||||||
|
BlockByMhKey IPFSBlockResponse `json:"blockByMhKey"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type AllEthHeaderCIDsResponse struct {
|
||||||
|
Nodes []EthHeaderCIDResponse `json:"nodes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type AllEthHeaderCIDs struct {
|
||||||
|
Response AllEthHeaderCIDsResponse `json:"allEthHeaderCids"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Client struct {
|
||||||
|
client *gqlclient.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewClient(endpoint string) *Client {
|
||||||
|
client := gqlclient.NewClient(endpoint)
|
||||||
|
return &Client{client: client}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) GetLogs(ctx context.Context, hash common.Hash, address *common.Address) ([]LogResponse, error) {
|
||||||
|
params := fmt.Sprintf(`blockHash: "%s"`, hash.String())
|
||||||
|
if address != nil {
|
||||||
|
params += fmt.Sprintf(`, contract: "%s"`, address.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
getLogsQuery := fmt.Sprintf(`query{
|
||||||
|
getLogs(%s) {
|
||||||
|
data
|
||||||
|
topics
|
||||||
|
transaction {
|
||||||
|
hash
|
||||||
|
}
|
||||||
|
status
|
||||||
|
receiptCID
|
||||||
|
}
|
||||||
|
}`, params)
|
||||||
|
|
||||||
|
req := gqlclient.NewRequest(getLogsQuery)
|
||||||
|
req.Header.Set("Cache-Control", "no-cache")
|
||||||
|
|
||||||
|
var respData map[string]interface{}
|
||||||
|
err := c.client.Run(ctx, req, &respData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
jsonStr, err := json.Marshal(respData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var logs GetLogs
|
||||||
|
err = json.Unmarshal(jsonStr, &logs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return logs.Responses, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) GetStorageAt(ctx context.Context, hash common.Hash, address common.Address, slot string) (*StorageResponse, error) {
|
||||||
|
getLogsQuery := fmt.Sprintf(`
|
||||||
|
query{
|
||||||
|
getStorageAt(blockHash: "%s", contract: "%s",slot: "%s") {
|
||||||
|
cid
|
||||||
|
value
|
||||||
|
ipldBlock
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, hash.String(), address.String(), common.HexToHash(slot))
|
||||||
|
|
||||||
|
req := gqlclient.NewRequest(getLogsQuery)
|
||||||
|
req.Header.Set("Cache-Control", "no-cache")
|
||||||
|
|
||||||
|
var respData map[string]interface{}
|
||||||
|
err := c.client.Run(ctx, req, &respData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
jsonStr, err := json.Marshal(respData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var storageAt GetStorageAt
|
||||||
|
err = json.Unmarshal(jsonStr, &storageAt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &storageAt.Response, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) AllEthHeaderCIDs(ctx context.Context, condition EthHeaderCIDCondition) (*AllEthHeaderCIDsResponse, error) {
|
||||||
|
var params string
|
||||||
|
if condition.BlockHash != nil {
|
||||||
|
params = fmt.Sprintf(`blockHash: "%s"`, *condition.BlockHash)
|
||||||
|
}
|
||||||
|
if condition.BlockNumber != nil {
|
||||||
|
params += fmt.Sprintf(`blockNumber: "%s"`, condition.BlockNumber.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
getHeadersQuery := fmt.Sprintf(`
|
||||||
|
query{
|
||||||
|
allEthHeaderCids(condition: { %s }) {
|
||||||
|
nodes {
|
||||||
|
cid
|
||||||
|
blockNumber
|
||||||
|
blockHash
|
||||||
|
parentHash
|
||||||
|
timestamp
|
||||||
|
stateRoot
|
||||||
|
td
|
||||||
|
txRoot
|
||||||
|
receiptRoot
|
||||||
|
uncleRoot
|
||||||
|
bloom
|
||||||
|
blockByMhKey {
|
||||||
|
key
|
||||||
|
data
|
||||||
|
}
|
||||||
|
ethTransactionCidsByHeaderId {
|
||||||
|
nodes {
|
||||||
|
cid
|
||||||
|
txHash
|
||||||
|
index
|
||||||
|
src
|
||||||
|
dst
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, params)
|
||||||
|
|
||||||
|
req := gqlclient.NewRequest(getHeadersQuery)
|
||||||
|
req.Header.Set("Cache-Control", "no-cache")
|
||||||
|
|
||||||
|
var respData map[string]interface{}
|
||||||
|
err := c.client.Run(ctx, req, &respData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
jsonStr, err := json.Marshal(respData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var allEthHeaderCIDs AllEthHeaderCIDs
|
||||||
|
err = json.Unmarshal(jsonStr, &allEthHeaderCIDs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &allEthHeaderCIDs.Response, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) EthTransactionCIDByTxHash(ctx context.Context, txHash string) (*EthTransactionCIDResponse, error) {
|
||||||
|
getTxQuery := fmt.Sprintf(`
|
||||||
|
query{
|
||||||
|
ethTransactionCidByTxHash(txHash: "%s") {
|
||||||
|
cid
|
||||||
|
txHash
|
||||||
|
index
|
||||||
|
src
|
||||||
|
dst
|
||||||
|
blockByMhKey {
|
||||||
|
data
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, txHash)
|
||||||
|
|
||||||
|
req := gqlclient.NewRequest(getTxQuery)
|
||||||
|
req.Header.Set("Cache-Control", "no-cache")
|
||||||
|
|
||||||
|
var respData map[string]interface{}
|
||||||
|
err := c.client.Run(ctx, req, &respData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
jsonStr, err := json.Marshal(respData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var ethTxCID EthTransactionCIDByTxHash
|
||||||
|
err = json.Unmarshal(jsonStr, ðTxCID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ðTxCID.Response, nil
|
||||||
|
}
|
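A hedged usage sketch for the client above; the endpoint, blockHash, contractAddress and txHash values are placeholders in the spirit of the test setup later in this diff, not fixed requirements:

ctx := context.Background()
client := NewClient("http://127.0.0.1:8083/graphql")

// Logs emitted in a block, optionally filtered by contract address.
logs, err := client.GetLogs(ctx, blockHash, &contractAddress)
if err == nil {
	for _, l := range logs {
		fmt.Println(l.Transaction.Hash.Hex(), l.ReceiptCID, l.Status)
	}
}

// Transaction CID lookup by hash.
txCID, err := client.EthTransactionCIDByTxHash(ctx, txHash.Hex())
if err == nil {
	fmt.Println(txCID.CID, txCID.Src, txCID.Dst)
}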
@ -18,8 +18,11 @@
|
|||||||
package graphql
|
package graphql
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"database/sql"
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
@ -28,9 +31,11 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/eth/filters"
|
"github.com/ethereum/go-ethereum/eth/filters"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
|
|
||||||
"github.com/vulcanize/ipld-eth-server/pkg/eth"
|
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
|
||||||
|
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -91,13 +96,19 @@ type Log struct {
|
|||||||
backend *eth.Backend
|
backend *eth.Backend
|
||||||
transaction *Transaction
|
transaction *Transaction
|
||||||
log *types.Log
|
log *types.Log
|
||||||
|
cid string
|
||||||
|
receiptCID string
|
||||||
|
ipldBlock []byte // log leaf node IPLD block data
|
||||||
|
status uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *Log) Transaction(ctx context.Context) *Transaction {
|
// Transaction returns transaction that generated this log entry.
|
||||||
|
func (l *Log) Transaction(_ context.Context) *Transaction {
|
||||||
return l.transaction
|
return l.transaction
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *Log) Account(ctx context.Context, args BlockNumberArgs) *Account {
|
// Account returns the contract account which generated this log.
|
||||||
|
func (l *Log) Account(_ context.Context, args BlockNumberArgs) *Account {
|
||||||
return &Account{
|
return &Account{
|
||||||
backend: l.backend,
|
backend: l.backend,
|
||||||
address: l.log.Address,
|
address: l.log.Address,
|
||||||
@ -105,16 +116,39 @@ func (l *Log) Account(ctx context.Context, args BlockNumberArgs) *Account {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *Log) Index(ctx context.Context) int32 {
|
// Index returns the index of this log in the block
|
||||||
|
func (l *Log) Index(_ context.Context) int32 {
|
||||||
return int32(l.log.Index)
|
return int32(l.log.Index)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *Log) Topics(ctx context.Context) []common.Hash {
|
// Topics returns the list of 0-4 indexed topics for the log.
|
||||||
|
func (l *Log) Topics(_ context.Context) []common.Hash {
|
||||||
return l.log.Topics
|
return l.log.Topics
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *Log) Data(ctx context.Context) hexutil.Bytes {
|
// Data returns data of this log.
|
||||||
return hexutil.Bytes(l.log.Data)
|
func (l *Log) Data(_ context.Context) hexutil.Bytes {
|
||||||
|
return l.log.Data
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cid returns the CID of the leaf node of this log.
|
||||||
|
func (l *Log) Cid(_ context.Context) string {
|
||||||
|
return l.cid
|
||||||
|
}
|
||||||
|
|
||||||
|
// IpldBlock returns the IPLD block of the leaf node of this log.
|
||||||
|
func (l *Log) IpldBlock(_ context.Context) hexutil.Bytes {
|
||||||
|
return l.ipldBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status returns the status of the receipt IPLD block this Log exists in.
|
||||||
|
func (l *Log) Status(_ context.Context) int32 {
|
||||||
|
return int32(l.status)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReceiptCID returns the CID of the receipt IPLD block this Log exists in.
|
||||||
|
func (l *Log) ReceiptCID(_ context.Context) string {
|
||||||
|
return l.receiptCID
|
||||||
}
|
}
|
||||||
|
|
||||||
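The new resolver methods above (Cid, IpldBlock, ReceiptCID, Status) expose extra fields on the Log type; a hypothetical query exercising them, written as a Go raw string the way the client in pkg/graphql/client.go builds its requests (the cid and ipldBlock field names assume the schema follows the same lower-cased naming as getStorageAt):

logFieldsQuery := `query {
	getLogs(blockHash: "0x0000000000000000000000000000000000000000000000000000000000000000") {
		data
		topics
		cid
		ipldBlock
		receiptCID
		status
		transaction { hash }
	}
}`
_ = logFieldsQuery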
// Transaction represents an Ethereum transaction.
|
// Transaction represents an Ethereum transaction.
|
||||||
@ -803,16 +837,20 @@ func (b *Block) Call(ctx context.Context, args struct {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
result, gas, failed, err := eth.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, 5*time.Second, b.backend.RPCGasCap())
|
result, err := eth.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, 5*time.Second, b.backend.RPCGasCap().Uint64())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
status := hexutil.Uint64(1)
|
status := hexutil.Uint64(1)
|
||||||
if failed {
|
if result.Failed() {
|
||||||
status = 0
|
status = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
return &CallResult{
|
return &CallResult{
|
||||||
data: hexutil.Bytes(result),
|
data: result.ReturnData,
|
||||||
gasUsed: hexutil.Uint64(gas),
|
gasUsed: hexutil.Uint64(result.UsedGas),
|
||||||
status: status,
|
status: status,
|
||||||
}, err
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
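The Call change above follows go-ethereum's newer DoCall contract, where a single *core.ExecutionResult replaces the separate return data, gas and failure flag. A brief hedged sketch of reading such a result (res is assumed to be a *core.ExecutionResult):

if res.Failed() {
	// Revert() returns the ABI-encoded revert reason when the call reverted.
	fmt.Printf("call failed, used %d gas, revert data: %x\n", res.UsedGas, res.Revert())
} else {
	fmt.Printf("call returned: %x\n", res.ReturnData)
}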
// Resolver is the top-level object in the GraphQL hierarchy.
|
// Resolver is the top-level object in the GraphQL hierarchy.
|
||||||
@ -867,7 +905,11 @@ func (r *Resolver) Blocks(ctx context.Context, args struct {
|
|||||||
if args.To != nil {
|
if args.To != nil {
|
||||||
to = rpc.BlockNumber(*args.To)
|
to = rpc.BlockNumber(*args.To)
|
||||||
} else {
|
} else {
|
||||||
to = rpc.BlockNumber(r.backend.CurrentBlock().Number().Int64())
|
block, err := r.backend.CurrentBlock()
|
||||||
|
if err != nil {
|
||||||
|
return []*Block{}, nil
|
||||||
|
}
|
||||||
|
to = rpc.BlockNumber(block.Number().Int64())
|
||||||
}
|
}
|
||||||
if to < from {
|
if to < from {
|
||||||
return []*Block{}, nil
|
return []*Block{}, nil
|
||||||
@ -940,3 +982,386 @@ func (r *Resolver) Logs(ctx context.Context, args struct{ Filter FilterCriteria
|
|||||||
filter := filters.NewRangeFilter(filters.Backend(r.backend), begin, end, addresses, topics)
|
filter := filters.NewRangeFilter(filters.Backend(r.backend), begin, end, addresses, topics)
|
||||||
return runFilter(ctx, r.backend, filter)
|
return runFilter(ctx, r.backend, filter)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// StorageResult represents a storage slot value. All arguments are mandatory.
|
||||||
|
type StorageResult struct {
|
||||||
|
value []byte
|
||||||
|
cid string
|
||||||
|
ipldBlock []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StorageResult) Value(ctx context.Context) common.Hash {
|
||||||
|
return common.BytesToHash(s.value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StorageResult) Cid(ctx context.Context) string {
|
||||||
|
return s.cid
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StorageResult) IpldBlock(ctx context.Context) hexutil.Bytes {
|
||||||
|
return hexutil.Bytes(s.ipldBlock)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Resolver) GetStorageAt(ctx context.Context, args struct {
|
||||||
|
BlockHash common.Hash
|
||||||
|
Contract common.Address
|
||||||
|
Slot common.Hash
|
||||||
|
}) (*StorageResult, error) {
|
||||||
|
cid, ipldBlock, rlpValue, err := r.backend.IPLDRetriever.RetrieveStorageAtByAddressAndStorageSlotAndBlockHash(args.Contract, args.Slot, args.BlockHash)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
ret := StorageResult{value: []byte{}, cid: "", ipldBlock: []byte{}}
|
||||||
|
|
||||||
|
return &ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if bytes.Equal(rlpValue, eth.EmptyNodeValue) {
|
||||||
|
return &StorageResult{value: eth.EmptyNodeValue, cid: cid, ipldBlock: ipldBlock}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var value interface{}
|
||||||
|
err = rlp.DecodeBytes(rlpValue, &value)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ret := StorageResult{value: value.([]byte), cid: cid, ipldBlock: ipldBlock}
|
||||||
|
return &ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
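The resolver above pairs with the GetStorageAt helper added to pkg/graphql/client.go earlier in this diff; a hypothetical round trip, assuming client, ctx, blockHash and contractAddress come from a test setup like the one further below:

res, err := client.GetStorageAt(ctx, blockHash, contractAddress, "0x0")
if err == nil {
	fmt.Println(res.CID, res.Value.Hex(), len(res.IpldBlock))
}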
|
func (r *Resolver) GetLogs(ctx context.Context, args struct {
|
||||||
|
BlockHash common.Hash
|
||||||
|
Contract *common.Address
|
||||||
|
}) (*[]*Log, error) {
|
||||||
|
|
||||||
|
var filter eth.ReceiptFilter
|
||||||
|
if args.Contract != nil {
|
||||||
|
filter.LogAddresses = []string{args.Contract.String()}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Begin tx
|
||||||
|
tx, err := r.backend.DB.Beginx()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
filteredLogs, err := r.backend.Retriever.RetrieveFilteredGQLLogs(tx, filter, &args.BlockHash)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = tx.Commit(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
rctLog := decomposeGQLLogs(filteredLogs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ret := make([]*Log, 0, 10)
|
||||||
|
for _, l := range rctLog {
|
||||||
|
ret = append(ret, &Log{
|
||||||
|
backend: r.backend,
|
||||||
|
log: l.Log,
|
||||||
|
cid: l.CID,
|
||||||
|
receiptCID: l.RctCID,
|
||||||
|
ipldBlock: l.LogLeafData,
|
||||||
|
transaction: &Transaction{
|
||||||
|
hash: l.Log.TxHash,
|
||||||
|
},
|
||||||
|
status: l.RctStatus,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type logsCID struct {
|
||||||
|
Log *types.Log
|
||||||
|
CID string
|
||||||
|
RctCID string
|
||||||
|
LogLeafData []byte
|
||||||
|
RctStatus uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// decomposeGQLLogs returns logs for graphql.
|
||||||
|
func decomposeGQLLogs(logCIDs []eth.LogResult) []logsCID {
|
||||||
|
logs := make([]logsCID, len(logCIDs))
|
||||||
|
for i, l := range logCIDs {
|
||||||
|
topics := make([]common.Hash, 0)
|
||||||
|
if l.Topic0 != "" {
|
||||||
|
topics = append(topics, common.HexToHash(l.Topic0))
|
||||||
|
}
|
||||||
|
if l.Topic1 != "" {
|
||||||
|
topics = append(topics, common.HexToHash(l.Topic1))
|
||||||
|
}
|
||||||
|
if l.Topic2 != "" {
|
||||||
|
topics = append(topics, common.HexToHash(l.Topic2))
|
||||||
|
}
|
||||||
|
if l.Topic3 != "" {
|
||||||
|
topics = append(topics, common.HexToHash(l.Topic3))
|
||||||
|
}
|
||||||
|
|
||||||
|
logs[i] = logsCID{
|
||||||
|
Log: &types.Log{
|
||||||
|
Address: common.HexToAddress(l.Address),
|
||||||
|
Topics: topics,
|
||||||
|
Data: l.Data,
|
||||||
|
Index: uint(l.Index),
|
||||||
|
TxHash: common.HexToHash(l.TxHash),
|
||||||
|
},
|
||||||
|
CID: l.LeafCID,
|
||||||
|
RctCID: l.RctCID,
|
||||||
|
LogLeafData: l.LogLeafData,
|
||||||
|
RctStatus: l.RctStatus,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return logs
|
||||||
|
}
|
||||||
|
|
||||||
|
type EthTransactionCID struct {
|
||||||
|
cid string
|
||||||
|
txHash string
|
||||||
|
index int32
|
||||||
|
src string
|
||||||
|
dst string
|
||||||
|
ipfsBlock IPFSBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t EthTransactionCID) Cid(ctx context.Context) string {
|
||||||
|
return t.cid
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t EthTransactionCID) TxHash(ctx context.Context) string {
|
||||||
|
return t.txHash
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t EthTransactionCID) Index(ctx context.Context) int32 {
|
||||||
|
return t.index
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t EthTransactionCID) Src(ctx context.Context) string {
|
||||||
|
return t.src
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t EthTransactionCID) Dst(ctx context.Context) string {
|
||||||
|
return t.dst
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t EthTransactionCID) BlockByMhKey(ctx context.Context) IPFSBlock {
|
||||||
|
return t.ipfsBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
type EthTransactionCIDsConnection struct {
|
||||||
|
nodes []*EthTransactionCID
|
||||||
|
}
|
||||||
|
|
||||||
|
func (transactionCIDResult EthTransactionCIDsConnection) Nodes(ctx context.Context) []*EthTransactionCID {
|
||||||
|
return transactionCIDResult.nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
type IPFSBlock struct {
|
||||||
|
key string
|
||||||
|
data string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b IPFSBlock) Key(ctx context.Context) string {
|
||||||
|
return b.key
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b IPFSBlock) Data(ctx context.Context) string {
|
||||||
|
return b.data
|
||||||
|
}
|
||||||
|
|
||||||
|
type EthHeaderCID struct {
|
||||||
|
cid string
|
||||||
|
blockNumber BigInt
|
||||||
|
blockHash string
|
||||||
|
parentHash string
|
||||||
|
timestamp BigInt
|
||||||
|
stateRoot string
|
||||||
|
td BigInt
|
||||||
|
txRoot string
|
||||||
|
receiptRoot string
|
||||||
|
uncleRoot string
|
||||||
|
bloom string
|
||||||
|
transactions []*EthTransactionCID
|
||||||
|
ipfsBlock IPFSBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h EthHeaderCID) Cid(ctx context.Context) string {
|
||||||
|
return h.cid
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h EthHeaderCID) BlockNumber(ctx context.Context) BigInt {
|
||||||
|
return h.blockNumber
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h EthHeaderCID) BlockHash(ctx context.Context) string {
|
||||||
|
return h.blockHash
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h EthHeaderCID) ParentHash(ctx context.Context) string {
|
||||||
|
return h.parentHash
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h EthHeaderCID) Timestamp(ctx context.Context) BigInt {
|
||||||
|
return h.timestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h EthHeaderCID) StateRoot(ctx context.Context) string {
|
||||||
|
return h.stateRoot
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h EthHeaderCID) Td(ctx context.Context) BigInt {
|
||||||
|
return h.td
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h EthHeaderCID) TxRoot(ctx context.Context) string {
|
||||||
|
return h.txRoot
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h EthHeaderCID) ReceiptRoot(ctx context.Context) string {
|
||||||
|
return h.receiptRoot
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h EthHeaderCID) UncleRoot(ctx context.Context) string {
|
||||||
|
return h.uncleRoot
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h EthHeaderCID) Bloom(ctx context.Context) string {
|
||||||
|
return h.bloom
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h EthHeaderCID) EthTransactionCidsByHeaderId(ctx context.Context) EthTransactionCIDsConnection {
|
||||||
|
return EthTransactionCIDsConnection{nodes: h.transactions}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h EthHeaderCID) BlockByMhKey(ctx context.Context) IPFSBlock {
|
||||||
|
return h.ipfsBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
type EthHeaderCIDsConnection struct {
|
||||||
|
nodes []*EthHeaderCID
|
||||||
|
}
|
||||||
|
|
||||||
|
func (headerCIDResult EthHeaderCIDsConnection) Nodes(ctx context.Context) []*EthHeaderCID {
|
||||||
|
return headerCIDResult.nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
type EthHeaderCIDCondition struct {
|
||||||
|
BlockNumber *BigInt
|
||||||
|
BlockHash *string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Resolver) AllEthHeaderCids(ctx context.Context, args struct {
|
||||||
|
Condition *EthHeaderCIDCondition
|
||||||
|
}) (*EthHeaderCIDsConnection, error) {
|
||||||
|
var headerCIDs []eth.HeaderCIDRecord
|
||||||
|
var err error
|
||||||
|
if args.Condition.BlockHash != nil {
|
||||||
|
headerCID, err := r.backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(common.HexToHash(*args.Condition.BlockHash))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
headerCIDs = append(headerCIDs, headerCID)
|
||||||
|
} else if args.Condition.BlockNumber != nil {
|
||||||
|
headerCIDs, err = r.backend.Retriever.RetrieveHeaderAndTxCIDsByBlockNumber(args.Condition.BlockNumber.ToInt().Int64())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("provide block number or block hash")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Begin tx
|
||||||
|
tx, err := r.backend.DB.Beginx()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if p := recover(); p != nil {
|
||||||
|
shared.Rollback(tx)
|
||||||
|
panic(p)
|
||||||
|
} else if err != nil {
|
||||||
|
shared.Rollback(tx)
|
||||||
|
} else {
|
||||||
|
err = tx.Commit()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
var resultNodes []*EthHeaderCID
|
||||||
|
for _, headerCID := range headerCIDs {
|
||||||
|
var blockNumber BigInt
|
||||||
|
blockNumber.UnmarshalText([]byte(headerCID.BlockNumber))
|
||||||
|
|
||||||
|
var timestamp BigInt
|
||||||
|
timestamp.SetUint64(headerCID.Timestamp)
|
||||||
|
|
||||||
|
var td BigInt
|
||||||
|
td.UnmarshalText([]byte(headerCID.TotalDifficulty))
|
||||||
|
|
||||||
|
ethHeaderCIDNode := EthHeaderCID{
|
||||||
|
cid: headerCID.CID,
|
||||||
|
blockNumber: blockNumber,
|
||||||
|
blockHash: headerCID.BlockHash,
|
||||||
|
parentHash: headerCID.ParentHash,
|
||||||
|
timestamp: timestamp,
|
||||||
|
stateRoot: headerCID.StateRoot,
|
||||||
|
td: td,
|
||||||
|
txRoot: headerCID.TxRoot,
|
||||||
|
receiptRoot: headerCID.RctRoot,
|
||||||
|
uncleRoot: headerCID.UncleRoot,
|
||||||
|
bloom: Bytes(headerCID.Bloom).String(),
|
||||||
|
ipfsBlock: IPFSBlock{
|
||||||
|
key: headerCID.IPLD.Key,
|
||||||
|
data: Bytes(headerCID.IPLD.Data).String(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, txCID := range headerCID.TransactionCIDs {
|
||||||
|
ethHeaderCIDNode.transactions = append(ethHeaderCIDNode.transactions, &EthTransactionCID{
|
||||||
|
cid: txCID.CID,
|
||||||
|
txHash: txCID.TxHash,
|
||||||
|
index: int32(txCID.Index),
|
||||||
|
src: txCID.Src,
|
||||||
|
dst: txCID.Dst,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
resultNodes = append(resultNodes, ðHeaderCIDNode)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &EthHeaderCIDsConnection{
|
||||||
|
nodes: resultNodes,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
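A hedged example of hitting this resolver through the client's AllEthHeaderCIDs helper (client, ctx and blockHash are assumed; the condition type is the one defined just above):

hashStr := blockHash.Hex()
headers, err := client.AllEthHeaderCIDs(ctx, EthHeaderCIDCondition{BlockHash: &hashStr})
if err == nil && len(headers.Nodes) > 0 {
	fmt.Println(headers.Nodes[0].CID, headers.Nodes[0].BlockHash)
}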
|
func (r *Resolver) EthTransactionCidByTxHash(ctx context.Context, args struct {
|
||||||
|
TxHash string
|
||||||
|
}) (*EthTransactionCID, error) {
|
||||||
|
txCID, err := r.backend.Retriever.RetrieveTxCIDByHash(args.TxHash)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &EthTransactionCID{
|
||||||
|
cid: txCID.CID,
|
||||||
|
txHash: txCID.TxHash,
|
||||||
|
index: int32(txCID.Index),
|
||||||
|
src: txCID.Src,
|
||||||
|
dst: txCID.Dst,
|
||||||
|
ipfsBlock: IPFSBlock{
|
||||||
|
key: txCID.IPLD.Key,
|
||||||
|
data: Bytes(txCID.IPLD.Data).String(),
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
@@ -17,15 +17,318 @@
package graphql_test

import (
    "context"
    "fmt"
    "math/big"
    "strconv"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/core/vm"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/params"
    "github.com/ethereum/go-ethereum/rpc"
    "github.com/ethereum/go-ethereum/statediff"
    sdtypes "github.com/ethereum/go-ethereum/statediff/types"
    "github.com/jmoiron/sqlx"
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

-   "github.com/vulcanize/ipld-eth-server/pkg/graphql"
    "github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
    "github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers"
    "github.com/vulcanize/ipld-eth-server/v3/pkg/graphql"
    "github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
    ethServerShared "github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
)

var _ = Describe("GraphQL", func() {
-   It("Builds the schema and creates a new handler", func() {
-       _, err := graphql.NewHandler(nil)
    const (
        gqlEndPoint = "127.0.0.1:8083"
    )
    var (
        randomAddr      = common.HexToAddress("0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f")
        randomHash      = crypto.Keccak256Hash(randomAddr.Bytes())
        blocks          []*types.Block
        receipts        []types.Receipts
        chain           *core.BlockChain
        db              *sqlx.DB
        blockHashes     []common.Hash
        backend         *eth.Backend
        graphQLServer   *graphql.Service
        chainConfig     = params.TestChainConfig
        mockTD          = big.NewInt(1337)
        client          = graphql.NewClient(fmt.Sprintf("http://%s/graphql", gqlEndPoint))
        ctx             = context.Background()
        blockHash       common.Hash
        contractAddress common.Address
    )

    It("test init", func() {
        var err error
        db = shared.SetupDB()
        transformer := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())

        backend, err = eth.NewEthBackend(db, &eth.Config{
            ChainConfig: chainConfig,
            VMConfig:    vm.Config{},
            RPCGasCap:   big.NewInt(10000000000),
            GroupCacheConfig: &ethServerShared.GroupCacheConfig{
                StateDB: ethServerShared.GroupConfig{
                    Name:                   "graphql_test",
                    CacheSizeInMB:          8,
                    CacheExpiryInMins:      60,
                    LogStatsIntervalInSecs: 0,
                },
            },
        })
        Expect(err).ToNot(HaveOccurred())

        // make the test blockchain (and state)
        blocks, receipts, chain = test_helpers.MakeChain(5, test_helpers.Genesis, test_helpers.TestChainGen)
        params := statediff.Params{
            IntermediateStateNodes:   true,
            IntermediateStorageNodes: true,
        }

        // iterate over the blocks, generating statediff payloads, and transforming the data into Postgres
        builder := statediff.NewBuilder(chain.StateCache())
        for i, block := range blocks {
            blockHashes = append(blockHashes, block.Hash())
            var args statediff.Args
            var rcts types.Receipts
            if i == 0 {
                args = statediff.Args{
                    OldStateRoot: common.Hash{},
                    NewStateRoot: block.Root(),
                    BlockNumber:  block.Number(),
                    BlockHash:    block.Hash(),
                }
            } else {
                args = statediff.Args{
                    OldStateRoot: blocks[i-1].Root(),
                    NewStateRoot: block.Root(),
                    BlockNumber:  block.Number(),
                    BlockHash:    block.Hash(),
                }
                rcts = receipts[i-1]
            }

            var diff sdtypes.StateObject
            diff, err = builder.BuildStateDiffObject(args, params)
            Expect(err).ToNot(HaveOccurred())

            tx, err := transformer.PushBlock(block, rcts, mockTD)
            Expect(err).ToNot(HaveOccurred())

            for _, node := range diff.Nodes {
                err = transformer.PushStateNode(tx, node, block.Hash().String())
                Expect(err).ToNot(HaveOccurred())
            }

            err = tx.Submit(err)
            Expect(err).ToNot(HaveOccurred())
        }

        // Insert some non-canonical data into the database so that we test our ability to discern canonicity
        indexAndPublisher := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())

        blockHash = test_helpers.MockBlock.Hash()
        contractAddress = test_helpers.ContractAddr

        tx, err := indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
        Expect(err).ToNot(HaveOccurred())

        err = tx.Submit(err)
        Expect(err).ToNot(HaveOccurred())

        // The non-canonical header has a child
        tx, err = indexAndPublisher.PushBlock(test_helpers.MockChild, test_helpers.MockReceipts, test_helpers.MockChild.Difficulty())
        Expect(err).ToNot(HaveOccurred())

        ccHash := sdtypes.CodeAndCodeHash{
            Hash: test_helpers.CodeHash,
            Code: test_helpers.ContractCode,
        }

        err = indexAndPublisher.PushCodeAndCodeHash(tx, ccHash)
        Expect(err).ToNot(HaveOccurred())

        err = tx.Submit(err)
        Expect(err).ToNot(HaveOccurred())

        graphQLServer, err = graphql.New(backend, gqlEndPoint, nil, []string{"*"}, rpc.HTTPTimeouts{})
        Expect(err).ToNot(HaveOccurred())

        err = graphQLServer.Start(nil)
        Expect(err).ToNot(HaveOccurred())
    })

    defer It("test teardown", func() {
        err := graphQLServer.Stop()
        Expect(err).ToNot(HaveOccurred())
        shared.TearDownDB(db)
        chain.Stop()
    })

    Describe("eth_getLogs", func() {
        It("Retrieves logs that matches the provided blockHash and contract address", func() {
            logs, err := client.GetLogs(ctx, blockHash, &contractAddress)
            Expect(err).ToNot(HaveOccurred())

            expectedLogs := []graphql.LogResponse{
                {
                    Topics:      test_helpers.MockLog1.Topics,
                    Data:        hexutil.Bytes(test_helpers.MockLog1.Data),
                    Transaction: graphql.TransactionResponse{Hash: test_helpers.MockTransactions[0].Hash()},
                    ReceiptCID:  test_helpers.Rct1CID.String(),
                    Status:      int32(test_helpers.MockReceipts[0].Status),
                },
            }

            Expect(logs).To(Equal(expectedLogs))
        })

        It("Retrieves logs for the failed receipt status that matches the provided blockHash and another contract address", func() {
            logs, err := client.GetLogs(ctx, blockHash, &test_helpers.AnotherAddress2)
            Expect(err).ToNot(HaveOccurred())

            expectedLogs := []graphql.LogResponse{
                {
                    Topics:      test_helpers.MockLog6.Topics,
                    Data:        hexutil.Bytes(test_helpers.MockLog6.Data),
                    Transaction: graphql.TransactionResponse{Hash: test_helpers.MockTransactions[3].Hash()},
                    ReceiptCID:  test_helpers.Rct4CID.String(),
                    Status:      int32(test_helpers.MockReceipts[3].Status),
                },
            }

            Expect(logs).To(Equal(expectedLogs))
        })

        It("Retrieves all the logs for the receipt that matches the provided blockHash and nil contract address", func() {
            logs, err := client.GetLogs(ctx, blockHash, nil)
            Expect(err).ToNot(HaveOccurred())
            Expect(len(logs)).To(Equal(6))
        })

        It("Retrieves logs with random hash", func() {
            logs, err := client.GetLogs(ctx, randomHash, &contractAddress)
            Expect(err).ToNot(HaveOccurred())
            Expect(len(logs)).To(Equal(0))
        })
    })

    Describe("eth_getStorageAt", func() {
        It("Retrieves the storage value at the provided contract address and storage leaf key at the block with the provided hash", func() {
            storageRes, err := client.GetStorageAt(ctx, blockHashes[2], contractAddress, test_helpers.IndexOne)
            Expect(err).ToNot(HaveOccurred())
            Expect(storageRes.Value).To(Equal(common.HexToHash("01")))

            storageRes, err = client.GetStorageAt(ctx, blockHashes[3], contractAddress, test_helpers.IndexOne)
            Expect(err).ToNot(HaveOccurred())
            Expect(storageRes.Value).To(Equal(common.HexToHash("03")))

            storageRes, err = client.GetStorageAt(ctx, blockHashes[4], contractAddress, test_helpers.IndexOne)
            Expect(err).ToNot(HaveOccurred())
            Expect(storageRes.Value).To(Equal(common.HexToHash("09")))
        })

        It("Retrieves empty data if it tries to access a contract at a blockHash which does not exist", func() {
            storageRes, err := client.GetStorageAt(ctx, blockHashes[0], contractAddress, test_helpers.IndexOne)
            Expect(err).ToNot(HaveOccurred())
            Expect(storageRes.Value).To(Equal(common.Hash{}))

            storageRes, err = client.GetStorageAt(ctx, blockHashes[1], contractAddress, test_helpers.IndexOne)
            Expect(err).ToNot(HaveOccurred())
            Expect(storageRes.Value).To(Equal(common.Hash{}))
        })

        It("Retrieves empty data if it tries to access a contract slot which does not exist", func() {
            storageRes, err := client.GetStorageAt(ctx, blockHashes[3], contractAddress, randomHash.Hex())
            Expect(err).ToNot(HaveOccurred())
            Expect(storageRes.Value).To(Equal(common.Hash{}))
        })
    })

    Describe("allEthHeaderCids", func() {
        It("Retrieves header_cids that matches the provided blockNumber", func() {
            allEthHeaderCIDsResp, err := client.AllEthHeaderCIDs(ctx, graphql.EthHeaderCIDCondition{BlockNumber: new(graphql.BigInt).SetUint64(2)})
            Expect(err).ToNot(HaveOccurred())

            headerCIDs, err := backend.Retriever.RetrieveHeaderAndTxCIDsByBlockNumber(2)
            Expect(err).ToNot(HaveOccurred())

            for idx, headerCID := range headerCIDs {
                ethHeaderCID := allEthHeaderCIDsResp.Nodes[idx]

                compareEthHeaderCID(ethHeaderCID, headerCID)
            }
        })

        It("Retrieves header_cids that matches the provided blockHash", func() {
            blockHash := blocks[1].Hash().String()
            allEthHeaderCIDsResp, err := client.AllEthHeaderCIDs(ctx, graphql.EthHeaderCIDCondition{BlockHash: &blockHash})
            Expect(err).ToNot(HaveOccurred())

            headerCID, err := backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(blocks[1].Hash())
            Expect(err).ToNot(HaveOccurred())

            Expect(len(allEthHeaderCIDsResp.Nodes)).To(Equal(1))
            ethHeaderCID := allEthHeaderCIDsResp.Nodes[0]
            compareEthHeaderCID(ethHeaderCID, headerCID)
        })
    })

    Describe("ethTransactionCidByTxHash", func() {
        It("Retrieves tx_cid that matches the provided txHash", func() {
            txHash := blocks[2].Transactions()[0].Hash().String()
            ethTransactionCIDResp, err := client.EthTransactionCIDByTxHash(ctx, txHash)
            Expect(err).ToNot(HaveOccurred())

            txCID, err := backend.Retriever.RetrieveTxCIDByHash(txHash)
            Expect(err).ToNot(HaveOccurred())

            compareEthTxCID(*ethTransactionCIDResp, txCID)

            Expect(ethTransactionCIDResp.BlockByMhKey.Data).To(Equal(graphql.Bytes(txCID.IPLD.Data).String()))
        })
    })
})

func compareEthHeaderCID(ethHeaderCID graphql.EthHeaderCIDResponse, headerCID eth.HeaderCIDRecord) {
    blockNumber, err := strconv.ParseInt(headerCID.BlockNumber, 10, 64)
    Expect(err).ToNot(HaveOccurred())

    td, err := strconv.ParseInt(headerCID.TotalDifficulty, 10, 64)
    Expect(err).ToNot(HaveOccurred())

    Expect(ethHeaderCID.CID).To(Equal(headerCID.CID))
    Expect(ethHeaderCID.BlockNumber).To(Equal(*new(graphql.BigInt).SetUint64(uint64(blockNumber))))
    Expect(ethHeaderCID.BlockHash).To(Equal(headerCID.BlockHash))
    Expect(ethHeaderCID.ParentHash).To(Equal(headerCID.ParentHash))
    Expect(ethHeaderCID.Timestamp).To(Equal(*new(graphql.BigInt).SetUint64(headerCID.Timestamp)))
    Expect(ethHeaderCID.StateRoot).To(Equal(headerCID.StateRoot))
    Expect(ethHeaderCID.Td).To(Equal(*new(graphql.BigInt).SetUint64(uint64(td))))
    Expect(ethHeaderCID.TxRoot).To(Equal(headerCID.TxRoot))
    Expect(ethHeaderCID.ReceiptRoot).To(Equal(headerCID.RctRoot))
    Expect(ethHeaderCID.UncleRoot).To(Equal(headerCID.UncleRoot))
    Expect(ethHeaderCID.Bloom).To(Equal(graphql.Bytes(headerCID.Bloom).String()))

    for tIdx, txCID := range headerCID.TransactionCIDs {
        ethTxCID := ethHeaderCID.EthTransactionCIDsByHeaderId.Nodes[tIdx]
        compareEthTxCID(ethTxCID, txCID)
    }

    Expect(ethHeaderCID.BlockByMhKey.Data).To(Equal(graphql.Bytes(headerCID.IPLD.Data).String()))
    Expect(ethHeaderCID.BlockByMhKey.Key).To(Equal(headerCID.IPLD.Key))
}

func compareEthTxCID(ethTxCID graphql.EthTransactionCIDResponse, txCID eth.TransactionCIDRecord) {
    Expect(ethTxCID.CID).To(Equal(txCID.CID))
    Expect(ethTxCID.TxHash).To(Equal(txCID.TxHash))
    Expect(ethTxCID.Index).To(Equal(int32(txCID.Index)))
    Expect(ethTxCID.Src).To(Equal(txCID.Src))
    Expect(ethTxCID.Dst).To(Equal(txCID.Dst))
}
@@ -25,8 +25,7 @@ const schema string = `
    # An empty byte string is represented as '0x'. Byte strings must have an even number of hexadecimal nybbles.
    scalar Bytes
    # BigInt is a large integer. Input is accepted as either a JSON number or as a string.
-   # Strings may be either decimal or 0x-prefixed hexadecimal. Output values are all
-   # 0x-prefixed hexadecimal.
    # Input and output strings may be either decimal or 0x-prefixed hexadecimal depending upon the resolver implementation.
    scalar BigInt
    # Long is a 64 bit unsigned integer.
    scalar Long
@@ -65,7 +64,19 @@ const schema string = `
    # Data is unindexed data for this log.
    data: Bytes!
    # Transaction is the transaction that generated this log entry.
-   transaction: Transaction!
    transaction: Transaction

    # CID for the leaf node IPLD block of the log.
    cid: String!

    # ReceiptCID for the Receipt IPLD block this Log exists in.
    receiptCID: String!

    # IPLD block data for the Log Leaf node.
    ipldBlock: Bytes!

    # Status of the Receipt IPLD block this Log exists in.
    status: Int!
}

# Transaction is an Ethereum transaction.
@@ -126,16 +137,16 @@ const schema string = `
    # empty, results will not be filtered by address.
    addresses: [Address!]
    # Topics list restricts matches to particular event topics. Each event has a list
    # of topics. Topics matches a prefix of that list. An empty element array matches any
    # topic. Non-empty elements represent an alternative that matches any of the
    # contained topics.
    #
    # Examples:
    # - [] or nil matches any topic list
    # - [[A]] matches topic A in first position
    # - [[], [B]] matches any topic in first position, B in second position
    # - [[A], [B]] matches topic A in first position, B in second position
    # - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position
    topics: [[Bytes32!]!]
}

@@ -246,29 +257,98 @@ const schema string = `
    # empty, results will not be filtered by address.
    addresses: [Address!]
    # Topics list restricts matches to particular event topics. Each event has a list
    # of topics. Topics matches a prefix of that list. An empty element array matches any
    # topic. Non-empty elements represent an alternative that matches any of the
    # contained topics.
    #
    # Examples:
    # - [] or nil matches any topic list
    # - [[A]] matches topic A in first position
    # - [[], [B]] matches any topic in first position, B in second position
    # - [[A], [B]] matches topic A in first position, B in second position
    # - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position
    topics: [[Bytes32!]!]
}

# Storage trie value with IPLD data.
type StorageResult {
    value: Bytes32!

    # CID for the storage trie IPLD block.
    cid: String!

    # Storage trie IPLD block.
    ipldBlock: Bytes!
}

input EthHeaderCidCondition {
    blockNumber: BigInt
    blockHash: String
}

type EthTransactionCid {
    cid: String!
    txHash: String!
    index: Int!
    src: String!
    dst: String!
    blockByMhKey: IPFSBlock!
}

type EthTransactionCidsConnection {
    nodes: [EthTransactionCid]!
}

type IPFSBlock {
    key: String!
    data: String!
}

type EthHeaderCid {
    cid: String!
    blockNumber: BigInt!
    blockHash: String!
    parentHash: String!
    timestamp: BigInt!
    stateRoot: String!
    td: BigInt!
    txRoot: String!
    receiptRoot: String!
    uncleRoot: String!
    bloom: String!
    ethTransactionCidsByHeaderId: EthTransactionCidsConnection!
    blockByMhKey: IPFSBlock!
}

type EthHeaderCidsConnection {
    nodes: [EthHeaderCid]!
}

type Query {
    # Block fetches an Ethereum block by number or by hash. If neither is
    # supplied, the most recent known block is returned.
    block(number: Long, hash: Bytes32): Block

    # Blocks returns all the blocks between two numbers, inclusive. If
    # to is not supplied, it defaults to the most recent known block.
    blocks(from: Long!, to: Long): [Block!]!

    # Transaction returns a transaction specified by its hash.
    transaction(hash: Bytes32!): Transaction

    # Logs returns log entries matching the provided filter.
    logs(filter: FilterCriteria!): [Log!]!

    # Get storage slot by block hash and contract address.
    getStorageAt(blockHash: Bytes32!, contract: Address!, slot: Bytes32!): StorageResult

    # Get contract logs by block hash and contract address.
    getLogs(blockHash: Bytes32!, contract: Address): [Log!]

    # PostGraphile alternative to get headers with transactions using block number or block hash.
    allEthHeaderCids(condition: EthHeaderCidCondition): EthHeaderCidsConnection

    # PostGraphile alternative to get transactions using transaction hash.
    ethTransactionCidByTxHash(txHash: String!): EthTransactionCid
}
`
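To show how the new getStorageAt and getLogs query fields are consumed from Go: a minimal sketch using the package's client, whose method signatures appear in the test suite above. The endpoint, block hash, address, and slot key are placeholders, not values from this changeset.

package main

import (
    "context"
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/vulcanize/ipld-eth-server/v3/pkg/graphql"
)

func main() {
    ctx := context.Background()
    client := graphql.NewClient("http://127.0.0.1:8083/graphql") // placeholder endpoint

    blockHash := common.HexToHash("0x...")   // placeholder block hash
    contract := common.HexToAddress("0x...") // placeholder contract address

    // Storage slot value (plus its CID and IPLD block) at the given block.
    storage, err := client.GetStorageAt(ctx, blockHash, contract, "0x01") // placeholder slot key
    if err == nil {
        fmt.Println(storage.Value)
    }

    // Logs emitted by the contract in that block; pass nil to skip address filtering.
    logs, err := client.GetLogs(ctx, blockHash, &contract)
    if err == nil {
        fmt.Println(len(logs))
    }
}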
@@ -29,7 +29,7 @@ import (
    "github.com/graph-gophers/graphql-go/relay"
    "github.com/sirupsen/logrus"

-   "github.com/vulcanize/ipld-eth-server/pkg/eth"
    "github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
)

// Service encapsulates a GraphQL service.
@@ -69,7 +69,7 @@ func (s *Service) Start(server *p2p.Server) error {
        return err
    }

-   handler := node.NewHTTPHandlerStack(s.handler, s.cors, s.vhosts)
    handler := node.NewHTTPHandlerStack(s.handler, s.cors, s.vhosts, nil)

    // start http server
    _, addr, err := node.StartHTTPEndpoint(s.endpoint, rpc.DefaultHTTPTimeouts, handler)
pkg/graphql/types.go (new file, 122 lines)
@@ -0,0 +1,122 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package graphql

import (
    "encoding/hex"
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/common/hexutil"
)

// Bytes marshals as a JSON string with \x prefix.
// The empty slice marshals as "\x".
type Bytes []byte

// MarshalText implements encoding.TextMarshaler
func (b Bytes) MarshalText() ([]byte, error) {
    result := make([]byte, len(b)*2+2)
    copy(result, `\x`)
    hex.Encode(result[2:], b)
    return result, nil
}

// String returns the hex encoding of b.
func (b Bytes) String() string {
    return b.encode()
}

// Encode encodes b as a hex string with "\x" prefix.
// This is to make the output to be the same as given by postgraphile.
// graphql-go prepends another "\" to the output resulting in prefix "\\x".
func (b Bytes) encode() string {
    result := make([]byte, len(b)*2+2)
    copy(result, `\x`)
    hex.Encode(result[2:], b)
    return string(result)
}

type BigInt big.Int

// ToInt converts b to a big.Int.
func (b *BigInt) ToInt() *big.Int {
    return (*big.Int)(b)
}

// String returns value of b as a decimal string.
func (b *BigInt) String() string {
    return b.ToInt().String()
}

// SetUint64 sets b to x and returns x.
func (b *BigInt) SetUint64(x uint64) *BigInt {
    var val big.Int
    val.SetUint64(x)
    *b = (BigInt)(val)
    return b
}

// MarshalText implements encoding.TextMarshaler
func (b BigInt) MarshalText() ([]byte, error) {
    return []byte(b.String()), nil
}

// UnmarshalText implements encoding.TextUnmarshaler
func (b *BigInt) UnmarshalText(input []byte) error {
    raw, err := checkNumberText(input)
    if err != nil {
        return err
    }
    if len(raw) > 64 {
        return hexutil.ErrBig256Range
    }

    var val big.Int
    val.SetString(string(input[:]), 10)
    *b = (BigInt)(val)
    return nil
}

// ImplementsGraphQLType returns true if BigInt implements the provided GraphQL type.
func (b BigInt) ImplementsGraphQLType(name string) bool { return name == "BigInt" }

// UnmarshalGraphQL unmarshals the provided GraphQL query data.
func (b *BigInt) UnmarshalGraphQL(input interface{}) error {
    var err error
    switch input := input.(type) {
    case string:
        return b.UnmarshalText([]byte(input))
    case int32:
        var num big.Int
        num.SetInt64(int64(input))
        *b = BigInt(num)
    default:
        err = fmt.Errorf("unexpected type %T for BigInt", input)
    }
    return err
}

func checkNumberText(input []byte) (raw []byte, err error) {
    if len(input) == 0 {
        return nil, nil // empty strings are allowed
    }
    if len(input) > 1 && input[0] == '0' {
        return nil, hexutil.ErrLeadingZero
    }
    return input, nil
}
pkg/net/api.go
Normal file
90
pkg/net/api.go
Normal file
@ -0,0 +1,90 @@
|
|||||||
|
// VulcanizeDB
|
||||||
|
// Copyright © 2021 Vulcanize
|
||||||
|
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package net
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
|
"github.com/ethereum/go-ethereum/ethclient"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
// APIName is the namespace for the watcher's eth api
|
||||||
|
const APIName = "net"
|
||||||
|
|
||||||
|
// APIVersion is the version of the watcher's eth api
|
||||||
|
const APIVersion = "0.0.1"
|
||||||
|
|
||||||
|
// PublicNetAPI is the net nampespace API
|
||||||
|
type PublicNetAPI struct {
|
||||||
|
// Proxy node for forwarding cache misses
|
||||||
|
networkVersion uint64
|
||||||
|
rpc *rpc.Client
|
||||||
|
ethClient *ethclient.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPublicNetAPI creates a new PublicNetAPI with the provided underlying Backend
|
||||||
|
func NewPublicNetAPI(networkID uint64, client *rpc.Client) *PublicNetAPI {
|
||||||
|
var ethClient *ethclient.Client
|
||||||
|
if client != nil {
|
||||||
|
ethClient = ethclient.NewClient(client)
|
||||||
|
}
|
||||||
|
return &PublicNetAPI{
|
||||||
|
networkVersion: networkID,
|
||||||
|
rpc: client,
|
||||||
|
ethClient: ethClient,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Listening returns an indication if the node is listening for network connections.
|
||||||
|
func (pna *PublicNetAPI) Listening() bool {
|
||||||
|
// in this case it is actually whether or not the proxied node is listening
|
||||||
|
if pna.rpc != nil {
|
||||||
|
var listening bool
|
||||||
|
if err := pna.rpc.Call(&listening, "net_listening"); err == nil {
|
||||||
|
return listening
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// PeerCount returns the number of connected peers
|
||||||
|
func (pna *PublicNetAPI) PeerCount() hexutil.Uint {
|
||||||
|
// in this case it is actually the peer count of the proxied node
|
||||||
|
if pna.rpc != nil {
|
||||||
|
var num hexutil.Uint
|
||||||
|
if err := pna.rpc.Call(&num, "net_peerCount"); err == nil {
|
||||||
|
return num
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return hexutil.Uint(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Version returns the current ethereum protocol version.
|
||||||
|
func (pna *PublicNetAPI) Version() string {
|
||||||
|
if pna.networkVersion != 0 {
|
||||||
|
return fmt.Sprintf("%d", pna.networkVersion)
|
||||||
|
}
|
||||||
|
if pna.rpc != nil {
|
||||||
|
var version string
|
||||||
|
if err := pna.rpc.Call(&version, "net_version"); err == nil {
|
||||||
|
return version
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
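For orientation only: the net namespace above is exposed over RPC by wrapping it in an rpc.API descriptor, in the same manner the other server namespaces are registered. A hedged sketch; the networkID value and the nil proxy client are placeholders.

package main

import (
    "github.com/ethereum/go-ethereum/rpc"

    "github.com/vulcanize/ipld-eth-server/v3/pkg/net"
)

// netAPI builds the rpc.API descriptor for the "net" namespace so it can be
// passed to the endpoint helpers alongside the other API descriptors.
func netAPI() rpc.API {
    return rpc.API{
        Namespace: net.APIName,
        Version:   net.APIVersion,
        Service:   net.NewPublicNetAPI(1, nil), // placeholder network id, no proxy client
        Public:    true,
    }
}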
pkg/net/api_test.go (new file, 47 lines)
@@ -0,0 +1,47 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package net_test

import (
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

    "github.com/vulcanize/ipld-eth-server/v3/pkg/net"
)

var _ = Describe("API", func() {
    var (
        api *net.PublicNetAPI
    )
    BeforeEach(func() {
        api = net.NewPublicNetAPI(1, nil)
    })
    Describe("net_listening", func() {
        It("Retrieves whether or not the node is listening to the p2p network", func() {
            listening := api.Listening()
            Expect(listening).To(BeFalse())
        })
    })

    Describe("net_version", func() {
        It("Retrieves the network id", func() {
            version := api.Version()
            Expect(version).To(Equal("1"))
        })
    })
    // TODO: test PeerCount with mock proxy node
})
pkg/net/net_suite_test.go (new file, 35 lines)
@@ -0,0 +1,35 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package net_test

import (
    "io/ioutil"
    "testing"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    "github.com/sirupsen/logrus"
)

func TestNetSuite(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "eth ipld server net suite test")
}

var _ = BeforeSuite(func() {
    logrus.SetOutput(ioutil.Discard)
})
@@ -29,11 +29,11 @@ import (
func StartHTTPEndpoint(endpoint string, apis []rpc.API, modules []string, cors []string, vhosts []string, timeouts rpc.HTTPTimeouts) (*rpc.Server, error) {

    srv := rpc.NewServer()
-   err := node.RegisterApisFromWhitelist(apis, modules, srv, false)
    err := node.RegisterApis(apis, modules, srv, false)
    if err != nil {
        utils.Fatalf("Could not register HTTP API: %w", err)
    }
-   handler := node.NewHTTPHandlerStack(srv, cors, vhosts)
    handler := node.NewHTTPHandlerStack(srv, cors, vhosts, nil)

    // start http server
    _, addr, err := node.StartHTTPEndpoint(endpoint, rpc.DefaultHTTPTimeouts, handler)
@@ -41,7 +41,7 @@ func StartHTTPEndpoint(endpoint string, apis []rpc.API, modules []string, cors [
        utils.Fatalf("Could not start RPC api: %v", err)
    }
    extapiURL := fmt.Sprintf("http://%v/", addr)
-   log.Info("HTTP endpoint opened", "url", extapiURL)
    log.Infof("HTTP endpoint opened %s", extapiURL)

    return srv, err
}
@@ -25,7 +25,7 @@ import (
    "github.com/ethereum/go-ethereum/p2p/netutil"
    "github.com/ethereum/go-ethereum/rpc"
    log "github.com/sirupsen/logrus"
-   "github.com/vulcanize/ipld-eth-server/pkg/prom"
    "github.com/vulcanize/ipld-eth-server/v3/pkg/prom"
)

var (
@@ -24,7 +24,7 @@ import (
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/rpc"

-   "github.com/vulcanize/ipld-eth-server/pkg/prom"
    "github.com/vulcanize/ipld-eth-server/v3/pkg/prom"
)

// StartWSEndpoint starts a websocket endpoint.
@@ -37,7 +37,7 @@ func StartWSEndpoint(endpoint string, apis []rpc.API, modules []string, wsOrigin

    // Register all the APIs exposed by the services
    handler := rpc.NewServer()
-   err = node.RegisterApisFromWhitelist(apis, modules, handler, exposeAll)
    err = node.RegisterApis(apis, modules, handler, exposeAll)
    if err != nil {
        utils.Fatalf("Could not register WS API: %w", err)
    }
@@ -19,14 +19,11 @@ package serve
import (
    "context"

-   "github.com/ethereum/go-ethereum/p2p"
    "github.com/ethereum/go-ethereum/rpc"
    "github.com/ethereum/go-ethereum/statediff/types"
    log "github.com/sirupsen/logrus"

-   "github.com/vulcanize/ipld-eth-indexer/pkg/shared"
-   "github.com/vulcanize/ipld-eth-server/pkg/eth"
-   v "github.com/vulcanize/ipld-eth-server/version"
    "github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
)

// APIName is the namespace used for the state diffing service API
@@ -37,13 +34,15 @@ const APIVersion = "0.0.1"

// PublicServerAPI is the public api for the watcher
type PublicServerAPI struct {
    w   Server
    rpc *rpc.Client
}

// NewPublicServerAPI creates a new PublicServerAPI with the provided underlying Server process
-func NewPublicServerAPI(w Server) *PublicServerAPI {
func NewPublicServerAPI(w Server, client *rpc.Client) *PublicServerAPI {
    return &PublicServerAPI{
        w:   w,
        rpc: client,
    }
}

@@ -86,36 +85,12 @@ func (api *PublicServerAPI) Stream(ctx context.Context, params eth.SubscriptionS
    return rpcSub, nil
}

-// Chain returns the chain type that this watcher instance supports
-func (api *PublicServerAPI) Chain() shared.ChainType {
-   return shared.Ethereum
-}
-
-// Struct for holding watcher meta data
-type InfoAPI struct{}
-
-// NewInfoAPI creates a new InfoAPI
-func NewInfoAPI() *InfoAPI {
-   return &InfoAPI{}
-}
-
-// Modules returns modules supported by this api
-func (iapi *InfoAPI) Modules() map[string]string {
-   return map[string]string{
-       "vdb": "Stream",
-   }
-}
-
-// NodeInfo gathers and returns a collection of metadata for the watcher
-func (iapi *InfoAPI) NodeInfo() *p2p.NodeInfo {
-   return &p2p.NodeInfo{
-       // TODO: formalize this
-       ID:   "vulcanizeDB",
-       Name: "ipld-eth-server",
-   }
-}
-
-// Version returns the version of the watcher
-func (iapi *InfoAPI) Version() string {
-   return v.VersionWithMeta
-}
// WatchAddress makes a geth WatchAddress API call with the given operation and args
func (api *PublicServerAPI) WatchAddress(operation types.OperationType, args []types.WatchAddressArg) error {
    err := api.rpc.Call(nil, "statediff_watchAddress", operation, args)
    if err != nil {
        return err
    }

    return nil
}
@@ -17,23 +17,23 @@
package serve

import (
    "errors"
    "fmt"
    "math/big"
    "os"
    "path/filepath"
    "time"
-   "github.com/ethereum/go-ethereum/rpc"
-   "github.com/vulcanize/ipld-eth-indexer/pkg/shared"

    "github.com/ethereum/go-ethereum/common"

    "github.com/ethereum/go-ethereum/params"
    "github.com/ethereum/go-ethereum/rpc"
    "github.com/ethereum/go-ethereum/statediff"
    "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
    "github.com/jmoiron/sqlx"
    "github.com/spf13/viper"
-   "github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
-   "github.com/vulcanize/ipld-eth-indexer/utils"
-   "github.com/vulcanize/ipld-eth-server/pkg/prom"
-   "github.com/vulcanize/ipld-eth-server/pkg/eth"
    "github.com/vulcanize/ipld-eth-server/v3/pkg/prom"
    ethServerShared "github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
)

// Env variables
@@ -48,21 +48,53 @@ const (

    ETH_DEFAULT_SENDER_ADDR = "ETH_DEFAULT_SENDER_ADDR"
    ETH_RPC_GAS_CAP         = "ETH_RPC_GAS_CAP"
    ETH_CHAIN_CONFIG        = "ETH_CHAIN_CONFIG"
    ETH_SUPPORTS_STATEDIFF  = "ETH_SUPPORTS_STATEDIFF"
    ETH_FORWARD_ETH_CALLS   = "ETH_FORWARD_ETH_CALLS"
    ETH_PROXY_ON_ERROR      = "ETH_PROXY_ON_ERROR"

    VALIDATOR_ENABLED         = "VALIDATOR_ENABLED"
    VALIDATOR_EVERY_NTH_BLOCK = "VALIDATOR_EVERY_NTH_BLOCK"
)

// Config struct
type Config struct {
-   DB           *postgres.DB
    DB       *sqlx.DB
    DBConfig postgres.Config
-   WSEndpoint   string
-   HTTPEndpoint string
-   IPCEndpoint  string

    WSEnabled  bool
    WSEndpoint string

    HTTPEnabled  bool
    HTTPEndpoint string

    IPCEnabled  bool
    IPCEndpoint string

    EthGraphqlEnabled  bool
    EthGraphqlEndpoint string

    IpldGraphqlEnabled          bool
    IpldGraphqlEndpoint         string
    IpldPostgraphileEndpoint    string
    TracingHttpEndpoint         string
    TracingPostgraphileEndpoint string

    ChainConfig      *params.ChainConfig
    DefaultSender    *common.Address
    RPCGasCap        *big.Int
    EthHttpEndpoint  string
    Client           *rpc.Client
    SupportStateDiff bool
    ForwardEthCalls  bool
    ProxyOnError     bool
    NodeNetworkID    string

    // Cache configuration.
    GroupCache *ethServerShared.GroupCacheConfig

    StateValidationEnabled       bool
    StateValidationEveryNthBlock uint64
}

// NewConfig is used to initialize a watcher config from a .toml file
@@ -70,47 +102,112 @@ type Config struct {
func NewConfig() (*Config, error) {
    c := new(Config)

-   viper.BindEnv("server.wsPath", SERVER_WS_PATH)
-   viper.BindEnv("server.ipcPath", SERVER_IPC_PATH)
-   viper.BindEnv("server.httpPath", SERVER_HTTP_PATH)
-   viper.BindEnv("ethereum.httpPath", shared.ETH_HTTP_PATH)
    viper.BindEnv("ethereum.httpPath", ETH_HTTP_PATH)
    viper.BindEnv("ethereum.defaultSender", ETH_DEFAULT_SENDER_ADDR)
    viper.BindEnv("ethereum.rpcGasCap", ETH_RPC_GAS_CAP)
    viper.BindEnv("ethereum.chainConfig", ETH_CHAIN_CONFIG)
    viper.BindEnv("ethereum.supportsStateDiff", ETH_SUPPORTS_STATEDIFF)
    viper.BindEnv("ethereum.forwardEthCalls", ETH_FORWARD_ETH_CALLS)
    viper.BindEnv("ethereum.proxyOnError", ETH_PROXY_ON_ERROR)

-   c.DBConfig.Init()
    c.dbInit()

    ethHTTP := viper.GetString("ethereum.httpPath")
-   nodeInfo, cli, err := shared.GetEthNodeAndClient(fmt.Sprintf("http://%s", ethHTTP))
    ethHTTPEndpoint := fmt.Sprintf("http://%s", ethHTTP)
    nodeInfo, cli, err := getEthNodeAndClient(ethHTTPEndpoint)
    c.NodeNetworkID = nodeInfo.NetworkID
    if err != nil {
        return nil, err
    }
    c.Client = cli
    c.SupportStateDiff = viper.GetBool("ethereum.supportsStateDiff")
    c.ForwardEthCalls = viper.GetBool("ethereum.forwardEthCalls")
    c.ProxyOnError = viper.GetBool("ethereum.proxyOnError")
    c.EthHttpEndpoint = ethHTTPEndpoint

-   wsPath := viper.GetString("server.wsPath")
-   if wsPath == "" {
-       wsPath = "127.0.0.1:8080"
-   }
-   c.WSEndpoint = wsPath
-   ipcPath := viper.GetString("server.ipcPath")
-   if ipcPath == "" {
-       home, err := os.UserHomeDir()
-       if err != nil {
-           return nil, err
-       }
-       ipcPath = filepath.Join(home, ".vulcanize/vulcanize.ipc")
-   }
-   c.IPCEndpoint = ipcPath
-   httpPath := viper.GetString("server.httpPath")
-   if httpPath == "" {
-       httpPath = "127.0.0.1:8081"
-   }
-   c.HTTPEndpoint = httpPath
    // websocket server
    wsEnabled := viper.GetBool("eth.server.ws")
    if wsEnabled {
        wsPath := viper.GetString("eth.server.wsPath")
        if wsPath == "" {
            wsPath = "127.0.0.1:8080"
        }
        c.WSEndpoint = wsPath
    }
    c.WSEnabled = wsEnabled

    // ipc server
    ipcEnabled := viper.GetBool("eth.server.ipc")
    if ipcEnabled {
        ipcPath := viper.GetString("eth.server.ipcPath")
        if ipcPath == "" {
            home, err := os.UserHomeDir()
            if err != nil {
                return nil, err
            }
            ipcPath = filepath.Join(home, ".vulcanize/vulcanize.ipc")
        }
        c.IPCEndpoint = ipcPath
    }
    c.IPCEnabled = ipcEnabled

    // http server
    httpEnabled := viper.GetBool("eth.server.http")
    if httpEnabled {
        httpPath := viper.GetString("eth.server.httpPath")
        if httpPath == "" {
            httpPath = "127.0.0.1:8081"
        }
        c.HTTPEndpoint = httpPath
    }
    c.HTTPEnabled = httpEnabled

    // eth graphql endpoint
    ethGraphqlEnabled := viper.GetBool("eth.server.graphql")
    if ethGraphqlEnabled {
        ethGraphqlPath := viper.GetString("eth.server.graphqlPath")
        if ethGraphqlPath == "" {
            ethGraphqlPath = "127.0.0.1:8082"
        }
        c.EthGraphqlEndpoint = ethGraphqlPath
    }
    c.EthGraphqlEnabled = ethGraphqlEnabled

    // ipld graphql endpoint
    ipldGraphqlEnabled := viper.GetBool("ipld.server.graphql")
    if ipldGraphqlEnabled {
        ipldGraphqlPath := viper.GetString("ipld.server.graphqlPath")
        if ipldGraphqlPath == "" {
            ipldGraphqlPath = "127.0.0.1:8083"
        }
        c.IpldGraphqlEndpoint = ipldGraphqlPath

        ipldPostgraphilePath := viper.GetString("ipld.postgraphilePath")
        if ipldPostgraphilePath == "" {
            return nil, errors.New("ipld-postgraphile-path parameter is empty")
        }
        c.IpldPostgraphileEndpoint = ipldPostgraphilePath

        tracingHttpEndpoint := viper.GetString("tracing.httpPath")
        tracingPostgraphilePath := viper.GetString("tracing.postgraphilePath")

        // these two parameters either can be both empty or both set
        if (tracingHttpEndpoint == "" && tracingPostgraphilePath != "") || (tracingHttpEndpoint != "" && tracingPostgraphilePath == "") {
            return nil, errors.New("tracing.httpPath and tracing.postgraphilePath parameters either can be both empty or both set")
        }

        c.TracingHttpEndpoint = tracingHttpEndpoint
        c.TracingPostgraphileEndpoint = tracingPostgraphilePath
    }
    c.IpldGraphqlEnabled = ipldGraphqlEnabled

    overrideDBConnConfig(&c.DBConfig)
-   serveDB := utils.LoadPostgres(c.DBConfig, nodeInfo, false)
-   prom.RegisterDBCollector(c.DBConfig.Name, serveDB.DB)
-   c.DB = &serveDB
    serveDB, err := ethServerShared.NewDB(c.DBConfig.DbConnectionString(), c.DBConfig)
    if err != nil {
        return nil, err
    }

    prom.RegisterDBCollector(c.DBConfig.DatabaseName, serveDB)
    c.DB = serveDB

    defaultSenderStr := viper.GetString("ethereum.defaultSender")
    if defaultSenderStr != "" {
@@ -123,7 +220,17 @@ func NewConfig() (*Config, error) {
            c.RPCGasCap = rpcGasCap
        }
    }
-   c.ChainConfig, err = eth.ChainConfig(nodeInfo.ChainID)
    chainConfigPath := viper.GetString("ethereum.chainConfig")
    if chainConfigPath != "" {
        c.ChainConfig, err = statediff.LoadConfig(chainConfigPath)
    } else {
        c.ChainConfig, err = statediff.ChainConfig(nodeInfo.ChainID)
    }

    c.loadGroupCacheConfig()

    c.loadValidatorConfig()

    return c, err
}

@@ -132,6 +239,57 @@ func overrideDBConnConfig(con *postgres.Config) {
    viper.BindEnv("database.server.maxOpen", SERVER_MAX_OPEN_CONNECTIONS)
    viper.BindEnv("database.server.maxLifetime", SERVER_MAX_CONN_LIFETIME)
    con.MaxIdle = viper.GetInt("database.server.maxIdle")
-   con.MaxOpen = viper.GetInt("database.server.maxOpen")
-   con.MaxLifetime = viper.GetInt("database.server.maxLifetime")
    con.MaxConns = viper.GetInt("database.server.maxOpen")
    con.MaxConnLifetime = time.Duration(viper.GetInt("database.server.maxLifetime"))
}

func (c *Config) dbInit() {
    viper.BindEnv("database.name", DATABASE_NAME)
    viper.BindEnv("database.hostname", DATABASE_HOSTNAME)
    viper.BindEnv("database.port", DATABASE_PORT)
    viper.BindEnv("database.user", DATABASE_USER)
    viper.BindEnv("database.password", DATABASE_PASSWORD)
    viper.BindEnv("database.maxIdle", DATABASE_MAX_IDLE_CONNECTIONS)
    viper.BindEnv("database.maxOpen", DATABASE_MAX_OPEN_CONNECTIONS)
    viper.BindEnv("database.maxLifetime", DATABASE_MAX_CONN_LIFETIME)

    c.DBConfig.DatabaseName = viper.GetString("database.name")
    c.DBConfig.Hostname = viper.GetString("database.hostname")
    c.DBConfig.Port = viper.GetInt("database.port")
    c.DBConfig.Username = viper.GetString("database.user")
    c.DBConfig.Password = viper.GetString("database.password")
    c.DBConfig.MaxIdle = viper.GetInt("database.maxIdle")
    c.DBConfig.MaxConns = viper.GetInt("database.maxOpen")
    c.DBConfig.MaxConnLifetime = time.Duration(viper.GetInt("database.maxLifetime"))
}

func (c *Config) loadGroupCacheConfig() {
    viper.BindEnv("groupcache.pool.enabled", ethServerShared.GcachePoolEnabled)
    viper.BindEnv("groupcache.pool.httpEndpoint", ethServerShared.GcachePoolHttpPath)
    viper.BindEnv("groupcache.pool.peerHttpEndpoints", ethServerShared.GcachePoolHttpPeers)
    viper.BindEnv("groupcache.statedb.cacheSizeInMB", ethServerShared.GcacheStatedbCacheSize)
    viper.BindEnv("groupcache.statedb.cacheExpiryInMins", ethServerShared.GcacheStatedbCacheExpiry)
    viper.BindEnv("groupcache.statedb.logStatsIntervalInSecs", ethServerShared.GcacheStatedbLogStatsInterval)

    gcc := ethServerShared.GroupCacheConfig{}
    gcc.Pool.Enabled = viper.GetBool("groupcache.pool.enabled")
    if gcc.Pool.Enabled {
        gcc.Pool.HttpEndpoint = viper.GetString("groupcache.pool.httpEndpoint")
        gcc.Pool.PeerHttpEndpoints = viper.GetStringSlice("groupcache.pool.peerHttpEndpoints")
    }

    // Irrespective of whether the pool is enabled, we always use the hot/local cache.
    gcc.StateDB.CacheSizeInMB = viper.GetInt("groupcache.statedb.cacheSizeInMB")
    gcc.StateDB.CacheExpiryInMins = viper.GetInt("groupcache.statedb.cacheExpiryInMins")
    gcc.StateDB.LogStatsIntervalInSecs = viper.GetInt("groupcache.statedb.logStatsIntervalInSecs")

    c.GroupCache = &gcc
}

func (c *Config) loadValidatorConfig() {
    viper.BindEnv("validator.enabled", VALIDATOR_ENABLED)
    viper.BindEnv("validator.everyNthBlock", VALIDATOR_EVERY_NTH_BLOCK)

    c.StateValidationEnabled = viper.GetBool("validator.enabled")
    c.StateValidationEveryNthBlock = viper.GetUint64("validator.everyNthBlock")
}
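An editor's sketch of how the new toggles read by NewConfig above might be exercised programmatically; the key names come from the code above, while the values are placeholders, and NewConfig still expects a reachable eth node and database. Normally these keys are set in the .toml config or via the bound environment variables rather than with viper.Set.

package main

import (
    "github.com/spf13/viper"

    "github.com/vulcanize/ipld-eth-server/v3/pkg/serve"
)

func main() {
    // Placeholder values for the nested keys NewConfig reads.
    viper.Set("ethereum.httpPath", "127.0.0.1:8545")
    viper.Set("eth.server.http", true)
    viper.Set("eth.server.httpPath", "127.0.0.1:8081")
    viper.Set("eth.server.graphql", true)
    viper.Set("eth.server.graphqlPath", "127.0.0.1:8082")
    viper.Set("validator.enabled", false)

    cfg, err := serve.NewConfig()
    if err != nil {
        panic(err)
    }
    _ = cfg
}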
pkg/serve/env.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package serve

import (
    "github.com/ethereum/go-ethereum/rpc"
    "github.com/ethereum/go-ethereum/statediff/indexer/node"
    "github.com/spf13/viper"
)

// Env variables
const (
    HTTP_TIMEOUT = "HTTP_TIMEOUT"

    ETH_WS_PATH       = "ETH_WS_PATH"
    ETH_HTTP_PATH     = "ETH_HTTP_PATH"
    ETH_NODE_ID       = "ETH_NODE_ID"
    ETH_CLIENT_NAME   = "ETH_CLIENT_NAME"
    ETH_GENESIS_BLOCK = "ETH_GENESIS_BLOCK"
    ETH_NETWORK_ID    = "ETH_NETWORK_ID"
    ETH_CHAIN_ID      = "ETH_CHAIN_ID"

    DATABASE_NAME                 = "DATABASE_NAME"
    DATABASE_HOSTNAME             = "DATABASE_HOSTNAME"
    DATABASE_PORT                 = "DATABASE_PORT"
    DATABASE_USER                 = "DATABASE_USER"
    DATABASE_PASSWORD             = "DATABASE_PASSWORD"
    DATABASE_MAX_IDLE_CONNECTIONS = "DATABASE_MAX_IDLE_CONNECTIONS"
    DATABASE_MAX_OPEN_CONNECTIONS = "DATABASE_MAX_OPEN_CONNECTIONS"
    DATABASE_MAX_CONN_LIFETIME    = "DATABASE_MAX_CONN_LIFETIME"
)

// GetEthNodeAndClient returns eth node info and client from path url
func getEthNodeAndClient(path string) (node.Info, *rpc.Client, error) {
    viper.BindEnv("ethereum.nodeID", ETH_NODE_ID)
    viper.BindEnv("ethereum.clientName", ETH_CLIENT_NAME)
    viper.BindEnv("ethereum.genesisBlock", ETH_GENESIS_BLOCK)
    viper.BindEnv("ethereum.networkID", ETH_NETWORK_ID)
    viper.BindEnv("ethereum.chainID", ETH_CHAIN_ID)

    rpcClient, err := rpc.Dial(path)
    if err != nil {
        return node.Info{}, nil, err
    }
    return node.Info{
        ID:           viper.GetString("ethereum.nodeID"),
        ClientName:   viper.GetString("ethereum.clientName"),
        GenesisBlock: viper.GetString("ethereum.genesisBlock"),
        NetworkID:    viper.GetString("ethereum.networkID"),
        ChainID:      viper.GetUint64("ethereum.chainID"),
    }, rpcClient, nil
}
@@ -18,22 +18,21 @@ package serve

 import (
 	"fmt"
+	"strconv"
 	"sync"

-	"github.com/ethereum/go-ethereum/core/vm"
-
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
 	ethnode "github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/rpc"
+	"github.com/jmoiron/sqlx"
 	log "github.com/sirupsen/logrus"

-	eth2 "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
-	"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
-
-	"github.com/vulcanize/ipld-eth-server/pkg/eth"
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
+	"github.com/vulcanize/ipld-eth-server/v3/pkg/net"
 )

@@ -49,7 +48,7 @@ type Server interface {
 	APIs() []rpc.API
 	Protocols() []p2p.Protocol
 	// Pub-Sub handling event loop
-	Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth2.ConvertedPayload)
+	Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth.ConvertedPayload)
 	// Method to subscribe to the service
 	Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitChan chan<- bool, params eth.SubscriptionSettings)
 	// Method to unsubscribe from the service

@@ -75,7 +74,7 @@ type Service struct {
 	// A mapping of subscription params hash to the corresponding subscription params
 	SubscriptionTypes map[common.Hash]eth.SubscriptionSettings
 	// Underlying db
-	db *postgres.DB
+	db *sqlx.DB
 	// wg for syncing serve processes
 	serveWg *sync.WaitGroup
 	// rpc client for forwarding cache misses

@@ -84,6 +83,12 @@ type Service struct {
 	supportsStateDiffing bool
 	// backend for the server
 	backend *eth.Backend
+	// whether to forward eth_calls directly to proxy node
+	forwardEthCalls bool
+	// whether to forward all calls to proxy node if they throw an error locally
+	proxyOnError bool
+	// eth node network id
+	nodeNetworkId string
 }

 // NewServer creates a new Server using an underlying Service struct

@@ -98,12 +103,16 @@ func NewServer(settings *Config) (Server, error) {
 	sap.SubscriptionTypes = make(map[common.Hash]eth.SubscriptionSettings)
 	sap.client = settings.Client
 	sap.supportsStateDiffing = settings.SupportStateDiff
+	sap.forwardEthCalls = settings.ForwardEthCalls
+	sap.proxyOnError = settings.ProxyOnError
+	sap.nodeNetworkId = settings.NodeNetworkID
 	var err error
 	sap.backend, err = eth.NewEthBackend(sap.db, &eth.Config{
 		ChainConfig:   settings.ChainConfig,
-		VmConfig:      vm.Config{},
+		VMConfig:      vm.Config{NoBaseFee: true},
 		DefaultSender: settings.DefaultSender,
 		RPCGasCap:     settings.RPCGasCap,
+		GroupCacheConfig: settings.GroupCache,
 	})
 	return sap, err
 }

@@ -115,37 +124,29 @@ func (sap *Service) Protocols() []p2p.Protocol {

 // APIs returns the RPC descriptors the watcher service offers
 func (sap *Service) APIs() []rpc.API {
-	infoAPI := NewInfoAPI()
+	networkID, _ := strconv.ParseUint(sap.nodeNetworkId, 10, 64)
 	apis := []rpc.API{
 		{
 			Namespace: APIName,
 			Version:   APIVersion,
-			Service:   NewPublicServerAPI(sap),
+			Service:   NewPublicServerAPI(sap, sap.client),
 			Public:    true,
 		},
 		{
-			Namespace: "rpc",
-			Version:   APIVersion,
-			Service:   infoAPI,
-			Public:    true,
-		},
-		{
-			Namespace: "net",
-			Version:   APIVersion,
-			Service:   infoAPI,
-			Public:    true,
-		},
-		{
-			Namespace: "admin",
-			Version:   APIVersion,
-			Service:   infoAPI,
+			Namespace: net.APIName,
+			Version:   net.APIVersion,
+			Service:   net.NewPublicNetAPI(networkID, sap.client),
 			Public:    true,
 		},
 	}
+	ethAPI, err := eth.NewPublicEthAPI(sap.backend, sap.client, sap.supportsStateDiffing, sap.forwardEthCalls, sap.proxyOnError)
+	if err != nil {
+		log.Fatalf("unable to create public eth api: %v", err)
+	}
 	return append(apis, rpc.API{
 		Namespace: eth.APIName,
 		Version:   eth.APIVersion,
-		Service:   eth.NewPublicEthAPI(sap.backend, sap.client, sap.supportsStateDiffing),
+		Service:   ethAPI,
 		Public:    true,
 	})
 }

@@ -154,7 +155,7 @@ func (sap *Service) APIs() []rpc.API {
 // It filters and sends this data to any subscribers to the service
 // This process can also be stood up alone, without an screenAndServePayload attached to a Sync process
 // and it will hang on the WaitGroup indefinitely, allowing the Service to serve historical data requests only
-func (sap *Service) Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth2.ConvertedPayload) {
+func (sap *Service) Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth.ConvertedPayload) {
 	sap.serveWg = wg
 	go func() {
 		wg.Add(1)

@@ -173,7 +174,7 @@ func (sap *Service) Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth2.ConvertedPayload) {
 }

 // filterAndServe filters the payload according to each subscription type and sends to the subscriptions
-func (sap *Service) filterAndServe(payload eth2.ConvertedPayload) {
+func (sap *Service) filterAndServe(payload eth.ConvertedPayload) {
 	log.Debug("sending eth ipld payload to subscriptions")
 	sap.Lock()
 	sap.serveWg.Add(1)

@@ -346,7 +347,7 @@ func (sap *Service) Unsubscribe(id rpc.ID) {
 func (sap *Service) Start() error {
 	log.Info("starting eth ipld server")
 	wg := new(sync.WaitGroup)
-	payloadChan := make(chan eth2.ConvertedPayload, PayloadChanBufferSize)
+	payloadChan := make(chan eth.ConvertedPayload, PayloadChanBufferSize)
 	sap.Serve(wg, payloadChan)
 	return nil
 }
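The `APIs()` change above replaces the placeholder `rpc`/`net`/`admin` info APIs with a real `net` namespace service built from `net.NewPublicNetAPI(networkID, sap.client)`. A minimal client-side sketch of exercising it, assuming the namespace follows the standard `net_version` method and that the server listens on the HTTP-RPC port 8081 used by the integration scripts later in this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Assumption: ipld-eth-server exposes HTTP-RPC on localhost:8081,
	// matching the health check in the integration test scripts below.
	client, err := rpc.Dial("http://localhost:8081")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	var version string
	// Assumption: the new net namespace serves the standard net_version method.
	if err := client.Call(&version, "net_version"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("network id:", version)
}
```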
@@ -19,4 +19,11 @@ package shared
 const (
 	DefaultMaxBatchSize   uint64 = 100
 	DefaultMaxBatchNumber int64  = 50
+
+	GcachePoolEnabled             = "GCACHE_POOL_ENABLED"
+	GcachePoolHttpPath            = "GCACHE_POOL_HTTP_PATH"
+	GcachePoolHttpPeers           = "GCACHE_POOL_HTTP_PEERS"
+	GcacheStatedbCacheSize        = "GCACHE_STATEDB_CACHE_SIZE"
+	GcacheStatedbCacheExpiry      = "GCACHE_STATEDB_CACHE_EXPIRY"
+	GcacheStatedbLogStatsInterval = "GCACHE_STATEDB_LOG_STATS_INTERVAL"
 )
41 pkg/shared/database.go Normal file
@@ -0,0 +1,41 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package shared

import (
	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
	"github.com/jmoiron/sqlx"
)

// NewDB creates a new db connection and initializes the connection pool
func NewDB(connectString string, config postgres.Config) (*sqlx.DB, error) {
	db, connectErr := sqlx.Connect("postgres", connectString)
	if connectErr != nil {
		return nil, postgres.ErrDBConnectionFailed(connectErr)
	}
	if config.MaxConns > 0 {
		db.SetMaxOpenConns(config.MaxConns)
	}
	if config.MaxIdle > 0 {
		db.SetMaxIdleConns(config.MaxIdle)
	}
	if config.MaxConnLifetime > 0 {
		db.SetConnMaxLifetime(config.MaxConnLifetime)
	}

	return db, nil
}
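A minimal usage sketch (not part of the diff) for `shared.NewDB`, wiring the `DATABASE_*` env vars into a `postgres.Config` the same way `getTestDBConfig` does later in this file set; the pool-sizing values, the `lib/pq` driver registration, and the standalone `main` wrapper are assumptions for illustration:

```go
package main

import (
	"log"
	"os"
	"strconv"
	"time"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
	// Assumption: register the "postgres" driver for sqlx via lib/pq,
	// in case no imported package has already registered it.
	_ "github.com/lib/pq"

	"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
)

func main() {
	port, _ := strconv.Atoi(os.Getenv("DATABASE_PORT"))
	conf := postgres.Config{
		Hostname:     os.Getenv("DATABASE_HOSTNAME"),
		DatabaseName: os.Getenv("DATABASE_NAME"),
		Username:     os.Getenv("DATABASE_USER"),
		Password:     os.Getenv("DATABASE_PASSWORD"),
		Port:         port,
		Driver:       postgres.SQLX,
		// Assumed pool sizing, picked only for the example.
		MaxConns:        10,
		MaxIdle:         5,
		MaxConnLifetime: time.Hour,
	}

	db, err := shared.NewDB(conf.DbConnectionString(), conf)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```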
@@ -18,13 +18,13 @@ package shared
 import (
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
 	"github.com/ipfs/go-cid"
-	"github.com/ipfs/go-ipfs-blockstore"
-	"github.com/ipfs/go-ipfs-ds-help"
+	blockstore "github.com/ipfs/go-ipfs-blockstore"
+	dshelp "github.com/ipfs/go-ipfs-ds-help"
 	node "github.com/ipfs/go-ipld-format"
 	"github.com/jmoiron/sqlx"
 	"github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs/ipld"
 )

 // HandleZeroAddrPointer will return an emtpy string for a nil address pointer
@@ -18,12 +18,24 @@ package shared
 import (
 	"bytes"
+	"context"
+	"os"
+	"strconv"

-	"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
+	. "github.com/onsi/gomega"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/ethereum/go-ethereum/statediff/indexer"
+	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
+	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+	"github.com/ethereum/go-ethereum/statediff/indexer/models"
+	"github.com/ethereum/go-ethereum/statediff/indexer/node"
+	"github.com/jmoiron/sqlx"
 )

 // IPLDsContainBytes used to check if a list of strings contains a particular string
-func IPLDsContainBytes(iplds []ipfs.BlockModel, b []byte) bool {
+func IPLDsContainBytes(iplds []models.IPLDModel, b []byte) bool {
 	for _, ipld := range iplds {
 		if bytes.Equal(ipld.Data, b) {
 			return true

@@ -31,3 +43,65 @@ func IPLDsContainBytes(iplds []ipfs.BlockModel, b []byte) bool {
 		}
 	}
 	return false
 }
+
+// SetupDB is use to setup a db for watcher tests
+func SetupDB() *sqlx.DB {
+	config := getTestDBConfig()
+
+	db, err := NewDB(config.DbConnectionString(), config)
+	Expect(err).NotTo(HaveOccurred())
+
+	return db
+}
+
+// TearDownDB is used to tear down the watcher dbs after tests
+func TearDownDB(db *sqlx.DB) {
+	tx, err := db.Beginx()
+	Expect(err).NotTo(HaveOccurred())
+	_, err = tx.Exec(`DELETE FROM eth.header_cids`)
+	Expect(err).NotTo(HaveOccurred())
+	_, err = tx.Exec(`DELETE FROM eth.transaction_cids`)
+	Expect(err).NotTo(HaveOccurred())
+	_, err = tx.Exec(`DELETE FROM eth.receipt_cids`)
+	Expect(err).NotTo(HaveOccurred())
+	_, err = tx.Exec(`DELETE FROM eth.state_cids`)
+	Expect(err).NotTo(HaveOccurred())
+	_, err = tx.Exec(`DELETE FROM eth.storage_cids`)
+	Expect(err).NotTo(HaveOccurred())
+	_, err = tx.Exec(`DELETE FROM blocks`)
+	Expect(err).NotTo(HaveOccurred())
+	_, err = tx.Exec(`DELETE FROM eth.log_cids`)
+	Expect(err).NotTo(HaveOccurred())
+	_, err = tx.Exec(`DELETE FROM eth_meta.watched_addresses`)
+	Expect(err).NotTo(HaveOccurred())
+
+	err = tx.Commit()
+	Expect(err).NotTo(HaveOccurred())
+}
+
+func SetupTestStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, genHash common.Hash) interfaces.StateDiffIndexer {
+	testInfo := node.Info{
+		GenesisBlock: genHash.String(),
+		NetworkID:    "1",
+		ID:           "1",
+		ClientName:   "geth",
+		ChainID:      params.TestChainConfig.ChainID.Uint64(),
+	}
+
+	_, stateDiffIndexer, err := indexer.NewStateDiffIndexer(ctx, chainConfig, testInfo, getTestDBConfig())
+	Expect(err).NotTo(HaveOccurred())
+
+	return stateDiffIndexer
+}
+
+func getTestDBConfig() postgres.Config {
+	port, _ := strconv.Atoi(os.Getenv("DATABASE_PORT"))
+	return postgres.Config{
+		Hostname:     os.Getenv("DATABASE_HOSTNAME"),
+		DatabaseName: os.Getenv("DATABASE_NAME"),
+		Username:     os.Getenv("DATABASE_USER"),
+		Password:     os.Getenv("DATABASE_PASSWORD"),
+		Port:         port,
+		Driver:       postgres.SQLX,
+	}
+}
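A sketch of how `SetupDB`/`TearDownDB` might be used from a Ginkgo/Gomega spec (the test style implied by the `onsi/gomega` dot-import above). The spec name, the package name, the ginkgo v1 import path, and the trivial assertion are assumptions, and the `RunSpecs` suite bootstrap is omitted:

```go
package shared_test

import (
	"github.com/jmoiron/sqlx"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
)

var _ = Describe("example watcher behaviour", func() {
	var db *sqlx.DB

	BeforeEach(func() {
		// Open a fresh connection pool against the test database
		// configured via the DATABASE_* env vars.
		db = shared.SetupDB()
	})

	AfterEach(func() {
		// Delete rows from the eth.* tables so specs stay independent.
		shared.TearDownDB(db)
		Expect(db.Close()).To(Succeed())
	})

	It("connects to the test database", func() {
		Expect(db.Ping()).To(Succeed())
	})
})
```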
38 pkg/shared/types.go Normal file
@@ -0,0 +1,38 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package shared

type PoolConfig struct {
	Enabled           bool
	HttpEndpoint      string
	PeerHttpEndpoints []string
}

type GroupConfig struct {
	CacheSizeInMB          int
	CacheExpiryInMins      int
	LogStatsIntervalInSecs int

	// Used in tests to override the cache name, to work around
	// the "duplicate registration of group" error from groupcache
	Name string
}

type GroupCacheConfig struct {
	Pool    PoolConfig
	StateDB GroupConfig
}
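A hypothetical helper (not part of the diff) showing how the `GCACHE_*` env var names added to `pkg/shared` earlier in this diff could be mapped onto `GroupCacheConfig`; the real server presumably binds these through viper alongside its other settings, so treat this as illustration only:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"

	"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
)

// buildGroupCacheConfig is an assumed helper that reads the GCACHE_* env vars
// (named by the shared.Gcache* constants) into a GroupCacheConfig.
func buildGroupCacheConfig() shared.GroupCacheConfig {
	enabled, _ := strconv.ParseBool(os.Getenv(shared.GcachePoolEnabled))
	size, _ := strconv.Atoi(os.Getenv(shared.GcacheStatedbCacheSize))
	expiry, _ := strconv.Atoi(os.Getenv(shared.GcacheStatedbCacheExpiry))
	logInterval, _ := strconv.Atoi(os.Getenv(shared.GcacheStatedbLogStatsInterval))

	return shared.GroupCacheConfig{
		Pool: shared.PoolConfig{
			Enabled:           enabled,
			HttpEndpoint:      os.Getenv(shared.GcachePoolHttpPath),
			PeerHttpEndpoints: strings.Split(os.Getenv(shared.GcachePoolHttpPeers), ","),
		},
		StateDB: shared.GroupConfig{
			CacheSizeInMB:          size,
			CacheExpiryInMins:      expiry,
			LogStatsIntervalInSecs: logInterval,
		},
	}
}

func main() {
	fmt.Printf("groupcache config: %+v\n", buildGroupCacheConfig())
}
```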
17 scripts/run_integration_test.sh Executable file
@@ -0,0 +1,17 @@
set -e
set -o xtrace

export ETH_FORWARD_ETH_CALLS=false
export DB_WRITE=true
export ETH_PROXY_ON_ERROR=false

export PGPASSWORD=password
export DATABASE_USER=vdbm
export DATABASE_PORT=8077
export DATABASE_PASSWORD=password
export DATABASE_HOSTNAME=127.0.0.1

# Wait for containers to be up and execute the integration test.
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8081)" != "200" ]; do echo "waiting for ipld-eth-server..." && sleep 5; done && \
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8545)" != "200" ]; do echo "waiting for geth-statediff..." && sleep 5; done && \
make integrationtest
17 scripts/run_integration_test_forward_eth_calls.sh Executable file
@@ -0,0 +1,17 @@
set -e
set -o xtrace

export ETH_FORWARD_ETH_CALLS=true
export DB_WRITE=false
export ETH_PROXY_ON_ERROR=false

export PGPASSWORD=password
export DATABASE_USER=vdbm
export DATABASE_PORT=8077
export DATABASE_PASSWORD=password
export DATABASE_HOSTNAME=127.0.0.1

# Wait for containers to be up and execute the integration test.
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8081)" != "200" ]; do echo "waiting for ipld-eth-server..." && sleep 5; done && \
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8545)" != "200" ]; do echo "waiting for geth-statediff..." && sleep 5; done && \
make integrationtest
8 scripts/run_unit_test.sh Executable file
@@ -0,0 +1,8 @@
# Clear up existing docker images and volume.
docker-compose down --remove-orphans --volumes

docker-compose -f docker-compose.yml up -d ipld-eth-db
sleep 10
PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing make test

docker-compose down --remove-orphans --volumes
87 test/README.md Normal file
@@ -0,0 +1,87 @@
# Test Instructions

## Setup

- Clone the [stack-orchestrator](https://github.com/vulcanize/stack-orchestrator) and [go-ethereum](https://github.com/vulcanize/go-ethereum) repositories.

- Checkout the [v3 release](https://github.com/vulcanize/go-ethereum/releases/tag/v1.10.17-statediff-3.2.1) in the go-ethereum repo:

  ```bash
  # In go-ethereum repo.
  git checkout v1.10.17-statediff-3.2.1
  ```

- Checkout the working commit in the stack-orchestrator repo:

  ```bash
  # In stack-orchestrator repo.
  git checkout fcbc74451c5494664fe21f765e89c9c6565c07cb
  ```

## Run

- Run unit tests:

  ```bash
  # In ipld-eth-server root directory.
  ./scripts/run_unit_test.sh
  ```

- Run integration tests:

  - Update (replace the existing contents of) the config file [config.sh](https://github.com/vulcanize/stack-orchestrator/blob/main/config.sh) in the stack-orchestrator repo:

    ```bash
    #!/bin/bash

    # Path to go-ethereum repo.
    vulcanize_go_ethereum=~/go-ethereum/

    # Path to ipld-eth-server repo.
    vulcanize_ipld_eth_server=~/ipld-eth-server/

    db_write=true
    eth_forward_eth_calls=false
    eth_proxy_on_error=false
    eth_http_path="go-ethereum:8545"
    ```

  - Run stack-orchestrator:

    ```bash
    # In stack-orchestrator root directory.
    cd helper-scripts

    ./wrapper.sh \
    -e docker \
    -d ../docker/latest/docker-compose-db.yml \
    -d ../docker/local/docker-compose-go-ethereum.yml \
    -d ../docker/local/docker-compose-ipld-eth-server.yml \
    -v remove \
    -p ../config.sh
    ```

  - Run the test:

    ```bash
    # In ipld-eth-server root directory.
    ./scripts/run_integration_test.sh
    ```

  - Update the `config.sh` file:

    ```bash
    #!/bin/bash

    # Path to go-ethereum repo.
    vulcanize_go_ethereum=~/go-ethereum/

    # Path to ipld-eth-server repo.
    vulcanize_ipld_eth_server=~/ipld-eth-server/

    db_write=false
    eth_forward_eth_calls=true
    eth_proxy_on_error=false
    eth_http_path="go-ethereum:8545"
    ```

  - Stop stack-orchestrator and start it again using the same command.

  - Run the integration tests for direct proxy fall-through of `eth_call`s:

    ```bash
    ./scripts/run_integration_test_forward_eth_calls.sh
    ```
3 test/contract/.dockerignore Normal file
@@ -0,0 +1,3 @@
node_modules
artifacts
cache
5 test/contract/.gitignore vendored Normal file
@@ -0,0 +1,5 @@
node_modules

#Hardhat files
cache
artifacts
14 test/contract/Dockerfile Normal file
@@ -0,0 +1,14 @@
FROM node:14

ARG ETH_ADDR
ENV ETH_ADDR $ETH_ADDR

WORKDIR /usr/src/app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run compile && ls -lah

EXPOSE 3000

ENTRYPOINT ["npm", "start"]
10 test/contract/contracts/GLDToken.sol Normal file
@@ -0,0 +1,10 @@
pragma solidity ^0.8.0;
import "@openzeppelin/contracts/token/ERC20/ERC20.sol";
contract GLDToken is ERC20 {
    constructor() ERC20("Gold", "GLD") {
        _mint(msg.sender, 1000000000000000000000);
    }
    function destroy() public {
        selfdestruct(payable(msg.sender));
    }
}
25 test/contract/contracts/SLVToken.sol Normal file
@@ -0,0 +1,25 @@
// SPDX-License-Identifier: AGPL-3.0
pragma solidity ^0.8.0;

import "@openzeppelin/contracts/token/ERC20/ERC20.sol";

contract SLVToken is ERC20 {
    uint256 private countA;
    uint256 private countB;

    constructor() ERC20("Silver", "SLV") {}

    function incrementCountA() public {
        countA = countA + 1;
    }

    function incrementCountB() public {
        countB = countB + 1;
    }

    receive() external payable {}

    function destroy() public {
        selfdestruct(payable(msg.sender));
    }
}
43 test/contract/hardhat.config.js Normal file
@@ -0,0 +1,43 @@
require("@nomiclabs/hardhat-waffle");

// This is a sample Hardhat task. To learn how to create your own go to
// https://hardhat.org/guides/create-task.html
task("accounts", "Prints the list of accounts", async () => {
  const accounts = await ethers.getSigners();

  for (const account of accounts) {
    console.log(account.address);
  }
});

// You need to export an object to set up your config
// Go to https://hardhat.org/config/ to learn more

/**
 * @type import('hardhat/config').HardhatUserConfig
 */
module.exports = {
  solidity: {
    version: "0.8.0",
    settings: {
      outputSelection: {
        '*': {
          '*': [
            'abi', 'storageLayout',
          ]
        }
      }
    }
  },
  networks: {
    local: {
      url: 'http://127.0.0.1:8545',
      chainId: 99
    },
    docker: {
      url: process.env.ETH_ADDR,
      chainId: 99
    }
  }
};
27651 test/contract/package-lock.json generated Normal file
File diff suppressed because it is too large.
28 test/contract/package.json Normal file
@@ -0,0 +1,28 @@
{
  "name": "contract",
  "version": "1.0.0",
  "main": "index.js",
  "scripts": {
    "compile": "npx hardhat compile",
    "start": "HARDHAT_NETWORK=docker node src/index.js",
    "start:local": "ETH_ADDR=http://127.0.0.1:8545 npm run start",
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "description": "",
  "dependencies": {
    "@openzeppelin/contracts": "^4.0.0",
    "fastify": "^3.14.2",
    "hardhat": "^2.2.0",
    "solidity-create2-deployer": "^0.4.0"
  },
  "devDependencies": {
    "@nomiclabs/hardhat-ethers": "^2.0.2",
    "@nomiclabs/hardhat-waffle": "^2.0.1",
    "chai": "^4.3.4",
    "ethereum-waffle": "^3.3.0",
    "ethers": "^5.1.0"
  }
}
18 test/contract/scripts/deploy.js Normal file
@@ -0,0 +1,18 @@
const hre = require("hardhat");

async function main() {
  // await hre.run('compile');
  // We get the contract to deploy
  const GLDToken = await hre.ethers.getContractFactory("GLDToken");
  const token = await GLDToken.deploy();
  await token.deployed();
  console.log("GLDToken deployed to:", token.address, token.deployTransaction.hash);
}
// We recommend this pattern to be able to use async/await everywhere
// and properly handle errors.
main()
  .then(() => process.exit(0))
  .catch(error => {
    console.error(error);
    process.exit(1);
  });
36 test/contract/scripts/sample-script.js Normal file
@@ -0,0 +1,36 @@
// We require the Hardhat Runtime Environment explicitly here. This is optional
// but useful for running the script in a standalone fashion through `node <script>`.
//
// When running the script with `hardhat run <script>` you'll find the Hardhat
// Runtime Environment's members available in the global scope.
const hre = require("hardhat");

async function main() {
  // Hardhat always runs the compile task when running scripts with its command
  // line interface.
  //
  // If this script is run directly using `node` you may want to call compile
  // manually to make sure everything is compiled
  // await hre.run('compile');

  // We get the contract to deploy
  const Greeter = await hre.ethers.getContractFactory("Greeter");
  const greeter = await Greeter.deploy("Hello, Hardhat!");

  await greeter.deployed();

  console.log("Greeter deployed to:", greeter.address, "; tx hash: ", greeter.deployTransaction.hash);

  const result = await greeter.setGreeting("Hello 123!");

  console.log("Greeter updated", "; tx hash: ", result.hash, "; block hash: ", result.blockHash);
}

// We recommend this pattern to be able to use async/await everywhere
// and properly handle errors.
main()
  .then(() => process.exit(0))
  .catch(error => {
    console.error(error);
    process.exit(1);
  });
Some files were not shown because too many files have changed in this diff.