Compare commits

No commits in common. "master" and "v0.3.1-alpha" have entirely different histories.

110 changed files with 3743 additions and 36839 deletions


@ -1,29 +0,0 @@
name: Notion Sync
on:
  workflow_dispatch:
  issues:
    types:
      [
        opened,
        edited,
        labeled,
        unlabeled,
        assigned,
        unassigned,
        milestoned,
        demilestoned,
        reopened,
        closed,
      ]
jobs:
  notion_job:
    runs-on: ubuntu-latest
    name: Add GitHub Issues to Notion
    steps:
      - name: Add GitHub Issues to Notion
        uses: vulcanize/notion-github-action@v1.2.4-issueid
        with:
          notion-token: ${{ secrets.NOTION_TOKEN }}
          notion-db: ${{ secrets.NOTION_DATABASE }}

.github/workflows/on-master.yaml

@ -0,0 +1,25 @@
name: Docker Compose Build
on:
  push:
    branches:
      - master
jobs:
  build:
    name: Run docker build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Get the version
        id: vars
        run: echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
      - name: Run docker build
        run: make docker-build
      - name: Tag docker image
        run: docker tag vulcanize/ipld-eth-server docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
      - name: Docker Login
        run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
      - name: Docker Push
        run: docker push docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}


@ -1,10 +1,35 @@
name: Docker Build
on: [pull_request]
jobs:
-  run-tests:
-    uses: ./.github/workflows/tests.yaml
-    secrets:
-      BUILD_HOSTNAME: ${{ secrets.BUILD_HOSTNAME }}
-      BUILD_USERNAME: ${{ secrets.BUILD_USERNAME }}
-      BUILD_KEY: ${{ secrets.BUILD_KEY }}
+  build:
+    name: Run docker build
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Run docker build
+        run: make docker-build
+  test:
+    name: Run integration tests
+    env:
+      GOPATH: /tmp/go
+    strategy:
+      matrix:
+        go-version: [1.14.x, 1.15.x]
+        os: [ubuntu-latest]
+    runs-on: ${{ matrix.os }}
+    steps:
+      - name: Create GOPATH
+        run: mkdir -p /tmp/go
+      - name: Install Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: ${{ matrix.go-version }}
+      - uses: actions/checkout@v2
+      - name: Run database
+        run: docker-compose up -d db
+      - name: Test
+        run: |
+          sleep 10
+          PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 make test


@ -3,34 +3,9 @@ on:
  release:
    types: [published]
jobs:
-  run-tests:
-    uses: ./.github/workflows/tests.yaml
-    secrets:
-      BUILD_HOSTNAME: ${{ secrets.BUILD_HOSTNAME }}
-      BUILD_USERNAME: ${{ secrets.BUILD_USERNAME }}
-      BUILD_KEY: ${{ secrets.BUILD_KEY }}
-  build:
-    name: Run docker build
-    runs-on: ubuntu-latest
-    needs: run-tests
-    steps:
-      - uses: actions/checkout@v2
-      - name: Get the version
-        id: vars
-        run: echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
-      - name: Run docker build
-        run: make docker-build
-      - name: Tag docker image
-        run: docker tag vulcanize/ipld-eth-server docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
-      - name: Docker Login
-        run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
-      - name: Docker Push
-        run: docker push docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}}
  push_to_registries:
    name: Push Docker image to Docker Hub
    runs-on: ubuntu-latest
-    needs: build
    steps:
      - name: Get the version
        id: vars
@ -47,3 +22,4 @@ jobs:
        run: docker tag docker.pkg.github.com/vulcanize/ipld-eth-server/ipld-eth-server:${{steps.vars.outputs.sha}} vulcanize/ipld-eth-server:${{steps.vars.outputs.tag}}
      - name: Docker Push to Docker Hub
        run: docker push vulcanize/ipld-eth-server:${{steps.vars.outputs.tag}}


@ -1,29 +0,0 @@
#!/bin/bash
set -e
# Set up repo
start_dir=$(pwd)
temp_dir=$(mktemp -d)
cd $temp_dir
git clone -b $(cat /tmp/git_head_ref) "https://github.com/$(cat /tmp/git_repository).git"
cd ipld-eth-server
## Remove the branch and github related info. This way future runs wont be confused.
rm -f /tmp/git_head_ref /tmp/git_repository
# Spin up DB
docker-compose -f docker-compose.yml up -d ipld-eth-db
trap "docker-compose down --remove-orphans; cd $start_dir ; rm -r $temp_dir" SIGINT SIGTERM ERR
sleep 10
# Remove old logs so there's no confusion, then run test
rm -f /tmp/test.log /tmp/return_test.txt
PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing make test > /tmp/test.log
echo $? > /tmp/return_test.txt
# Clean up
docker-compose -f docker-compose.yml down -v --remove-orphans
cd $start_dir
rm -fr $temp_dir


@ -1,189 +0,0 @@
name: Test the stack.
on:
workflow_call:
secrets:
BUILD_HOSTNAME:
required: true
BUILD_USERNAME:
required: true
BUILD_KEY:
required: true
jobs:
build:
name: Run docker build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Run docker build
run: make docker-build
test:
name: Run unit tests
env:
GOPATH: /tmp/go
# To run the unit tests you need to add secrets to your repository.
BUILD_HOSTNAME: ${{ secrets.BUILD_HOSTNAME }}
BUILD_USERNAME: ${{ secrets.BUILD_USERNAME }}
BUILD_KEY: ${{ secrets.BUILD_KEY }}
#strategy:
# matrix:
# go-version: [1.16.x, 1.17.x]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
# Passed experience with GHA has taught me to store variables in files instead of passing them as variables.
- name: Output variables to files
run: |
echo $GITHUB_REPOSITORY > /tmp/git_repository
[ -z "$GITHUB_HEAD_REF" ] && echo $GITHUB_REF_NAME > /tmp/git_head_ref || echo $GITHUB_HEAD_REF > /tmp/git_head_ref
echo "-----BEGIN OPENSSH PRIVATE KEY-----" >> /tmp/key
echo ${{ env.BUILD_KEY }} >> /tmp/key
echo "-----END OPENSSH PRIVATE KEY-----" >> /tmp/key
chmod 400 /tmp/key
cat /tmp/git_repository
cat /tmp/git_head_ref
echo
- name: Raw SCP
run: |
scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key /tmp/git_repository ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/git_repository
scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key /tmp/git_head_ref ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/git_head_ref
scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key .github/workflows/run_unit_test.sh ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/run_unit_test.sh
- name: Trigger Unit Test
run: |
ssh -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }} go install github.com/onsi/ginkgo/ginkgo@latest
ssh -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }} /tmp/run_unit_test.sh
- name: Get the logs and cat them
run: |
scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/test.log .
cat ./test.log
- name: Check Error Code
run: |
scp -o 'StrictHostKeyChecking no' -o UserKnownHostsFile=/dev/null -q -i /tmp/key ${{ env.BUILD_USERNAME }}@${{ env.BUILD_HOSTNAME }}:/tmp/return_test.txt .
[ $(cat ./return_test.txt) -eq 0 ]
integrationtest:
name: Run integration tests
env:
STACK_ORCHESTRATOR_REF: fcbc74451c5494664fe21f765e89c9c6565c07cb
GO_ETHEREUM_REF: 498101102c891c4f8c3cab5649158c642ee1fd6b
GOPATH: /tmp/go
DB_WRITE: true
ETH_FORWARD_ETH_CALLS: false
ETH_PROXY_ON_ERROR: false
ETH_HTTP_PATH: "go-ethereum:8545"
runs-on: ubuntu-latest
steps:
- name: Create GOPATH
run: mkdir -p /tmp/go
- uses: actions/setup-go@v3
with:
go-version: ">=1.18.0"
check-latest: true
- uses: actions/checkout@v2
with:
path: "./ipld-eth-server"
- uses: actions/checkout@v2
with:
ref: ${{ env.STACK_ORCHESTRATOR_REF }}
path: "./stack-orchestrator/"
repository: vulcanize/stack-orchestrator
- uses: actions/checkout@v2
with:
ref: ${{ env.GO_ETHEREUM_REF }}
repository: vulcanize/go-ethereum
path: "./go-ethereum/"
- name: Create config file
run: |
echo vulcanize_go_ethereum=$GITHUB_WORKSPACE/go-ethereum/ > ./config.sh
echo vulcanize_ipld_eth_server=$GITHUB_WORKSPACE/ipld-eth-server/ >> ./config.sh
echo db_write=$DB_WRITE >> ./config.sh
echo eth_forward_eth_calls=$ETH_FORWARD_ETH_CALLS >> ./config.sh
echo eth_proxy_on_error=$ETH_PROXY_ON_ERROR >> ./config.sh
echo eth_http_path=$ETH_HTTP_PATH >> ./config.sh
cat ./config.sh
- name: Build geth
run: |
cd $GITHUB_WORKSPACE/stack-orchestrator/helper-scripts
./compile-geth.sh \
-p "$GITHUB_WORKSPACE/config.sh" \
-e docker
- name: Run docker compose
run: |
docker-compose \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-db.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-server.yml" \
--env-file "$GITHUB_WORKSPACE/config.sh" \
up -d --build
- name: Test
run: |
cd $GITHUB_WORKSPACE/ipld-eth-server
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8081)" != "200" ]; do echo "waiting for ipld-eth-server..." && sleep 5; done && \
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8545)" != "200" ]; do echo "waiting for geth-statediff..." && sleep 5; done && \
make integrationtest
integrationtest_forwardethcalls:
name: Run integration tests for direct proxy fall-through of eth_calls
env:
STACK_ORCHESTRATOR_REF: fcbc74451c5494664fe21f765e89c9c6565c07cb
GO_ETHEREUM_REF: 498101102c891c4f8c3cab5649158c642ee1fd6b
GOPATH: /tmp/go
DB_WRITE: false
ETH_FORWARD_ETH_CALLS: true
ETH_PROXY_ON_ERROR: false
ETH_HTTP_PATH: "go-ethereum:8545"
runs-on: ubuntu-latest
steps:
- name: Create GOPATH
run: mkdir -p /tmp/go
- uses: actions/setup-go@v3
with:
go-version: ">=1.18.0"
check-latest: true
- uses: actions/checkout@v2
with:
path: "./ipld-eth-server"
- uses: actions/checkout@v2
with:
ref: ${{ env.STACK_ORCHESTRATOR_REF }}
path: "./stack-orchestrator/"
repository: vulcanize/stack-orchestrator
- uses: actions/checkout@v2
with:
ref: ${{ env.GO_ETHEREUM_REF }}
repository: vulcanize/go-ethereum
path: "./go-ethereum/"
- name: Create config file
run: |
echo vulcanize_go_ethereum=$GITHUB_WORKSPACE/go-ethereum/ > ./config.sh
echo vulcanize_ipld_eth_server=$GITHUB_WORKSPACE/ipld-eth-server/ >> ./config.sh
echo db_write=$DB_WRITE >> ./config.sh
echo eth_forward_eth_calls=$ETH_FORWARD_ETH_CALLS >> ./config.sh
echo eth_proxy_on_error=$ETH_PROXY_ON_ERROR >> ./config.sh
echo eth_http_path=$ETH_HTTP_PATH >> ./config.sh
cat ./config.sh
- name: Build geth
run: |
cd $GITHUB_WORKSPACE/stack-orchestrator/helper-scripts
./compile-geth.sh \
-p "$GITHUB_WORKSPACE/config.sh" \
-e docker
- name: Run docker compose
run: |
docker-compose \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/latest/docker-compose-db.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-go-ethereum.yml" \
-f "$GITHUB_WORKSPACE/stack-orchestrator/docker/local/docker-compose-ipld-eth-server.yml" \
--env-file "$GITHUB_WORKSPACE/config.sh" \
up -d --build
- name: Test
run: |
cd $GITHUB_WORKSPACE/ipld-eth-server
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8081)" != "200" ]; do echo "waiting for ipld-eth-server..." && sleep 5; done && \
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8545)" != "200" ]; do echo "waiting for geth-statediff..." && sleep 5; done && \
make integrationtest


@ -1,4 +1,4 @@
-FROM golang:1.18-alpine as builder
+FROM golang:1.13-alpine as builder
RUN apk --update --no-cache add make git g++ linux-headers
# DEBUG
@ -6,17 +6,8 @@ RUN apk add busybox-extras
# Build ipld-eth-server
WORKDIR /go/src/github.com/vulcanize/ipld-eth-server
-# Cache the modules
-ENV GO111MODULE=on
-COPY go.mod .
-COPY go.sum .
-RUN go mod download
-COPY . .
-# Build the binary
-RUN GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipld-eth-server .
+ADD . .
+RUN GO111MODULE=on GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipld-eth-server .
# Copy migration tool
WORKDIR /
@ -44,6 +35,7 @@ COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/ipld-eth-serv
# keep binaries immutable
COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-server/ipld-eth-server ipld-eth-server
COPY --from=builder /goose goose
+COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-server/db/migrations migrations/vulcanizedb
COPY --from=builder /go/src/github.com/vulcanize/ipld-eth-server/environments environments
ENTRYPOINT ["/app/entrypoint.sh"]


@ -3,6 +3,10 @@ BASE = $(GOPATH)/src/$(PACKAGE)
PKGS = go list ./... | grep -v "^vendor/"
# Tools
+## Testing library
+GINKGO = $(BIN)/ginkgo
+$(BIN)/ginkgo:
+	go get -u github.com/onsi/ginkgo/ginkgo
## Migration tool
GOOSE = $(BIN)/goose
@ -22,9 +26,8 @@ $(BIN)/gometalinter.v2:
.PHONY: installtools
-installtools: | $(LINT) $(GOOSE)
+installtools: | $(LINT) $(GOOSE) $(GINKGO)
	echo "Installing tools"
-	go mod download
.PHONY: metalint
metalint: | $(METALINT)
@ -51,22 +54,46 @@ TEST_CONNECT_STRING = postgresql://$(DATABASE_USER):$(DATABASE_PASSWORD)@$(DATAB
TEST_CONNECT_STRING_LOCAL = postgresql://$(USER)@$(HOST_NAME):$(PORT)/$(TEST_DB)?sslmode=disable
.PHONY: test
-test: | $(GOOSE)
+test: | $(GINKGO) $(GOOSE)
	go vet ./...
	go fmt ./...
-	go run github.com/onsi/ginkgo/ginkgo -r --skipPackage=test
+	export PGPASSWORD=$(DATABASE_PASSWORD)
+	dropdb -h $(DATABASE_HOSTNAME) -p $(DATABASE_PORT) -U $(DATABASE_USER) --if-exists $(TEST_DB)
+	createdb -h $(DATABASE_HOSTNAME) -p $(DATABASE_PORT) -U $(DATABASE_USER) $(TEST_DB)
+	$(GOOSE) -dir db/migrations postgres "$(TEST_CONNECT_STRING)" up
+	$(GINKGO) -r --skipPackage=integration_tests,integration
.PHONY: integrationtest
-integrationtest: | $(GOOSE)
+integrationtest: | $(GINKGO) $(GOOSE)
	go vet ./...
	go fmt ./...
-	go run github.com/onsi/ginkgo/ginkgo -r test/ -v
+	export PGPASSWORD=$(DATABASE_PASSWORD)
+	dropdb -h $(DATABASE_HOSTNAME) -p $(DATABASE_PORT) -U $(DATABASE_USER) --if-exists $(TEST_DB)
+	createdb -h $(DATABASE_HOSTNAME) -p $(DATABASE_PORT) -U $(DATABASE_USER) $(TEST_DB)
+	$(GOOSE) -dir db/migrations postgres "$(TEST_CONNECT_STRING)" up
+	$(GINKGO) -r integration_test/
.PHONY: test_local
-test_local: | $(GOOSE)
+test_local: | $(GINKGO) $(GOOSE)
	go vet ./...
	go fmt ./...
-	./scripts/run_unit_test.sh
+	dropdb -h $(HOST_NAME) -p $(PORT) -U $(USER) --if-exists $(TEST_DB)
+	createdb -h $(HOST_NAME) -p $(PORT) -U $(USER) $(TEST_DB)
+	$(GOOSE) -dir db/migrations postgres "$(TEST_CONNECT_STRING_LOCAL)" up
+	$(GOOSE) -dir db/migrations postgres "$(TEST_CONNECT_STRING_LOCAL)" reset
+	make migrate NAME=$(TEST_DB)
+	$(GINKGO) -r --skipPackage=integration_tests,integration
+.PHONY: integrationtest_local
+integrationtest_local: | $(GINKGO) $(GOOSE)
+	go vet ./...
+	go fmt ./...
+	dropdb -h $(HOST_NAME) -p $(PORT) -U $(USER) --if-exists $(TEST_DB)
+	createdb -h $(HOST_NAME) -p $(PORT) -U $(USER) $(TEST_DB)
+	$(GOOSE) -dir db/migrations postgres "$(TEST_CONNECT_STRING_LOCAL)" up
+	$(GOOSE) -dir db/migrations postgres "$(TEST_CONNECT_STRING_LOCAL)" reset
+	make migrate NAME=$(TEST_DB)
+	$(GINKGO) -r integration_test/
build:
	go fmt ./...
@ -135,4 +162,4 @@ import:
## Build docker image
.PHONY: docker-build
docker-build:
	docker build -t vulcanize/ipld-eth-server .


@ -33,9 +33,9 @@ External dependency
## Install
Start by downloading ipld-eth-server and moving into the repo:
-`GO111MODULE=off go get -d github.com/vulcanize/ipld-eth-server/v3`
-`cd $GOPATH/src/github.com/vulcanize/ipld-eth-server/v3@v3.x.x`
+`GO111MODULE=off go get -d github.com/vulcanize/ipld-eth-server`
+`cd $GOPATH/src/github.com/vulcanize/ipld-eth-server`
Then, build the binary:
@ -121,35 +121,9 @@ The currently supported standard endpoints are:
TODO: Add the rest of the standard endpoints and unique endpoints (e.g. getSlice)
-### CLI Options and Environment variables
-| CLI Option | Environment Variable | Default Value | Comment |
-| ----------------------------- | ----------------------------- | ---------------- | ----------------------------------- |
-| `database-hostname` | `DATABASE_HOSTNAME` | localhost | IPLD database host |
-| `database-port` | `DATABASE_PORT` | 5432 | IPLD database port |
-| `database-name` | `DATABASE_NAME` | vulcanize_public | IPLD database name |
-| `database-user` | `DATABASE_USER` | | IPLD database user |
-| `database-password` | `DATABASE_PASSWORD` | | IPLD database password |
-| `eth-server-graphql` | `ETH_SERVER_GRAPHQL` | false | If `true` enable Eth GraphQL Server |
-| `eth-server-graphql-path` | `ETH_SERVER_GRAPHQLPATH` | | If `eth-server-graphql` set to true, endpoint url for graphql server (host:port) |
-| `eth-server-http` | `ETH_SERVER_HTTP` | true | If `true` enable Eth HTTP JSON-RPC Server |
-| `eth-server-http-path` | `ETH_SERVER_HTTPPATH` | | If `eth-server-http` set to `true`, endpoint url for Eth HTTP JSON-RPC server (host:port) |
-| `eth-server-ws` | `ETH_SERVER_WS` | false | If `true` enable Eth WS JSON-RPC Server |
-| `eth-server-ws-path` | `ETH_SERVER_WSPATH` | | If `eth-server-ws` set to `true`, endpoint url for Eth WS JSON-RPC server (host:port) |
-| `eth-server-ipc` | `ETH_SERVER_IPC` | false | If `true` enable Eth IPC JSON-RPC Server |
-| `eth-server-ipc-path` | `ETH_SERVER_IPC_PATH` | | If `eth-server-ws` set to `true`, path for Eth IPC JSON-RPC server |
-| `ipld-server-graphql` | `IPLD_SERVER_GRAPHQL` | false | If `true` enable IPLD GraphQL Server |
-| `ipld-server-graphql-path` | `IPLD_SERVER_GRAPHQLPATH` | | If `ipld-server-graphql` set to true, endpoint url for graphql server (host:port) |
-| `ipld-postgraphile-path` | `IPLD_POSTGRAPHILEPATH` | | If `ipld-server-graphql` set to true, http url for postgraphile server on top of IPLD db |
-| `tracing-http-path` | `TRACING_HTTPPATH` | | If `ipld-server-graphql` set to true, http url for tracing server |
-| `tracing-postgraphile-path` | `TRACING.POSTGRAPHILEPATH` | | If `ipld-server-graphql` set to true, http url for postgraphile server on top of tracing db |
### Testing
-Follow steps in [test/README.md](./test/README.md)
+`make test` will run the unit tests
+`make test` setups a clean `vulcanize_testing` db
## Monitoring


@ -1,16 +0,0 @@
{
"chainId": 4,
"homesteadBlock": 1,
"eip150Block": 2,
"eip150Hash": "0x9b095b36c15eaf13044373aef8ee0bd3a382a5abb92e402afa44b8249c3a90e9",
"eip155Block": 3,
"eip158Block": 3,
"byzantiumBlock": 3,
"constantinopleBlock": 3,
"petersburgBlock": 3,
"istanbulBlock": 3,
"clique": {
"period": 15,
"epoch": 30000
}
}


@ -1,37 +0,0 @@
// Copyright © 2021 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
func addDatabaseFlags(command *cobra.Command) {
// database flags
command.PersistentFlags().String("database-name", "vulcanize_public", "database name")
command.PersistentFlags().Int("database-port", 5432, "database port")
command.PersistentFlags().String("database-hostname", "localhost", "database hostname")
command.PersistentFlags().String("database-user", "", "database user")
command.PersistentFlags().String("database-password", "", "database password")
// database flag bindings
viper.BindPFlag("database.name", command.PersistentFlags().Lookup("database-name"))
viper.BindPFlag("database.port", command.PersistentFlags().Lookup("database-port"))
viper.BindPFlag("database.hostname", command.PersistentFlags().Lookup("database-hostname"))
viper.BindPFlag("database.user", command.PersistentFlags().Lookup("database-user"))
viper.BindPFlag("database.password", command.PersistentFlags().Lookup("database-password"))
}


@ -19,20 +19,16 @@ package cmd
import (
	"fmt"
	"os"
-	"path/filepath"
	"strings"
-	"github.com/joho/godotenv"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
-	"github.com/vulcanize/ipld-eth-server/v3/pkg/prom"
+	"github.com/vulcanize/ipld-eth-server/pkg/prom"
)
var (
	cfgFile        string
-	envFile        string
	subCommand     string
	logWithCommand log.Entry
)
@ -103,8 +99,11 @@ func init() {
	viper.AutomaticEnv()
	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file location")
-	rootCmd.PersistentFlags().StringVar(&envFile, "env", "", "environment file location")
+	rootCmd.PersistentFlags().String("database-name", "vulcanize_public", "database name")
+	rootCmd.PersistentFlags().Int("database-port", 5432, "database port")
+	rootCmd.PersistentFlags().String("database-hostname", "localhost", "database hostname")
+	rootCmd.PersistentFlags().String("database-user", "", "database user")
+	rootCmd.PersistentFlags().String("database-password", "", "database password")
	rootCmd.PersistentFlags().String("client-ipcPath", "", "location of geth.ipc file")
	rootCmd.PersistentFlags().String("log-level", log.InfoLevel.String(), "log level (trace, debug, info, warn, error, fatal, panic)")
	rootCmd.PersistentFlags().String("log-file", "", "file path for logging")
@ -115,6 +114,11 @@ func init() {
	rootCmd.PersistentFlags().String("prom-http-addr", "127.0.0.1", "http host for prometheus")
	rootCmd.PersistentFlags().String("prom-http-port", "8090", "http port for prometheus")
+	viper.BindPFlag("database.name", rootCmd.PersistentFlags().Lookup("database-name"))
+	viper.BindPFlag("database.port", rootCmd.PersistentFlags().Lookup("database-port"))
+	viper.BindPFlag("database.hostname", rootCmd.PersistentFlags().Lookup("database-hostname"))
+	viper.BindPFlag("database.user", rootCmd.PersistentFlags().Lookup("database-user"))
+	viper.BindPFlag("database.password", rootCmd.PersistentFlags().Lookup("database-password"))
	viper.BindPFlag("log.level", rootCmd.PersistentFlags().Lookup("log-level"))
	viper.BindPFlag("log.file", rootCmd.PersistentFlags().Lookup("log-file"))
@ -126,32 +130,14 @@ func init() {
}
func initConfig() {
-	if cfgFile == "" && envFile == "" {
-		log.Fatal("No configuration file specified, use --config , --env flag to provide configuration")
-	}
	if cfgFile != "" {
-		if filepath.Ext(cfgFile) != ".toml" {
-			log.Fatal("Provide .toml file for --config flag")
-		}
		viper.SetConfigFile(cfgFile)
-		if err := viper.ReadInConfig(); err != nil {
-			log.Fatalf("Couldn't read config file: %s", err.Error())
+		if err := viper.ReadInConfig(); err == nil {
+			log.Printf("Using config file: %s", viper.ConfigFileUsed())
+		} else {
+			log.Fatal(fmt.Sprintf("Couldn't read config file: %s", err.Error()))
		}
-		log.Infof("Using config file: %s", viper.ConfigFileUsed())
-	}
-	if envFile != "" {
-		if filepath.Ext(envFile) != ".env" {
-			log.Fatal("Provide .env file for --env flag")
-		}
-		if err := godotenv.Load(envFile); err != nil {
-			log.Fatalf("Failed to set environment variable from env file: %s", err.Error())
-		}
-		log.Infof("Using env file: %s", envFile)
+	} else {
+		log.Warn("No config file passed with --config flag")
	}
}


@ -16,31 +16,24 @@
package cmd
import (
-	"errors"
-	"net/http"
-	"net/url"
	"os"
	"os/signal"
-	"strings"
	"sync"
-	"time"
+	"github.com/vulcanize/ipld-eth-server/pkg/graphql"
	"github.com/ethereum/go-ethereum/rpc"
-	"github.com/mailgun/groupcache/v2"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
-	"github.com/vulcanize/gap-filler/pkg/mux"
-	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
-	"github.com/vulcanize/ipld-eth-server/v3/pkg/graphql"
-	srpc "github.com/vulcanize/ipld-eth-server/v3/pkg/rpc"
-	s "github.com/vulcanize/ipld-eth-server/v3/pkg/serve"
-	v "github.com/vulcanize/ipld-eth-server/v3/version"
+	"github.com/vulcanize/ipld-eth-indexer/pkg/eth"
+	srpc "github.com/vulcanize/ipld-eth-server/pkg/rpc"
+	s "github.com/vulcanize/ipld-eth-server/pkg/serve"
+	v "github.com/vulcanize/ipld-eth-server/version"
)
-var ErrNoRpcEndpoints = errors.New("no rpc endpoints is available")
// serveCmd represents the serve command
var serveCmd = &cobra.Command{
	Use: "serve",
@ -78,29 +71,12 @@ func serve() {
	if err := startServers(server, serverConfig); err != nil {
		logWithCommand.Fatal(err)
	}
-	graphQL, err := startEthGraphQL(server, serverConfig)
+	graphQL, err := startGraphQL(server)
	if err != nil {
		logWithCommand.Fatal(err)
	}
-	err = startIpldGraphQL(serverConfig)
-	if err != nil {
-		logWithCommand.Fatal(err)
-	}
-	err = startGroupCacheService(serverConfig)
-	if err != nil {
-		logWithCommand.Fatal(err)
-	}
-	if serverConfig.StateValidationEnabled {
-		go startStateTrieValidator(serverConfig, server)
-		logWithCommand.Info("state validator enabled")
-	} else {
-		logWithCommand.Info("state validator disabled")
-	}
-	shutdown := make(chan os.Signal, 1)
+	shutdown := make(chan os.Signal)
	signal.Notify(shutdown, os.Interrupt)
	<-shutdown
	if graphQL != nil {
@ -111,43 +87,27 @@
}
func startServers(server s.Server, settings *s.Config) error {
-	if settings.IPCEnabled {
-		logWithCommand.Info("starting up IPC server")
-		_, _, err := srpc.StartIPCEndpoint(settings.IPCEndpoint, server.APIs())
-		if err != nil {
-			return err
-		}
-	} else {
-		logWithCommand.Info("IPC server is disabled")
-	}
-	if settings.WSEnabled {
-		logWithCommand.Info("starting up WS server")
-		_, _, err := srpc.StartWSEndpoint(settings.WSEndpoint, server.APIs(), []string{"vdb", "net"}, nil, true)
-		if err != nil {
-			return err
-		}
-	} else {
-		logWithCommand.Info("WS server is disabled")
-	}
-	if settings.HTTPEnabled {
-		logWithCommand.Info("starting up HTTP server")
-		_, err := srpc.StartHTTPEndpoint(settings.HTTPEndpoint, server.APIs(), []string{"vdb", "eth", "net"}, nil, []string{"*"}, rpc.HTTPTimeouts{})
-		if err != nil {
-			return err
-		}
-	} else {
-		logWithCommand.Info("HTTP server is disabled")
-	}
-	return nil
+	logWithCommand.Info("starting up IPC server")
+	_, _, err := srpc.StartIPCEndpoint(settings.IPCEndpoint, server.APIs())
+	if err != nil {
+		return err
+	}
+	logWithCommand.Info("starting up WS server")
+	_, _, err = srpc.StartWSEndpoint(settings.WSEndpoint, server.APIs(), []string{"vdb"}, nil, true)
+	if err != nil {
+		return err
+	}
+	logWithCommand.Info("starting up HTTP server")
+	_, err = srpc.StartHTTPEndpoint(settings.HTTPEndpoint, server.APIs(), []string{"eth"}, nil, []string{"*"}, rpc.HTTPTimeouts{})
+	return err
}
-func startEthGraphQL(server s.Server, settings *s.Config) (graphQLServer *graphql.Service, err error) {
-	if settings.EthGraphqlEnabled {
-		logWithCommand.Info("starting up ETH GraphQL server")
-		endPoint := settings.EthGraphqlEndpoint
+func startGraphQL(server s.Server) (graphQLServer *graphql.Service, err error) {
+	viper.BindEnv("server.graphql", "SERVER_GRAPHQL")
+	if viper.GetBool("server.graphql") {
+		logWithCommand.Info("starting up GraphQL server")
+		viper.BindEnv("server.graphqlEndpoint", "SERVER_GRAPHQL_ENDPOINT")
+		endPoint := viper.GetString("server.graphqlEndpoint")
		if endPoint != "" {
			graphQLServer, err = graphql.New(server.Backend(), endPoint, nil, []string{"*"}, rpc.HTTPTimeouts{})
			if err != nil {
@ -155,185 +115,19 @@ func startEthGraphQL(server s.Server, settings *s.Config) (graphQLServer *graphq
			}
			err = graphQLServer.Start(nil)
		}
-	} else {
-		logWithCommand.Info("ETH GraphQL server is disabled")
	}
	return
}
func startIpldGraphQL(settings *s.Config) error {
if settings.IpldGraphqlEnabled {
logWithCommand.Info("starting up IPLD GraphQL server")
gqlIpldAddr, err := url.Parse(settings.IpldPostgraphileEndpoint)
if err != nil {
return err
}
gqlTracingAPIAddr, err := url.Parse(settings.TracingPostgraphileEndpoint)
if err != nil {
return err
}
ethClients, err := parseRpcAddresses(settings.EthHttpEndpoint)
if err != nil {
return err
}
var tracingClients []*rpc.Client
tracingEndpoint := viper.GetString("tracing.httpPath")
if tracingEndpoint != "" {
tracingClients, err = parseRpcAddresses(tracingEndpoint)
if err != nil {
return err
}
}
router, err := mux.NewServeMux(&mux.Options{
BasePath: "/",
EnableGraphiQL: true,
Postgraphile: mux.PostgraphileOptions{
Default: gqlIpldAddr,
TracingAPI: gqlTracingAPIAddr,
},
RPC: mux.RPCOptions{
DefaultClients: ethClients,
TracingClients: tracingClients,
},
})
if err != nil {
return err
}
go http.ListenAndServe(settings.IpldGraphqlEndpoint, router)
} else {
logWithCommand.Info("IPLD GraphQL server is disabled")
}
return nil
}
func startGroupCacheService(settings *s.Config) error {
gcc := settings.GroupCache
if gcc.Pool.Enabled {
logWithCommand.Info("starting up groupcache pool HTTTP server")
pool := groupcache.NewHTTPPoolOpts(gcc.Pool.HttpEndpoint, &groupcache.HTTPPoolOptions{})
pool.Set(gcc.Pool.PeerHttpEndpoints...)
httpURL, err := url.Parse(gcc.Pool.HttpEndpoint)
if err != nil {
return err
}
server := http.Server{
Addr: httpURL.Host,
Handler: pool,
}
// Start a HTTP server to listen for peer requests from the groupcache
go server.ListenAndServe()
logWithCommand.Infof("groupcache pool endpoint opened for url %s", httpURL)
} else {
logWithCommand.Info("Groupcache pool is disabled")
}
return nil
}
func startStateTrieValidator(config *s.Config, server s.Server) {
validateEveryNthBlock := config.StateValidationEveryNthBlock
var lastBlockNumber uint64
backend := server.Backend()
for {
time.Sleep(5 * time.Second)
block, err := backend.CurrentBlock()
if err != nil {
log.Errorln("Error fetching current block for state trie validator")
continue
}
stateRoot := block.Root()
blockNumber := block.NumberU64()
blockHash := block.Hash()
if validateEveryNthBlock <= 0 || // Used for static replicas where block number doesn't progress.
(blockNumber > lastBlockNumber) && (blockNumber%validateEveryNthBlock == 0) {
// The validate trie call will take a long time on mainnet, e.g. a few hours.
if err = backend.ValidateTrie(stateRoot); err != nil {
log.Fatalf("Error validating trie for block number %d hash %s state root %s",
blockNumber,
blockHash,
stateRoot,
)
}
log.Infof("Successfully validated trie for block number %d hash %s state root %s",
blockNumber,
blockHash,
stateRoot,
)
if validateEveryNthBlock <= 0 {
// Static replica, sleep a long-ish time (1/2 of cache expiry time) since we only need to keep the cache warm.
time.Sleep((time.Minute * time.Duration(config.GroupCache.StateDB.CacheExpiryInMins)) / 2)
}
lastBlockNumber = blockNumber
}
}
}
func parseRpcAddresses(value string) ([]*rpc.Client, error) {
rpcAddresses := strings.Split(value, ",")
rpcClients := make([]*rpc.Client, 0, len(rpcAddresses))
for _, address := range rpcAddresses {
rpcClient, err := rpc.Dial(address)
if err != nil {
logWithCommand.Errorf("couldn't connect to %s. Error: %s", address, err)
continue
}
rpcClients = append(rpcClients, rpcClient)
}
if len(rpcClients) == 0 {
logWithCommand.Error(ErrNoRpcEndpoints)
return nil, ErrNoRpcEndpoints
}
return rpcClients, nil
}
func init() {
	rootCmd.AddCommand(serveCmd)
-	addDatabaseFlags(serveCmd)
	// flags for all config variables
-	// eth graphql and json-rpc parameters
-	serveCmd.PersistentFlags().Bool("eth-server-graphql", false, "turn on the eth graphql server")
-	serveCmd.PersistentFlags().String("eth-server-graphql-path", "", "endpoint url for eth graphql server (host:port)")
-	serveCmd.PersistentFlags().Bool("eth-server-http", true, "turn on the eth http json-rpc server")
-	serveCmd.PersistentFlags().String("eth-server-http-path", "", "endpoint url for eth http json-rpc server (host:port)")
+	serveCmd.PersistentFlags().Bool("server-graphql", false, "turn on the graphql server")
+	serveCmd.PersistentFlags().String("server-graphql-endpoint", "", "endpoint url for graphql server")
+	serveCmd.PersistentFlags().String("server-ws-path", "", "vdb server ws path")
+	serveCmd.PersistentFlags().String("server-http-path", "", "vdb server http path")
+	serveCmd.PersistentFlags().String("server-ipc-path", "", "vdb server ipc path")
serveCmd.PersistentFlags().Bool("eth-server-ws", false, "turn on the eth websocket json-rpc server")
serveCmd.PersistentFlags().String("eth-server-ws-path", "", "endpoint url for eth websocket json-rpc server (host:port)")
serveCmd.PersistentFlags().Bool("eth-server-ipc", false, "turn on the eth ipc json-rpc server")
serveCmd.PersistentFlags().String("eth-server-ipc-path", "", "path for eth ipc json-rpc server")
// ipld and tracing graphql parameters
serveCmd.PersistentFlags().Bool("ipld-server-graphql", false, "turn on the ipld graphql server")
serveCmd.PersistentFlags().String("ipld-server-graphql-path", "", "endpoint url for ipld graphql server (host:port)")
serveCmd.PersistentFlags().String("ipld-postgraphile-path", "", "http url to postgraphile on top of ipld database")
serveCmd.PersistentFlags().String("tracing-http-path", "", "http url to tracing service")
serveCmd.PersistentFlags().String("tracing-postgraphile-path", "", "http url to postgraphile on top of tracing db")
	serveCmd.PersistentFlags().String("eth-http-path", "", "http url for ethereum node")
	serveCmd.PersistentFlags().String("eth-node-id", "", "eth node id")
@ -343,46 +137,13 @@ func init() {
	serveCmd.PersistentFlags().String("eth-chain-id", "1", "eth chain id")
	serveCmd.PersistentFlags().String("eth-default-sender", "", "default sender address")
	serveCmd.PersistentFlags().String("eth-rpc-gas-cap", "", "rpc gas cap (for eth_Call execution)")
serveCmd.PersistentFlags().String("eth-chain-config", "", "json chain config file location")
serveCmd.PersistentFlags().Bool("eth-supports-state-diff", false, "whether the proxy ethereum client supports statediffing endpoints")
serveCmd.PersistentFlags().Bool("eth-forward-eth-calls", false, "whether to immediately forward eth_calls to proxy client")
serveCmd.PersistentFlags().Bool("eth-proxy-on-error", true, "whether to forward all failed calls to proxy client")
// groupcache flags
serveCmd.PersistentFlags().Bool("gcache-pool-enabled", false, "turn on the groupcache pool")
serveCmd.PersistentFlags().String("gcache-pool-http-path", "", "http url for groupcache node")
serveCmd.PersistentFlags().StringArray("gcache-pool-http-peers", []string{}, "http urls for groupcache peers")
serveCmd.PersistentFlags().Int("gcache-statedb-cache-size", 16, "state DB cache size in MB")
serveCmd.PersistentFlags().Int("gcache-statedb-cache-expiry", 60, "state DB cache expiry time in mins")
serveCmd.PersistentFlags().Int("gcache-statedb-log-stats-interval", 60, "state DB cache stats log interval in secs")
// state validator flags
serveCmd.PersistentFlags().Bool("validator-enabled", false, "turn on the state validator")
serveCmd.PersistentFlags().Uint("validator-every-nth-block", 1500, "only validate every Nth block")
	// and their bindings
-	// eth graphql server
-	viper.BindPFlag("eth.server.graphql", serveCmd.PersistentFlags().Lookup("eth-server-graphql"))
-	viper.BindPFlag("eth.server.graphqlPath", serveCmd.PersistentFlags().Lookup("eth-server-graphql-path"))
-	// eth http json-rpc server
-	viper.BindPFlag("eth.server.http", serveCmd.PersistentFlags().Lookup("eth-server-http"))
-	viper.BindPFlag("eth.server.httpPath", serveCmd.PersistentFlags().Lookup("eth-server-http-path"))
+	viper.BindPFlag("server.graphql", serveCmd.PersistentFlags().Lookup("server-graphql"))
+	viper.BindPFlag("server.graphqlEndpoint", serveCmd.PersistentFlags().Lookup("server-graphql-endpoint"))
+	viper.BindPFlag("server.wsPath", serveCmd.PersistentFlags().Lookup("server-ws-path"))
+	viper.BindPFlag("server.httpPath", serveCmd.PersistentFlags().Lookup("server-http-path"))
+	viper.BindPFlag("server.ipcPath", serveCmd.PersistentFlags().Lookup("server-ipc-path"))
// eth websocket json-rpc server
viper.BindPFlag("eth.server.ws", serveCmd.PersistentFlags().Lookup("eth-server-ws"))
viper.BindPFlag("eth.server.wsPath", serveCmd.PersistentFlags().Lookup("eth-server-ws-path"))
// eth ipc json-rpc server
viper.BindPFlag("eth.server.ipc", serveCmd.PersistentFlags().Lookup("eth-server-ipc"))
viper.BindPFlag("eth.server.ipcPath", serveCmd.PersistentFlags().Lookup("eth-server-ipc-path"))
// ipld and tracing graphql parameters
viper.BindPFlag("ipld.server.graphql", serveCmd.PersistentFlags().Lookup("ipld-server-graphql"))
viper.BindPFlag("ipld.server.graphqlPath", serveCmd.PersistentFlags().Lookup("ipld-server-graphql-path"))
viper.BindPFlag("ipld.postgraphilePath", serveCmd.PersistentFlags().Lookup("ipld-postgraphile-path"))
viper.BindPFlag("tracing.httpPath", serveCmd.PersistentFlags().Lookup("tracing-http-path"))
viper.BindPFlag("tracing.postgraphilePath", serveCmd.PersistentFlags().Lookup("tracing-postgraphile-path"))
	viper.BindPFlag("ethereum.httpPath", serveCmd.PersistentFlags().Lookup("eth-http-path"))
	viper.BindPFlag("ethereum.nodeID", serveCmd.PersistentFlags().Lookup("eth-node-id"))
@ -392,20 +153,4 @@ func init() {
	viper.BindPFlag("ethereum.chainID", serveCmd.PersistentFlags().Lookup("eth-chain-id"))
	viper.BindPFlag("ethereum.defaultSender", serveCmd.PersistentFlags().Lookup("eth-default-sender"))
	viper.BindPFlag("ethereum.rpcGasCap", serveCmd.PersistentFlags().Lookup("eth-rpc-gas-cap"))
viper.BindPFlag("ethereum.chainConfig", serveCmd.PersistentFlags().Lookup("eth-chain-config"))
viper.BindPFlag("ethereum.supportsStateDiff", serveCmd.PersistentFlags().Lookup("eth-supports-state-diff"))
viper.BindPFlag("ethereum.forwardEthCalls", serveCmd.PersistentFlags().Lookup("eth-forward-eth-calls"))
viper.BindPFlag("ethereum.proxyOnError", serveCmd.PersistentFlags().Lookup("eth-proxy-on-error"))
// groupcache flags
viper.BindPFlag("groupcache.pool.enabled", serveCmd.PersistentFlags().Lookup("gcache-pool-enabled"))
viper.BindPFlag("groupcache.pool.httpEndpoint", serveCmd.PersistentFlags().Lookup("gcache-pool-http-path"))
viper.BindPFlag("groupcache.pool.peerHttpEndpoints", serveCmd.PersistentFlags().Lookup("gcache-pool-http-peers"))
viper.BindPFlag("groupcache.statedb.cacheSizeInMB", serveCmd.PersistentFlags().Lookup("gcache-statedb-cache-size"))
viper.BindPFlag("groupcache.statedb.cacheExpiryInMins", serveCmd.PersistentFlags().Lookup("gcache-statedb-cache-expiry"))
viper.BindPFlag("groupcache.statedb.logStatsIntervalInSecs", serveCmd.PersistentFlags().Lookup("gcache-statedb-log-stats-interval"))
// state validator flags
viper.BindPFlag("validator.enabled", serveCmd.PersistentFlags().Lookup("validator-enabled"))
viper.BindPFlag("validator.everyNthBlock", serveCmd.PersistentFlags().Lookup("validator-every-nth-block"))
}


@ -20,6 +20,7 @@ import (
	"fmt"
	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/rpc"
@ -27,9 +28,9 @@ import (
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
-	"github.com/vulcanize/ipld-eth-server/v3/pkg/client"
-	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
-	w "github.com/vulcanize/ipld-eth-server/v3/pkg/serve"
+	"github.com/vulcanize/ipld-eth-server/pkg/client"
+	"github.com/vulcanize/ipld-eth-server/pkg/eth"
+	w "github.com/vulcanize/ipld-eth-server/pkg/serve"
)
// subscribeCmd represents the subscribe command
@ -128,7 +129,7 @@ func subscribe() {
			}
			// This assumes leafs only
			for _, stateNode := range ethData.StateNodes {
-				var acct types.StateAccount
+				var acct state.Account
				err = rlp.DecodeBytes(stateNode.IPLD.Data, &acct)
				if err != nil {
					logWithCommand.Error(err)


@ -1,87 +0,0 @@
// Copyright © 2021 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"time"
"github.com/ethereum/go-ethereum/common"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
validator "github.com/vulcanize/eth-ipfs-state-validator/v3/pkg"
ipfsethdb "github.com/vulcanize/ipfs-ethdb/v3/postgres"
s "github.com/vulcanize/ipld-eth-server/v3/pkg/serve"
)
const GroupName = "statedb-validate"
const CacheExpiryInMins = 8 * 60 // 8 hours
const CacheSizeInMB = 16 // 16 MB
var validateCmd = &cobra.Command{
Use: "validate",
Short: "valdiate state",
Long: `This command validates the trie for the given state root`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *log.WithField("SubCommand", subCommand)
validate()
},
}
func validate() {
config, err := s.NewConfig()
if err != nil {
logWithCommand.Fatal(err)
}
stateRootStr := viper.GetString("stateRoot")
if stateRootStr == "" {
logWithCommand.Fatal("must provide a state root for state validation")
}
stateRoot := common.HexToHash(stateRootStr)
cacheSize := viper.GetInt("cacheSize")
ethDB := ipfsethdb.NewDatabase(config.DB, ipfsethdb.CacheConfig{
Name: GroupName,
Size: cacheSize * 1024 * 1024,
ExpiryDuration: time.Minute * time.Duration(CacheExpiryInMins),
})
val := validator.NewValidator(nil, ethDB)
if err = val.ValidateTrie(stateRoot); err != nil {
log.Fatalln("Error validating state root")
}
stats := ethDB.(*ipfsethdb.Database).GetCacheStats()
log.Debugf("groupcache stats %+v", stats)
log.Infoln("Successfully validated state root")
}
func init() {
rootCmd.AddCommand(validateCmd)
addDatabaseFlags(validateCmd)
validateCmd.PersistentFlags().String("state-root", "", "root of the state trie we wish to validate")
viper.BindPFlag("stateRoot", validateCmd.PersistentFlags().Lookup("state-root"))
validateCmd.PersistentFlags().Int("cache-size", CacheSizeInMB, "cache size in MB")
viper.BindPFlag("cacheSize", validateCmd.PersistentFlags().Lookup("cache-size"))
}


@ -19,7 +19,7 @@ import (
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
-	v "github.com/vulcanize/ipld-eth-server/v3/version"
+	v "github.com/vulcanize/ipld-eth-server/version"
)
// versionCmd represents the version command


@ -0,0 +1,8 @@
-- +goose Up
CREATE TABLE IF NOT EXISTS public.blocks (
key TEXT UNIQUE NOT NULL,
data BYTEA NOT NULL
);
-- +goose Down
DROP TABLE public.blocks;


@ -0,0 +1,12 @@
-- +goose Up
CREATE TABLE nodes (
id SERIAL PRIMARY KEY,
client_name VARCHAR,
genesis_block VARCHAR(66),
network_id VARCHAR,
node_id VARCHAR(128),
CONSTRAINT node_uc UNIQUE (genesis_block, network_id, node_id)
);
-- +goose Down
DROP TABLE nodes;


@ -0,0 +1,5 @@
-- +goose Up
CREATE SCHEMA eth;
-- +goose Down
DROP SCHEMA eth;


@ -0,0 +1,23 @@
-- +goose Up
CREATE TABLE eth.header_cids (
id SERIAL PRIMARY KEY,
block_number BIGINT NOT NULL,
block_hash VARCHAR(66) NOT NULL,
parent_hash VARCHAR(66) NOT NULL,
cid TEXT NOT NULL,
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
td NUMERIC NOT NULL,
node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
reward NUMERIC NOT NULL,
state_root VARCHAR(66) NOT NULL,
tx_root VARCHAR(66) NOT NULL,
receipt_root VARCHAR(66) NOT NULL,
uncle_root VARCHAR(66) NOT NULL,
bloom BYTEA NOT NULL,
timestamp NUMERIC NOT NULL,
times_validated INTEGER NOT NULL DEFAULT 1,
UNIQUE (block_number, block_hash)
);
-- +goose Down
DROP TABLE eth.header_cids;
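For orientation only (not part of the migrations), a minimal query sketch showing how the mh_key column ties a header row to its raw IPLD block in public.blocks; the hash literal below is a placeholder, not data from this changeset:
-- Illustrative only: fetch the IPLD payload backing a header CID.
SELECT h.block_number, h.cid, b.data
FROM eth.header_cids h
JOIN public.blocks b ON b.key = h.mh_key
WHERE h.block_hash = '0xabc123';  -- placeholder block hash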


@ -0,0 +1,14 @@
-- +goose Up
CREATE TABLE eth.uncle_cids (
id SERIAL PRIMARY KEY,
header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
block_hash VARCHAR(66) NOT NULL,
parent_hash VARCHAR(66) NOT NULL,
cid TEXT NOT NULL,
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
reward NUMERIC NOT NULL,
UNIQUE (header_id, block_hash)
);
-- +goose Down
DROP TABLE eth.uncle_cids;


@ -0,0 +1,17 @@
-- +goose Up
CREATE TABLE eth.transaction_cids (
id SERIAL PRIMARY KEY,
header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
tx_hash VARCHAR(66) NOT NULL,
index INTEGER NOT NULL,
cid TEXT NOT NULL,
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
dst VARCHAR(66) NOT NULL,
src VARCHAR(66) NOT NULL,
deployment BOOL NOT NULL,
tx_data BYTEA,
UNIQUE (header_id, tx_hash)
);
-- +goose Down
DROP TABLE eth.transaction_cids;


@ -0,0 +1,18 @@
-- +goose Up
CREATE TABLE eth.receipt_cids (
id SERIAL PRIMARY KEY,
tx_id INTEGER NOT NULL REFERENCES eth.transaction_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
cid TEXT NOT NULL,
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
contract VARCHAR(66),
contract_hash VARCHAR(66),
topic0s VARCHAR(66)[],
topic1s VARCHAR(66)[],
topic2s VARCHAR(66)[],
topic3s VARCHAR(66)[],
log_contracts VARCHAR(66)[],
UNIQUE (tx_id)
);
-- +goose Down
DROP TABLE eth.receipt_cids;


@ -0,0 +1,15 @@
-- +goose Up
CREATE TABLE eth.state_cids (
id SERIAL PRIMARY KEY,
header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
state_leaf_key VARCHAR(66),
cid TEXT NOT NULL,
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
state_path BYTEA,
node_type INTEGER NOT NULL,
diff BOOLEAN NOT NULL DEFAULT FALSE,
UNIQUE (header_id, state_path)
);
-- +goose Down
DROP TABLE eth.state_cids;


@ -0,0 +1,15 @@
-- +goose Up
CREATE TABLE eth.storage_cids (
id SERIAL PRIMARY KEY,
state_id INTEGER NOT NULL REFERENCES eth.state_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
storage_leaf_key VARCHAR(66),
cid TEXT NOT NULL,
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
storage_path BYTEA,
node_type INTEGER NOT NULL,
diff BOOLEAN NOT NULL DEFAULT FALSE,
UNIQUE (state_id, storage_path)
);
-- +goose Down
DROP TABLE eth.storage_cids;


@ -0,0 +1,13 @@
-- +goose Up
CREATE TABLE eth.state_accounts (
id SERIAL PRIMARY KEY,
state_id INTEGER NOT NULL REFERENCES eth.state_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
balance NUMERIC NOT NULL,
nonce INTEGER NOT NULL,
code_hash BYTEA NOT NULL,
storage_root VARCHAR(66) NOT NULL,
UNIQUE (state_id)
);
-- +goose Down
DROP TABLE eth.state_accounts;


@ -0,0 +1,6 @@
-- +goose Up
COMMENT ON TABLE public.nodes IS E'@name NodeInfo';
COMMENT ON TABLE eth.transaction_cids IS E'@name EthTransactionCids';
COMMENT ON TABLE eth.header_cids IS E'@name EthHeaderCids';
COMMENT ON COLUMN public.nodes.node_id IS E'@name ChainNodeID';
COMMENT ON COLUMN eth.header_cids.node_id IS E'@name EthNodeID';


@ -0,0 +1,21 @@
-- +goose Up
ALTER TABLE public.nodes
ADD COLUMN chain_id INTEGER DEFAULT 1;
ALTER TABLE public.nodes
DROP CONSTRAINT node_uc;
ALTER TABLE public.nodes
ADD CONSTRAINT node_uc
UNIQUE (genesis_block, network_id, node_id, chain_id);
-- +goose Down
ALTER TABLE public.nodes
DROP CONSTRAINT node_uc;
ALTER TABLE public.nodes
ADD CONSTRAINT node_uc
UNIQUE (genesis_block, network_id, node_id);
ALTER TABLE public.nodes
DROP COLUMN chain_id;


@ -0,0 +1,69 @@
-- +goose Up
-- +goose StatementBegin
CREATE FUNCTION eth.graphql_subscription() returns TRIGGER as $$
declare
table_name text = TG_ARGV[0];
attribute text = TG_ARGV[1];
id text;
begin
execute 'select $1.' || quote_ident(attribute)
using new
into id;
perform pg_notify('postgraphile:' || table_name,
json_build_object(
'__node__', json_build_array(
table_name,
id
)
)::text
);
return new;
end;
$$ language plpgsql;
-- +goose StatementEnd
CREATE TRIGGER header_cids_ai
after INSERT ON eth.header_cids
for each row
execute procedure eth.graphql_subscription('header_cids', 'id');
CREATE TRIGGER receipt_cids_ai
after INSERT ON eth.receipt_cids
for each row
execute procedure eth.graphql_subscription('receipt_cids', 'id');
CREATE TRIGGER state_accounts_ai
after INSERT ON eth.state_accounts
for each row
execute procedure eth.graphql_subscription('state_accounts', 'id');
CREATE TRIGGER state_cids_ai
after INSERT ON eth.state_cids
for each row
execute procedure eth.graphql_subscription('state_cids', 'id');
CREATE TRIGGER storage_cids_ai
after INSERT ON eth.storage_cids
for each row
execute procedure eth.graphql_subscription('storage_cids', 'id');
CREATE TRIGGER transaction_cids_ai
after INSERT ON eth.transaction_cids
for each row
execute procedure eth.graphql_subscription('transaction_cids', 'id');
CREATE TRIGGER uncle_cids_ai
after INSERT ON eth.uncle_cids
for each row
execute procedure eth.graphql_subscription('uncle_cids', 'id');
-- +goose Down
DROP TRIGGER uncle_cids_ai ON eth.uncle_cids;
DROP TRIGGER transaction_cids_ai ON eth.transaction_cids;
DROP TRIGGER storage_cids_ai ON eth.storage_cids;
DROP TRIGGER state_cids_ai ON eth.state_cids;
DROP TRIGGER state_accounts_ai ON eth.state_accounts;
DROP TRIGGER receipt_cids_ai ON eth.receipt_cids;
DROP TRIGGER header_cids_ai ON eth.header_cids;
DROP FUNCTION eth.graphql_subscription();
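As a usage sketch (assuming standard Postgres LISTEN/NOTIFY semantics; not code from this changeset), a GraphQL subscription layer such as Postgraphile would consume these events by listening on the per-table channel names the trigger builds:
-- Illustrative only: subscribe to insert events for eth.header_cids.
LISTEN "postgraphile:header_cids";
-- Each INSERT then delivers a JSON payload shaped like:
-- {"__node__": ["header_cids", "<id>"]}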


@ -0,0 +1,121 @@
-- +goose Up
-- header indexes
CREATE INDEX block_number_index ON eth.header_cids USING brin (block_number);
CREATE INDEX block_hash_index ON eth.header_cids USING btree (block_hash);
CREATE INDEX header_cid_index ON eth.header_cids USING btree (cid);
CREATE INDEX header_mh_index ON eth.header_cids USING btree (mh_key);
CREATE INDEX state_root_index ON eth.header_cids USING btree (state_root);
CREATE INDEX timestamp_index ON eth.header_cids USING brin (timestamp);
-- transaction indexes
CREATE INDEX tx_header_id_index ON eth.transaction_cids USING btree (header_id);
CREATE INDEX tx_hash_index ON eth.transaction_cids USING btree (tx_hash);
CREATE INDEX tx_cid_index ON eth.transaction_cids USING btree (cid);
CREATE INDEX tx_mh_index ON eth.transaction_cids USING btree (mh_key);
CREATE INDEX tx_dst_index ON eth.transaction_cids USING btree (dst);
CREATE INDEX tx_src_index ON eth.transaction_cids USING btree (src);
-- receipt indexes
CREATE INDEX rct_tx_id_index ON eth.receipt_cids USING btree (tx_id);
CREATE INDEX rct_cid_index ON eth.receipt_cids USING btree (cid);
CREATE INDEX rct_mh_index ON eth.receipt_cids USING btree (mh_key);
CREATE INDEX rct_contract_index ON eth.receipt_cids USING btree (contract);
CREATE INDEX rct_contract_hash_index ON eth.receipt_cids USING btree (contract_hash);
CREATE INDEX rct_topic0_index ON eth.receipt_cids USING gin (topic0s);
CREATE INDEX rct_topic1_index ON eth.receipt_cids USING gin (topic1s);
CREATE INDEX rct_topic2_index ON eth.receipt_cids USING gin (topic2s);
CREATE INDEX rct_topic3_index ON eth.receipt_cids USING gin (topic3s);
CREATE INDEX rct_log_contract_index ON eth.receipt_cids USING gin (log_contracts);
-- state node indexes
CREATE INDEX state_header_id_index ON eth.state_cids USING btree (header_id);
CREATE INDEX state_leaf_key_index ON eth.state_cids USING btree (state_leaf_key);
CREATE INDEX state_cid_index ON eth.state_cids USING btree (cid);
CREATE INDEX state_mh_index ON eth.state_cids USING btree (mh_key);
CREATE INDEX state_path_index ON eth.state_cids USING btree (state_path);
-- storage node indexes
CREATE INDEX storage_state_id_index ON eth.storage_cids USING btree (state_id);
CREATE INDEX storage_leaf_key_index ON eth.storage_cids USING btree (storage_leaf_key);
CREATE INDEX storage_cid_index ON eth.storage_cids USING btree (cid);
CREATE INDEX storage_mh_index ON eth.storage_cids USING btree (mh_key);
CREATE INDEX storage_path_index ON eth.storage_cids USING btree (storage_path);
-- state accounts indexes
CREATE INDEX account_state_id_index ON eth.state_accounts USING btree (state_id);
CREATE INDEX storage_root_index ON eth.state_accounts USING btree (storage_root);
-- +goose Down
-- state account indexes
DROP INDEX eth.storage_root_index;
DROP INDEX eth.account_state_id_index;
-- storage node indexes
DROP INDEX eth.storage_path_index;
DROP INDEX eth.storage_mh_index;
DROP INDEX eth.storage_cid_index;
DROP INDEX eth.storage_leaf_key_index;
DROP INDEX eth.storage_state_id_index;
-- state node indexes
DROP INDEX eth.state_path_index;
DROP INDEX eth.state_mh_index;
DROP INDEX eth.state_cid_index;
DROP INDEX eth.state_leaf_key_index;
DROP INDEX eth.state_header_id_index;
-- receipt indexes
DROP INDEX eth.rct_log_contract_index;
DROP INDEX eth.rct_topic3_index;
DROP INDEX eth.rct_topic2_index;
DROP INDEX eth.rct_topic1_index;
DROP INDEX eth.rct_topic0_index;
DROP INDEX eth.rct_contract_hash_index;
DROP INDEX eth.rct_contract_index;
DROP INDEX eth.rct_mh_index;
DROP INDEX eth.rct_cid_index;
DROP INDEX eth.rct_tx_id_index;
-- transaction indexes
DROP INDEX eth.tx_src_index;
DROP INDEX eth.tx_dst_index;
DROP INDEX eth.tx_mh_index;
DROP INDEX eth.tx_cid_index;
DROP INDEX eth.tx_hash_index;
DROP INDEX eth.tx_header_id_index;
-- header indexes
DROP INDEX eth.timestamp_index;
DROP INDEX eth.state_root_index;
DROP INDEX eth.header_mh_index;
DROP INDEX eth.header_cid_index;
DROP INDEX eth.block_hash_index;
DROP INDEX eth.block_number_index;

View File

@ -0,0 +1,48 @@
-- +goose Up
-- +goose StatementBegin
-- returns the number of child headers that reference backwards to the header with the provided hash
CREATE OR REPLACE FUNCTION header_weight(hash VARCHAR(66)) RETURNS BIGINT
AS $$
WITH RECURSIVE validator AS (
SELECT block_hash, parent_hash, block_number
FROM eth.header_cids
WHERE block_hash = hash
UNION
SELECT eth.header_cids.block_hash, eth.header_cids.parent_hash, eth.header_cids.block_number
FROM eth.header_cids
INNER JOIN validator
ON eth.header_cids.parent_hash = validator.block_hash
AND eth.header_cids.block_number = validator.block_number + 1
)
SELECT COUNT(*) FROM validator;
$$ LANGUAGE SQL;
-- +goose StatementEnd
-- +goose StatementBegin
-- returns the id of the heaviest header at the provided height
CREATE OR REPLACE FUNCTION canonical_header(height BIGINT) RETURNS INT AS
$BODY$
DECLARE
current_weight INT;
heaviest_weight INT DEFAULT 0;
heaviest_id INT;
r eth.header_cids%ROWTYPE;
BEGIN
FOR r IN SELECT * FROM eth.header_cids
WHERE block_number = height
LOOP
SELECT INTO current_weight * FROM header_weight(r.block_hash);
IF current_weight > heaviest_weight THEN
heaviest_weight := current_weight;
heaviest_id := r.id;
END IF;
END LOOP;
RETURN heaviest_id;
END
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd
-- +goose Down
DROP FUNCTION header_weight;
DROP FUNCTION canonical_header;
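header_weight and canonical_header are plain SQL/plpgsql functions, so they can be queried directly. A minimal sketch of calling them through database/sql is below; the connection string and block values are illustrative only.

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq"
)

func main() {
	// Illustrative connection details matching the docker-compose defaults.
	db, err := sql.Open("postgres",
		"postgres://vdbm:password@localhost:8077/vulcanize_public?sslmode=disable")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Number of descendant headers building on a given block hash.
	hash := "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
	var weight int64
	if err := db.QueryRow(`SELECT header_weight($1)`, hash).Scan(&weight); err != nil {
		panic(err)
	}
	fmt.Println("weight:", weight)

	// ID of the heaviest header at a given height; NULL when nothing is indexed there.
	var id sql.NullInt64
	if err := db.QueryRow(`SELECT canonical_header($1)`, 1000000).Scan(&id); err != nil {
		panic(err)
	}
	if id.Valid {
		fmt.Println("canonical header id:", id.Int64)
	} else {
		fmt.Println("no headers indexed at that height")
	}
}
```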

View File

@ -0,0 +1,7 @@
-- +goose Up
ALTER TABLE eth.transaction_cids
DROP COLUMN deployment;
-- +goose Down
ALTER TABLE eth.transaction_cids
ADD COLUMN deployment BOOL NOT NULL DEFAULT FALSE;

View File

@ -0,0 +1,25 @@
-- +goose Up
ALTER TABLE eth.storage_cids
ALTER COLUMN state_id TYPE BIGINT;
ALTER TABLE eth.state_accounts
ALTER COLUMN state_id TYPE BIGINT;
ALTER TABLE eth.state_cids
ALTER COLUMN id TYPE BIGINT;
ALTER TABLE eth.storage_cids
ALTER COLUMN id TYPE BIGINT;
-- +goose Down
ALTER TABLE eth.storage_cids
ALTER COLUMN id TYPE INTEGER;
ALTER TABLE eth.state_cids
ALTER COLUMN id TYPE INTEGER;
ALTER TABLE eth.state_accounts
ALTER COLUMN state_id TYPE INTEGER;
ALTER TABLE eth.storage_cids
ALTER COLUMN state_id TYPE INTEGER;

View File

@ -0,0 +1,39 @@
-- +goose Up
-- +goose StatementBegin
-- returns whether a storage node at the provided path was removed in the range > the provided height and <= the provided block hash
CREATE OR REPLACE FUNCTION was_storage_removed(path BYTEA, height BIGINT, hash VARCHAR(66)) RETURNS BOOLEAN
AS $$
SELECT exists(SELECT 1
FROM eth.storage_cids
INNER JOIN eth.state_cids ON (storage_cids.state_id = state_cids.id)
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
WHERE storage_path = path
AND block_number > height
AND block_number <= (SELECT block_number
FROM eth.header_cids
WHERE block_hash = hash)
AND storage_cids.node_type = 3
LIMIT 1);
$$ LANGUAGE SQL;
-- +goose StatementEnd
-- +goose StatementBegin
-- returns whether a state node at the provided path was removed in the range > the provided height and <= the provided block hash
CREATE OR REPLACE FUNCTION was_state_removed(path BYTEA, height BIGINT, hash VARCHAR(66)) RETURNS BOOLEAN
AS $$
SELECT exists(SELECT 1
FROM eth.state_cids
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
WHERE state_path = path
AND block_number > height
AND block_number <= (SELECT block_number
FROM eth.header_cids
WHERE block_hash = hash)
AND state_cids.node_type = 3
LIMIT 1);
$$ LANGUAGE SQL;
-- +goose StatementEnd
-- +goose Down
DROP FUNCTION was_storage_removed;
DROP FUNCTION was_state_removed;
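Both helpers return a single boolean, so client code only needs a one-row query. A hedged sketch of wrapping them from Go follows; the package and function names are invented for illustration and are not part of the repository's API.

```go
// Thin wrappers around the was_state_removed / was_storage_removed SQL functions above.
package pruningcheck

import "database/sql"

// WasStateRemoved reports whether the state node at path was marked removed
// (node_type = 3) at a block number > height and <= the block with the given hash.
func WasStateRemoved(db *sql.DB, path []byte, height int64, blockHash string) (bool, error) {
	var removed bool
	err := db.QueryRow(`SELECT was_state_removed($1, $2, $3)`, path, height, blockHash).Scan(&removed)
	return removed, err
}

// WasStorageRemoved does the same for a storage node under the given path.
func WasStorageRemoved(db *sql.DB, path []byte, height int64, blockHash string) (bool, error) {
	var removed bool
	err := db.QueryRow(`SELECT was_storage_removed($1, $2, $3)`, path, height, blockHash).Scan(&removed)
	return removed, err
}
```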

View File

@ -0,0 +1,121 @@
-- +goose Up
-- +goose StatementBegin
CREATE TYPE child_result AS (
has_child BOOLEAN,
children eth.header_cids[]
);
CREATE OR REPLACE FUNCTION has_child(hash VARCHAR(66), height BIGINT) RETURNS child_result AS
$BODY$
DECLARE
child_height INT;
temp_child eth.header_cids;
new_child_result child_result;
BEGIN
child_height = height + 1;
-- short circuit if there are no children
SELECT exists(SELECT 1
FROM eth.header_cids
WHERE parent_hash = hash
AND block_number = child_height
LIMIT 1)
INTO new_child_result.has_child;
-- collect all the children for this header
IF new_child_result.has_child THEN
FOR temp_child IN
SELECT * FROM eth.header_cids WHERE parent_hash = hash AND block_number = child_height
LOOP
new_child_result.children = array_append(new_child_result.children, temp_child);
END LOOP;
END IF;
RETURN new_child_result;
END
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd
-- +goose StatementBegin
CREATE OR REPLACE FUNCTION canonical_header_from_array(headers eth.header_cids[]) RETURNS eth.header_cids AS
$BODY$
DECLARE
canonical_header eth.header_cids;
canonical_child eth.header_cids;
header eth.header_cids;
current_child_result child_result;
child_headers eth.header_cids[];
current_header_with_child eth.header_cids;
has_children_count INT DEFAULT 0;
BEGIN
-- for each header in the provided set
FOREACH header IN ARRAY headers
LOOP
-- check if it has any children
current_child_result = has_child(header.block_hash, header.block_number);
IF current_child_result.has_child THEN
-- if it does, take note
has_children_count = has_children_count + 1;
current_header_with_child = header;
-- and add the children to the growing set of child headers
child_headers = array_cat(child_headers, current_child_result.children);
END IF;
END LOOP;
-- if none of the headers had children, none is more canonical than the others
IF has_children_count = 0 THEN
-- return the first one selected
SELECT * INTO canonical_header FROM unnest(headers) LIMIT 1;
-- if only one header had children, it can be considered the heaviest/canonical header of the set
ELSIF has_children_count = 1 THEN
-- return the only header with a child
canonical_header = current_header_with_child;
-- if there are multiple headers with children
ELSE
-- find the canonical header from the child set
canonical_child = canonical_header_from_array(child_headers);
-- the header that is parent to this canonical child is the canonical header at this level
SELECT * INTO canonical_header FROM unnest(headers)
WHERE block_hash = canonical_child.parent_hash;
END IF;
RETURN canonical_header;
END
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd
-- +goose StatementBegin
CREATE OR REPLACE FUNCTION canonical_header_id(height BIGINT) RETURNS INTEGER AS
$BODY$
DECLARE
canonical_header eth.header_cids;
headers eth.header_cids[];
header_count INT;
temp_header eth.header_cids;
BEGIN
-- collect all headers at this height
FOR temp_header IN
SELECT * FROM eth.header_cids WHERE block_number = height
LOOP
headers = array_append(headers, temp_header);
END LOOP;
-- count the number of headers collected
header_count = array_length(headers, 1);
-- if we have less than 1 header, return NULL
IF header_count IS NULL OR header_count < 1 THEN
RETURN NULL;
-- if we have one header, return its id
ELSIF header_count = 1 THEN
RETURN headers[1].id;
-- if we have multiple headers we need to determine which one is canonical
ELSE
canonical_header = canonical_header_from_array(headers);
RETURN canonical_header.id;
END IF;
END;
$BODY$
LANGUAGE 'plpgsql';
-- +goose StatementEnd
-- +goose Down
DROP FUNCTION canonical_header_id;
DROP FUNCTION canonical_header_from_array;
DROP FUNCTION has_child;
DROP TYPE child_result;
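canonical_header_id is the entry point of this group; has_child and canonical_header_from_array are internal to its recursion. A small illustrative Go wrapper (again, not the repository's API) might look like:

```go
package canonical // illustrative package name

import "database/sql"

// HeaderID wraps the canonical_header_id SQL function above; it returns a
// NullInt64 because the function yields NULL when no header exists at height.
func HeaderID(db *sql.DB, height int64) (sql.NullInt64, error) {
	var id sql.NullInt64
	err := db.QueryRow(`SELECT canonical_header_id($1)`, height).Scan(&id)
	return id, err
}
```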

View File

@ -0,0 +1,13 @@
-- +goose Up
ALTER TABLE eth.receipt_cids
ADD COLUMN post_state VARCHAR(66);
ALTER TABLE eth.receipt_cids
ADD COLUMN post_status INTEGER;
-- +goose Down
ALTER TABLE eth.receipt_cids
DROP COLUMN post_status;
ALTER TABLE eth.receipt_cids
DROP COLUMN post_state;

db/schema.sql (normal file, 1252 lines)

File diff suppressed because it is too large

View File

@ -1,12 +0,0 @@
version: '3.2'
services:
contract:
build:
context: ./test/contract
args:
ETH_ADDR: "http://go-ethereum:8545"
environment:
ETH_ADDR: "http://go-ethereum:8545"
ports:
- "127.0.0.1:3000:3000"

View File

@ -1,54 +1,68 @@
version: '3.2' version: '3.2'
services: services:
ipld-eth-db: dapptools:
restart: unless-stopped
image: vulcanize/dapptools:v0.29.0-statediff-0.0.2
ports:
- "127.0.0.1:8545:8545"
- "127.0.0.1:8546:8546"
db:
restart: always restart: always
image: vulcanize/ipld-eth-db:v3.2.0 image: postgres:10.12-alpine
environment: environment:
POSTGRES_USER: "vdbm" POSTGRES_USER: "vdbm"
POSTGRES_DB: "vulcanize_testing" POSTGRES_DB: "vulcanize_public"
POSTGRES_PASSWORD: "password" POSTGRES_PASSWORD: "password"
volumes: volumes:
- vdb_db_eth_server:/var/lib/postgresql/data - vdb_db_eth_server:/var/lib/postgresql/data
ports: ports:
- "127.0.0.1:8077:5432" - "127.0.0.1:8077:5432"
command: ["postgres", "-c", "log_statement=all"]
eth-server: eth-indexer:
restart: unless-stopped restart: unless-stopped
depends_on: depends_on:
- ipld-eth-db - db
- dapptools
image: vulcanize/ipld-eth-indexer:v0.3.0-alpha
environment:
DATABASE_NAME: vulcanize_public
DATABASE_HOSTNAME: db
DATABASE_PORT: 5432
DATABASE_USER: vdbm
DATABASE_PASSWORD: password
ETH_WS_PATH: "dapptools:8546"
ETH_HTTP_PATH: "dapptools:8545"
ETH_CHAIN_ID: 4
ETH_NETWORK_ID: 4
VDB_COMMAND: sync
eth-server:
depends_on:
- db
build: build:
context: ./ context: ./
cache_from: cache_from:
- alpine:latest - alpine:latest
- golang:1.13-alpine - golang:1.13-alpine
environment: environment:
IPLD_SERVER_GRAPHQL: "true"
IPLD_POSTGRAPHILEPATH: http://graphql:5000
ETH_SERVER_HTTPPATH: 0.0.0.0:8081
VDB_COMMAND: "serve" VDB_COMMAND: "serve"
ETH_CHAIN_CONFIG: "/tmp/chain.json" DATABASE_NAME: "vulcanize_public"
DATABASE_NAME: "vulcanize_testing" DATABASE_HOSTNAME: "db"
DATABASE_HOSTNAME: "ipld-eth-db"
DATABASE_PORT: 5432 DATABASE_PORT: 5432
DATABASE_USER: "vdbm" DATABASE_USER: "vdbm"
DATABASE_PASSWORD: "password" DATABASE_PASSWORD: "password"
ETH_CHAIN_ID: 4 SERVER_WS_PATH: "0.0.0.0:8081"
ETH_FORWARD_ETH_CALLS: $ETH_FORWARD_ETH_CALLS SERVER_HTTP_PATH: "0.0.0.0:8082"
ETH_PROXY_ON_ERROR: $ETH_PROXY_ON_ERROR
ETH_HTTP_PATH: $ETH_HTTP_PATH
volumes:
- type: bind
source: ./chain.json
target: /tmp/chain.json
ports: ports:
- "127.0.0.1:8080:8080"
- "127.0.0.1:8081:8081" - "127.0.0.1:8081:8081"
graphql: graphql:
restart: unless-stopped restart: unless-stopped
depends_on: depends_on:
- ipld-eth-db - db
image: vulcanize/postgraphile:v1.0.1 image: vulcanize/postgraphile:v1.0.1
environment: environment:
- PG_HOST=db - PG_HOST=db

View File

@ -12,7 +12,7 @@ We can expose a number of different APIs for remote access to ipld-eth-server da
ipld-eth-server stores all processed data in Postgres using PG-IPFS, this includes all of the IPLD objects. ipld-eth-server stores all processed data in Postgres using PG-IPFS, this includes all of the IPLD objects.
[Postgraphile](https://www.graphile.org/postgraphile/) can be used to expose GraphQL endpoints for the Postgres tables. [Postgraphile](https://www.graphile.org/postgraphile/) can be used to expose GraphQL endpoints for the Postgres tables.
e.g. e.g.
`postgraphile --plugins @graphile/pg-pubsub --subscriptions --simple-subscriptions -c postgres://localhost:5432/vulcanize_public?sslmode=disable -s public,btc,eth -a -j` `postgraphile --plugins @graphile/pg-pubsub --subscriptions --simple-subscriptions -c postgres://localhost:5432/vulcanize_public?sslmode=disable -s public,btc,eth -a -j`
@ -33,16 +33,16 @@ by ipld-eth-server to filter and return a requested subset of chain data to the
An example of how to subscribe to a real-time Ethereum data feed from ipld-eth-server using the `Stream` RPC method is provided below An example of how to subscribe to a real-time Ethereum data feed from ipld-eth-server using the `Stream` RPC method is provided below
```go ```go
package main package main
import ( import (
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/vulcanize/ipld-eth-server/v3/pkg/client" "github.com/vulcanize/ipld-eth-server/pkg/client"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth" "github.com/vulcanize/ipld-eth-server/pkg/eth"
"github.com/vulcanize/ipld-eth-server/v3/pkg/watch" "github.com/vulcanize/ipld-eth-server/pkg/watch"
) )
config, _ := eth.NewEthSubscriptionConfig() config, _ := eth.NewEthSubscriptionConfig()
@ -153,16 +153,16 @@ the addresses in the `addresses` fields are pre-hashed ETH addresses.
An example of how to subscribe to a real-time Bitcoin data feed from ipld-eth-server using the `Stream` RPC method is provided below An example of how to subscribe to a real-time Bitcoin data feed from ipld-eth-server using the `Stream` RPC method is provided below
```go ```go
package main package main
import ( import (
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/vulcanize/ipld-eth-server/v3/pkg/btc" "github.com/vulcanize/ipld-eth-server/pkg/btc"
"github.com/vulcanize/ipld-eth-server/v3/pkg/client" "github.com/vulcanize/ipld-eth-server/pkg/client"
"github.com/vulcanize/ipld-eth-server/v3/pkg/watch" "github.com/vulcanize/ipld-eth-server/pkg/watch"
) )
config, _ := btc.NewBtcSubscriptionConfig() config, _ := btc.NewBtcSubscriptionConfig()

View File

@ -1,4 +1,20 @@
#!/bin/sh #!/bin/sh
# Runs the db migrations and starts the watcher services
# Construct the connection string for postgres
VDB_PG_CONNECT=postgresql://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOSTNAME:$DATABASE_PORT/$DATABASE_NAME?sslmode=disable
# Run the DB migrations
echo "Connecting with: $VDB_PG_CONNECT"
echo "Running database migrations"
./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up
rv=$?
if [ $rv != 0 ]; then
echo "Could not run migrations. Are the database details correct?"
exit 1
fi
echo "Beginning the ipld-eth-server process" echo "Beginning the ipld-eth-server process"

View File

@ -16,15 +16,12 @@
graphqlEndpoint = "127.0.0.1:8083" # $SERVER_GRAPHQL_ENDPOINT graphqlEndpoint = "127.0.0.1:8083" # $SERVER_GRAPHQL_ENDPOINT
[ethereum] [ethereum]
chainConfig = "./chain.json" # ETH_CHAIN_CONFIG
chainID = "1" # $ETH_CHAIN_ID chainID = "1" # $ETH_CHAIN_ID
defaultSender = "" # $ETH_DEFAULT_SENDER_ADDR defaultSender = "" # $ETH_DEFAULT_SENDER_ADDR
rpcGasCap = "1000000000000" # $ETH_RPC_GAS_CAP rpcGasCap = "1000000000000" # $ETH_RPC_GAS_CAP
httpPath = "127.0.0.1:8545" # $ETH_HTTP_PATH httpPath = "127.0.0.1:8545" # $ETH_HTTP_PATH
supportsStateDiff = true # $ETH_SUPPORTS_STATEDIFF supportsStateDiff = true # $ETH_SUPPORTS_STATEDIFF
forwardEthCalls = false # $ETH_FORWARD_ETH_CALLS
proxyOnError = true # $ETH_PROXY_ON_ERROR
nodeID = "arch1" # $ETH_NODE_ID nodeID = "arch1" # $ETH_NODE_ID
clientName = "Geth" # $ETH_CLIENT_NAME clientName = "Geth" # $ETH_CLIENT_NAME
genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" # $ETH_GENESIS_BLOCK genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" # $ETH_GENESIS_BLOCK
networkID = "1" # $ETH_NETWORK_ID networkID = "1" # $ETH_NETWORK_ID

go.mod (300 lines changed)
View File

@ -1,291 +1,29 @@
module github.com/vulcanize/ipld-eth-server/v3 module github.com/vulcanize/ipld-eth-server
go 1.18 go 1.13
require ( require (
github.com/ethereum/go-ethereum v1.10.18 github.com/ethereum/go-ethereum v1.9.25
github.com/graph-gophers/graphql-go v1.3.0 github.com/graph-gophers/graphql-go v0.0.0-20201003130358-c5bdf3b1108e
github.com/ipfs/go-block-format v0.0.3 github.com/ipfs/go-block-format v0.0.2
github.com/ipfs/go-cid v0.0.7 github.com/ipfs/go-cid v0.0.7
github.com/ipfs/go-ipfs-blockstore v1.0.1 github.com/ipfs/go-ipfs-blockstore v1.0.1
github.com/ipfs/go-ipfs-ds-help v1.0.0 github.com/ipfs/go-ipfs-ds-help v1.0.0
github.com/ipfs/go-ipld-format v0.2.0 github.com/ipfs/go-ipld-format v0.2.0
github.com/jinzhu/now v1.1.5 // indirect github.com/jmoiron/sqlx v1.2.0
github.com/jmoiron/sqlx v1.3.5 github.com/lib/pq v1.8.0
github.com/joho/godotenv v1.4.0 github.com/multiformats/go-multihash v0.0.14
github.com/lib/pq v1.10.5 github.com/onsi/ginkgo v1.15.0
github.com/machinebox/graphql v0.2.2 github.com/onsi/gomega v1.10.1
github.com/mailgun/groupcache/v2 v2.3.0 github.com/prometheus/client_golang v1.5.1
github.com/multiformats/go-multihash v0.1.0 github.com/sirupsen/logrus v1.6.0
github.com/onsi/ginkgo v1.16.5 github.com/spf13/cobra v1.0.0
github.com/onsi/gomega v1.19.0 github.com/spf13/viper v1.7.0
github.com/prometheus/client_golang v1.11.0 github.com/vulcanize/ipld-eth-indexer v0.7.0-alpha
github.com/sirupsen/logrus v1.8.1 github.com/vulcanize/ipfs-ethdb v0.0.2-alpha
github.com/spf13/cobra v1.4.0 golang.org/x/sys v0.0.0-20210218155724-8ebf48af031b // indirect
github.com/spf13/viper v1.11.0
github.com/thoas/go-funk v0.9.2 // indirect
github.com/vulcanize/eth-ipfs-state-validator/v3 v3.0.2
github.com/vulcanize/gap-filler v0.3.1
github.com/vulcanize/ipfs-ethdb/v3 v3.0.3
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
gorm.io/driver/postgres v1.3.7
gorm.io/gorm v1.23.5
) )
require ( replace github.com/ethereum/go-ethereum v1.9.25 => github.com/vulcanize/go-ethereum v1.9.25-statediff-0.0.15
bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc // indirect
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
github.com/Stebalien/go-bitfield v0.0.1 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
github.com/benbjohnson/clock v1.1.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/btcsuite/btcd v0.22.1 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
github.com/cenkalti/backoff/v4 v4.1.1 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/cheekybits/genny v1.0.0 // indirect
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect
github.com/cskr/pubsub v1.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 // indirect
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
github.com/flynn/noise v1.0.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/friendsofgo/graphiql v0.2.2 // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/georgysavva/scany v0.2.9 // indirect
github.com/go-ole/go-ole v1.2.1 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.3.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/graphql-go/graphql v0.7.9 // indirect
github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e // indirect
github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.0 // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/influxdata/influxdb v1.8.3 // indirect
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/go-bitswap v0.4.0 // indirect
github.com/ipfs/go-blockservice v0.1.7 // indirect
github.com/ipfs/go-cidutil v0.0.2 // indirect
github.com/ipfs/go-datastore v0.4.6 // indirect
github.com/ipfs/go-ds-measure v0.1.0 // indirect
github.com/ipfs/go-fetcher v1.5.0 // indirect
github.com/ipfs/go-filestore v1.0.0 // indirect
github.com/ipfs/go-fs-lock v0.0.7 // indirect
github.com/ipfs/go-graphsync v0.8.0 // indirect
github.com/ipfs/go-ipfs v0.10.0 // indirect
github.com/ipfs/go-ipfs-chunker v0.0.5 // indirect
github.com/ipfs/go-ipfs-config v0.16.0 // indirect
github.com/ipfs/go-ipfs-delay v0.0.1 // indirect
github.com/ipfs/go-ipfs-exchange-interface v0.0.1 // indirect
github.com/ipfs/go-ipfs-exchange-offline v0.0.1 // indirect
github.com/ipfs/go-ipfs-files v0.0.8 // indirect
github.com/ipfs/go-ipfs-keystore v0.0.2 // indirect
github.com/ipfs/go-ipfs-pinner v0.1.2 // indirect
github.com/ipfs/go-ipfs-posinfo v0.0.1 // indirect
github.com/ipfs/go-ipfs-pq v0.0.2 // indirect
github.com/ipfs/go-ipfs-provider v0.6.1 // indirect
github.com/ipfs/go-ipfs-routing v0.1.0 // indirect
github.com/ipfs/go-ipfs-util v0.0.2 // indirect
github.com/ipfs/go-ipld-cbor v0.0.5 // indirect
github.com/ipfs/go-ipld-legacy v0.1.0 // indirect
github.com/ipfs/go-ipns v0.1.2 // indirect
github.com/ipfs/go-log v1.0.5 // indirect
github.com/ipfs/go-log/v2 v2.3.0 // indirect
github.com/ipfs/go-merkledag v0.4.0 // indirect
github.com/ipfs/go-metrics-interface v0.0.1 // indirect
github.com/ipfs/go-mfs v0.1.2 // indirect
github.com/ipfs/go-namesys v0.3.1 // indirect
github.com/ipfs/go-path v0.1.2 // indirect
github.com/ipfs/go-peertaskqueue v0.4.0 // indirect
github.com/ipfs/go-unixfs v0.2.5 // indirect
github.com/ipfs/go-unixfsnode v1.1.3 // indirect
github.com/ipfs/go-verifcid v0.0.1 // indirect
github.com/ipfs/interface-go-ipfs-core v0.5.1 // indirect
github.com/ipld/go-codec-dagpb v1.3.0 // indirect
github.com/ipld/go-ipld-prime v0.12.2 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
github.com/jackc/pgconn v1.12.1 // indirect
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgproto3/v2 v2.3.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
github.com/jackc/pgtype v1.11.0 // indirect
github.com/jackc/pgx/v4 v4.16.1 // indirect
github.com/jackc/puddle v1.2.1 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
github.com/jinzhu/copier v0.2.4 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/klauspost/compress v1.11.7 // indirect
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
github.com/koron/go-ssdp v0.0.2 // indirect
github.com/libp2p/go-addr-util v0.1.0 // indirect
github.com/libp2p/go-buffer-pool v0.0.2 // indirect
github.com/libp2p/go-cidranger v1.1.0 // indirect
github.com/libp2p/go-conn-security-multistream v0.2.1 // indirect
github.com/libp2p/go-doh-resolver v0.3.1 // indirect
github.com/libp2p/go-eventbus v0.2.1 // indirect
github.com/libp2p/go-flow-metrics v0.0.3 // indirect
github.com/libp2p/go-libp2p v0.15.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 // indirect
github.com/libp2p/go-libp2p-autonat v0.4.2 // indirect
github.com/libp2p/go-libp2p-blankhost v0.2.0 // indirect
github.com/libp2p/go-libp2p-circuit v0.4.0 // indirect
github.com/libp2p/go-libp2p-connmgr v0.2.4 // indirect
github.com/libp2p/go-libp2p-core v0.9.0 // indirect
github.com/libp2p/go-libp2p-discovery v0.5.1 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.13.1 // indirect
github.com/libp2p/go-libp2p-kbucket v0.4.7 // indirect
github.com/libp2p/go-libp2p-loggables v0.1.0 // indirect
github.com/libp2p/go-libp2p-mplex v0.4.1 // indirect
github.com/libp2p/go-libp2p-nat v0.0.6 // indirect
github.com/libp2p/go-libp2p-noise v0.2.2 // indirect
github.com/libp2p/go-libp2p-peerstore v0.2.8 // indirect
github.com/libp2p/go-libp2p-pnet v0.2.0 // indirect
github.com/libp2p/go-libp2p-pubsub v0.5.4 // indirect
github.com/libp2p/go-libp2p-pubsub-router v0.4.0 // indirect
github.com/libp2p/go-libp2p-quic-transport v0.12.0 // indirect
github.com/libp2p/go-libp2p-record v0.1.3 // indirect
github.com/libp2p/go-libp2p-routing-helpers v0.2.3 // indirect
github.com/libp2p/go-libp2p-swarm v0.5.3 // indirect
github.com/libp2p/go-libp2p-tls v0.2.0 // indirect
github.com/libp2p/go-libp2p-transport-upgrader v0.4.6 // indirect
github.com/libp2p/go-libp2p-xor v0.0.0-20210714161855-5c005aca55db // indirect
github.com/libp2p/go-libp2p-yamux v0.5.4 // indirect
github.com/libp2p/go-maddr-filter v0.1.0 // indirect
github.com/libp2p/go-mplex v0.3.0 // indirect
github.com/libp2p/go-msgio v0.0.6 // indirect
github.com/libp2p/go-nat v0.0.5 // indirect
github.com/libp2p/go-netroute v0.1.6 // indirect
github.com/libp2p/go-openssl v0.0.7 // indirect
github.com/libp2p/go-reuseport v0.0.2 // indirect
github.com/libp2p/go-reuseport-transport v0.0.5 // indirect
github.com/libp2p/go-sockaddr v0.1.1 // indirect
github.com/libp2p/go-stream-muxer-multistream v0.3.0 // indirect
github.com/libp2p/go-tcp-transport v0.2.8 // indirect
github.com/libp2p/go-ws-transport v0.5.0 // indirect
github.com/libp2p/go-yamux/v2 v2.2.0 // indirect
github.com/libp2p/zeroconf/v2 v2.0.0 // indirect
github.com/lucas-clemente/quic-go v0.26.0 // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
github.com/marten-seemann/qtls-go1-17 v0.1.1 // indirect
github.com/marten-seemann/qtls-go1-18 v0.1.1 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/matryer/is v1.4.0 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/miekg/dns v1.1.43 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.4.3 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.0.3 // indirect
github.com/multiformats/go-base36 v0.1.0 // indirect
github.com/multiformats/go-multiaddr v0.4.0 // indirect
github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.0.3 // indirect
github.com/multiformats/go-multicodec v0.3.0 // indirect
github.com/multiformats/go-multistream v0.2.2 // indirect
github.com/multiformats/go-varint v0.0.6 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pelletier/go-toml v1.9.4 // indirect
github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
github.com/pganalyze/pg_query_go/v2 v2.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.30.0 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/prometheus/tsdb v0.7.1 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/segmentio/fasthash v1.0.3 // indirect
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.8.2 // indirect
github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
github.com/stretchr/objx v0.2.0 // indirect
github.com/stretchr/testify v1.7.1 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.5 // indirect
github.com/tklauser/numcpus v0.2.2 // indirect
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
github.com/valyala/fastjson v1.6.3 // indirect
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect
github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2 // indirect
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9 // indirect
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 // indirect
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect
go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/otel v0.20.0 // indirect
go.opentelemetry.io/otel/metric v0.20.0 // indirect
go.opentelemetry.io/otel/trace v0.20.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/dig v1.10.0 // indirect
go.uber.org/fx v1.13.1 // indirect
go.uber.org/multierr v1.7.0 // indirect
go.uber.org/zap v1.19.0 // indirect
go4.org v0.0.0-20200411211856-f5505b9728dd // indirect
golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57 // indirect
golang.org/x/net v0.0.0-20220412020605-290c469a71a5 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023 // indirect
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/ini.v1 v1.66.4 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
lukechampine.com/blake3 v1.1.6 // indirect
)
replace github.com/ethereum/go-ethereum v1.10.18 => github.com/vulcanize/go-ethereum v1.10.18-statediff-3.2.2 replace github.com/vulcanize/ipfs-ethdb v0.0.2-alpha => github.com/vulcanize/pg-ipfs-ethdb v0.0.2-alpha

go.sum (1609 lines)

File diff suppressed because it is too large

View File

@ -18,7 +18,7 @@ package main
import ( import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-eth-server/v3/cmd" "github.com/vulcanize/ipld-eth-server/cmd"
) )
func main() { func main() {

pkg/.DS_Store (binary, vendored)

Binary file not shown.

View File

@ -20,10 +20,11 @@ package client
import ( import (
"context" "context"
"github.com/vulcanize/ipld-eth-server/pkg/eth"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth" "github.com/vulcanize/ipld-eth-server/pkg/serve"
"github.com/vulcanize/ipld-eth-server/v3/pkg/serve"
) )
// Client is used to subscribe to the ipld-eth-server ipld data stream // Client is used to subscribe to the ipld-eth-server ipld data stream

View File

@ -18,31 +18,28 @@ package eth
import ( import (
"context" "context"
"database/sql"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"io" "math"
"math/big" "math/big"
"strconv"
"time" "time"
"github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff" "github.com/ethereum/go-ethereum/statediff"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared" "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-server/pkg/shared"
) )
// APIName is the namespace for the watcher's eth api // APIName is the namespace for the watcher's eth api
@ -51,27 +48,18 @@ const APIName = "eth"
// APIVersion is the version of the watcher's eth api // APIVersion is the version of the watcher's eth api
const APIVersion = "0.0.1" const APIVersion = "0.0.1"
// PublicEthAPI is the eth namespace API
type PublicEthAPI struct { type PublicEthAPI struct {
// Local db backend // Local db backend
B *Backend B *Backend
// Proxy node for forwarding cache misses // Proxy node for forwarding cache misses
supportsStateDiff bool // Whether the remote node supports the statediff_writeStateDiffAt endpoint, if it does we can fill the local cache when we hit a miss supportsStateDiff bool // Whether or not the remote node supports the statediff_writeStateDiffAt endpoint, if it does we can fill the local cache when we hit a miss
rpc *rpc.Client rpc *rpc.Client
ethClient *ethclient.Client ethClient *ethclient.Client
forwardEthCalls bool // if true, forward eth_call calls directly to the configured proxy node
proxyOnError bool // turn on regular proxy fall-through on errors; needed to test difference between direct and indirect fall-through
} }
// NewPublicEthAPI creates a new PublicEthAPI with the provided underlying Backend // NewPublicEthAPI creates a new PublicEthAPI with the provided underlying Backend
func NewPublicEthAPI(b *Backend, client *rpc.Client, supportsStateDiff, forwardEthCalls, proxyOnError bool) (*PublicEthAPI, error) { func NewPublicEthAPI(b *Backend, client *rpc.Client, supportsStateDiff bool) *PublicEthAPI {
if forwardEthCalls && client == nil {
return nil, errors.New("ipld-eth-server is configured to forward eth_calls to proxy node but no proxy node is configured")
}
if proxyOnError && client == nil {
return nil, errors.New("ipld-eth-server is configured to forward all calls to proxy node on errors but no proxy node is configured")
}
var ethClient *ethclient.Client var ethClient *ethclient.Client
if client != nil { if client != nil {
ethClient = ethclient.NewClient(client) ethClient = ethclient.NewClient(client)
@ -81,9 +69,7 @@ func NewPublicEthAPI(b *Backend, client *rpc.Client, supportsStateDiff, forwardE
supportsStateDiff: supportsStateDiff, supportsStateDiff: supportsStateDiff,
rpc: client, rpc: client,
ethClient: ethClient, ethClient: ethClient,
forwardEthCalls: forwardEthCalls, }
proxyOnError: proxyOnError,
}, nil
} }
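The hunks that follow all share the same local-first shape: answer from the Postgres-backed Backend when possible and only fall through to the proxy node when permitted (a feature flag on one side of the diff, a nil check on the client on the other), backfilling the local cache asynchronously via writeStateDiffAt on a proxy hit. A self-contained sketch of that pattern, with hypothetical names and not the repository's code:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// headerAPI illustrates the local-first, proxy-on-miss pattern used throughout
// PublicEthAPI. All names here are invented for illustration.
type headerAPI struct {
	local        func(ctx context.Context, n int64) (string, error) // local Postgres-backed lookup
	remote       func(ctx context.Context, n int64) (string, error) // proxy node lookup (may be nil)
	proxyOnError bool
}

func (a headerAPI) getHeader(ctx context.Context, n int64) (string, error) {
	h, err := a.local(ctx, n)
	if err == nil {
		return h, nil
	}
	if a.proxyOnError && a.remote != nil {
		if h, rerr := a.remote(ctx, n); rerr == nil {
			// The real server fires statediff_writeStateDiffAt asynchronously here
			// (go pea.writeStateDiffAt(...)) so the local cache is filled for next time.
			return h, nil
		}
	}
	return "", err
}

func main() {
	api := headerAPI{
		local:        func(ctx context.Context, n int64) (string, error) { return "", errors.New("not found locally") },
		remote:       func(ctx context.Context, n int64) (string, error) { return fmt.Sprintf("header-%d", n), nil },
		proxyOnError: true,
	}
	h, err := api.getHeader(context.Background(), 42)
	fmt.Println(h, err)
}
```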
/* /*
@ -100,13 +86,12 @@ func (pea *PublicEthAPI) GetHeaderByNumber(ctx context.Context, number rpc.Block
if header != nil && err == nil { if header != nil && err == nil {
return pea.rpcMarshalHeader(header) return pea.rpcMarshalHeader(header)
} }
if pea.proxyOnError { if pea.ethClient != nil {
if header, err := pea.ethClient.HeaderByNumber(ctx, big.NewInt(number.Int64())); header != nil && err == nil { if header, err := pea.ethClient.HeaderByNumber(ctx, big.NewInt(number.Int64())); header != nil && err == nil {
go pea.writeStateDiffAt(number.Int64()) go pea.writeStateDiffAt(number.Int64())
return pea.rpcMarshalHeader(header) return pea.rpcMarshalHeader(header)
} }
} }
return nil, err return nil, err
} }
@ -118,8 +103,7 @@ func (pea *PublicEthAPI) GetHeaderByHash(ctx context.Context, hash common.Hash)
return res return res
} }
} }
if pea.ethClient != nil {
if pea.proxyOnError {
if header, err := pea.ethClient.HeaderByHash(ctx, hash); header != nil && err == nil { if header, err := pea.ethClient.HeaderByHash(ctx, hash); header != nil && err == nil {
go pea.writeStateDiffFor(hash) go pea.writeStateDiffFor(hash)
if res, err := pea.rpcMarshalHeader(header); err != nil { if res, err := pea.rpcMarshalHeader(header); err != nil {
@ -127,7 +111,6 @@ func (pea *PublicEthAPI) GetHeaderByHash(ctx context.Context, hash common.Hash)
} }
} }
} }
return nil return nil
} }
@ -138,9 +121,7 @@ func (pea *PublicEthAPI) rpcMarshalHeader(header *types.Header) (map[string]inte
if err != nil { if err != nil {
return nil, err return nil, err
} }
fields["totalDifficulty"] = (*hexutil.Big)(td) fields["totalDifficulty"] = (*hexutil.Big)(td)
return fields, nil return fields, nil
} }
@ -160,14 +141,12 @@ func (pea *PublicEthAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockN
if block != nil && err == nil { if block != nil && err == nil {
return pea.rpcMarshalBlock(block, true, fullTx) return pea.rpcMarshalBlock(block, true, fullTx)
} }
if pea.ethClient != nil {
if pea.proxyOnError {
if block, err := pea.ethClient.BlockByNumber(ctx, big.NewInt(number.Int64())); block != nil && err == nil { if block, err := pea.ethClient.BlockByNumber(ctx, big.NewInt(number.Int64())); block != nil && err == nil {
go pea.writeStateDiffAt(number.Int64()) go pea.writeStateDiffAt(number.Int64())
return pea.rpcMarshalBlock(block, true, fullTx) return pea.rpcMarshalBlock(block, true, fullTx)
} }
} }
return nil, err return nil, err
} }
@ -178,35 +157,15 @@ func (pea *PublicEthAPI) GetBlockByHash(ctx context.Context, hash common.Hash, f
if block != nil && err == nil { if block != nil && err == nil {
return pea.rpcMarshalBlock(block, true, fullTx) return pea.rpcMarshalBlock(block, true, fullTx)
} }
if pea.ethClient != nil {
if pea.proxyOnError {
if block, err := pea.ethClient.BlockByHash(ctx, hash); block != nil && err == nil { if block, err := pea.ethClient.BlockByHash(ctx, hash); block != nil && err == nil {
go pea.writeStateDiffFor(hash) go pea.writeStateDiffFor(hash)
return pea.rpcMarshalBlock(block, true, fullTx) return pea.rpcMarshalBlock(block, true, fullTx)
} }
} }
return nil, err return nil, err
} }
// ChainId is the EIP-155 replay-protection chain id for the current ethereum chain config.
func (pea *PublicEthAPI) ChainId() (*hexutil.Big, error) {
block, err := pea.B.CurrentBlock()
if err != nil {
if pea.proxyOnError {
if id, err := pea.ethClient.ChainID(context.Background()); err == nil {
return (*hexutil.Big)(id), nil
}
}
return nil, err
}
if config := pea.B.Config.ChainConfig; config.IsEIP155(block.Number()) {
return (*hexutil.Big)(config.ChainID), nil
}
return nil, fmt.Errorf("chain not synced beyond EIP-155 replay-protection fork block")
}
/* /*
Uncles Uncles
@ -226,14 +185,12 @@ func (pea *PublicEthAPI) GetUncleByBlockNumberAndIndex(ctx context.Context, bloc
block = types.NewBlockWithHeader(uncles[index]) block = types.NewBlockWithHeader(uncles[index])
return pea.rpcMarshalBlock(block, false, false) return pea.rpcMarshalBlock(block, false, false)
} }
if pea.rpc != nil {
if pea.proxyOnError {
if uncle, uncleHashes, err := getBlockAndUncleHashes(pea.rpc, ctx, "eth_getUncleByBlockNumberAndIndex", blockNr, index); uncle != nil && err == nil { if uncle, uncleHashes, err := getBlockAndUncleHashes(pea.rpc, ctx, "eth_getUncleByBlockNumberAndIndex", blockNr, index); uncle != nil && err == nil {
go pea.writeStateDiffAt(blockNr.Int64()) go pea.writeStateDiffAt(blockNr.Int64())
return pea.rpcMarshalBlockWithUncleHashes(uncle, uncleHashes, false, false) return pea.rpcMarshalBlockWithUncleHashes(uncle, uncleHashes, false, false)
} }
} }
return nil, err return nil, err
} }
@ -250,14 +207,12 @@ func (pea *PublicEthAPI) GetUncleByBlockHashAndIndex(ctx context.Context, blockH
block = types.NewBlockWithHeader(uncles[index]) block = types.NewBlockWithHeader(uncles[index])
return pea.rpcMarshalBlock(block, false, false) return pea.rpcMarshalBlock(block, false, false)
} }
if pea.rpc != nil {
if pea.proxyOnError {
if uncle, uncleHashes, err := getBlockAndUncleHashes(pea.rpc, ctx, "eth_getUncleByBlockHashAndIndex", blockHash, index); uncle != nil && err == nil { if uncle, uncleHashes, err := getBlockAndUncleHashes(pea.rpc, ctx, "eth_getUncleByBlockHashAndIndex", blockHash, index); uncle != nil && err == nil {
go pea.writeStateDiffFor(blockHash) go pea.writeStateDiffFor(blockHash)
return pea.rpcMarshalBlockWithUncleHashes(uncle, uncleHashes, false, false) return pea.rpcMarshalBlockWithUncleHashes(uncle, uncleHashes, false, false)
} }
} }
return nil, err return nil, err
} }
@ -267,15 +222,13 @@ func (pea *PublicEthAPI) GetUncleCountByBlockNumber(ctx context.Context, blockNr
n := hexutil.Uint(len(block.Uncles())) n := hexutil.Uint(len(block.Uncles()))
return &n return &n
} }
if pea.rpc != nil {
if pea.proxyOnError {
var num *hexutil.Uint var num *hexutil.Uint
if err := pea.rpc.CallContext(ctx, &num, "eth_getUncleCountByBlockNumber", blockNr); num != nil && err == nil { if err := pea.rpc.CallContext(ctx, &num, "eth_getUncleCountByBlockNumber", blockNr); num != nil && err == nil {
go pea.writeStateDiffAt(blockNr.Int64()) go pea.writeStateDiffAt(blockNr.Int64())
return num return num
} }
} }
return nil return nil
} }
@ -285,15 +238,13 @@ func (pea *PublicEthAPI) GetUncleCountByBlockHash(ctx context.Context, blockHash
n := hexutil.Uint(len(block.Uncles())) n := hexutil.Uint(len(block.Uncles()))
return &n return &n
} }
if pea.rpc != nil {
if pea.proxyOnError {
var num *hexutil.Uint var num *hexutil.Uint
if err := pea.rpc.CallContext(ctx, &num, "eth_getUncleCountByBlockHash", blockHash); num != nil && err == nil { if err := pea.rpc.CallContext(ctx, &num, "eth_getUncleCountByBlockHash", blockHash); num != nil && err == nil {
go pea.writeStateDiffFor(blockHash) go pea.writeStateDiffFor(blockHash)
return num return num
} }
} }
return nil return nil
} }
@ -309,15 +260,13 @@ func (pea *PublicEthAPI) GetTransactionCount(ctx context.Context, address common
if count != nil && err == nil { if count != nil && err == nil {
return count, nil return count, nil
} }
if pea.rpc != nil {
if pea.proxyOnError {
var num *hexutil.Uint64 var num *hexutil.Uint64
if err := pea.rpc.CallContext(ctx, &num, "eth_getTransactionCount", address, blockNrOrHash); num != nil && err == nil { if err := pea.rpc.CallContext(ctx, &num, "eth_getTransactionCount", address, blockNrOrHash); num != nil && err == nil {
go pea.writeStateDiffAtOrFor(blockNrOrHash) go pea.writeStateDiffAtOrFor(blockNrOrHash)
return num, nil return num, nil
} }
} }
return nil, err return nil, err
} }
@ -326,7 +275,6 @@ func (pea *PublicEthAPI) localGetTransactionCount(ctx context.Context, address c
if err != nil { if err != nil {
return nil, err return nil, err
} }
nonce := hexutil.Uint64(account.Nonce) nonce := hexutil.Uint64(account.Nonce)
return &nonce, nil return &nonce, nil
} }
@ -337,15 +285,13 @@ func (pea *PublicEthAPI) GetBlockTransactionCountByNumber(ctx context.Context, b
n := hexutil.Uint(len(block.Transactions())) n := hexutil.Uint(len(block.Transactions()))
return &n return &n
} }
if pea.rpc != nil {
if pea.proxyOnError {
var num *hexutil.Uint var num *hexutil.Uint
if err := pea.rpc.CallContext(ctx, &num, "eth_getBlockTransactionCountByNumber", blockNr); num != nil && err == nil { if err := pea.rpc.CallContext(ctx, &num, "eth_getBlockTransactionCountByNumber", blockNr); num != nil && err == nil {
go pea.writeStateDiffAt(blockNr.Int64()) go pea.writeStateDiffAt(blockNr.Int64())
return num return num
} }
} }
return nil return nil
} }
@ -355,15 +301,13 @@ func (pea *PublicEthAPI) GetBlockTransactionCountByHash(ctx context.Context, blo
n := hexutil.Uint(len(block.Transactions())) n := hexutil.Uint(len(block.Transactions()))
return &n return &n
} }
if pea.rpc != nil {
if pea.proxyOnError {
var num *hexutil.Uint var num *hexutil.Uint
if err := pea.rpc.CallContext(ctx, &num, "eth_getBlockTransactionCountByHash", blockHash); num != nil && err == nil { if err := pea.rpc.CallContext(ctx, &num, "eth_getBlockTransactionCountByHash", blockHash); num != nil && err == nil {
go pea.writeStateDiffFor(blockHash) go pea.writeStateDiffFor(blockHash)
return num return num
} }
} }
return nil return nil
} }
@ -372,15 +316,13 @@ func (pea *PublicEthAPI) GetTransactionByBlockNumberAndIndex(ctx context.Context
if block, _ := pea.B.BlockByNumber(ctx, blockNr); block != nil { if block, _ := pea.B.BlockByNumber(ctx, blockNr); block != nil {
return newRPCTransactionFromBlockIndex(block, uint64(index)) return newRPCTransactionFromBlockIndex(block, uint64(index))
} }
if pea.rpc != nil {
if pea.proxyOnError {
var tx *RPCTransaction var tx *RPCTransaction
if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByBlockNumberAndIndex", blockNr, index); tx != nil && err == nil { if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByBlockNumberAndIndex", blockNr, index); tx != nil && err == nil {
go pea.writeStateDiffAt(blockNr.Int64()) go pea.writeStateDiffAt(blockNr.Int64())
return tx return tx
} }
} }
return nil return nil
} }
@ -389,15 +331,13 @@ func (pea *PublicEthAPI) GetTransactionByBlockHashAndIndex(ctx context.Context,
if block, _ := pea.B.BlockByHash(ctx, blockHash); block != nil { if block, _ := pea.B.BlockByHash(ctx, blockHash); block != nil {
return newRPCTransactionFromBlockIndex(block, uint64(index)) return newRPCTransactionFromBlockIndex(block, uint64(index))
} }
if pea.rpc != nil {
if pea.proxyOnError {
var tx *RPCTransaction var tx *RPCTransaction
if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByBlockHashAndIndex", blockHash, index); tx != nil && err == nil { if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByBlockHashAndIndex", blockHash, index); tx != nil && err == nil {
go pea.writeStateDiffFor(blockHash) go pea.writeStateDiffFor(blockHash)
return tx return tx
} }
} }
return nil return nil
} }
@ -406,7 +346,7 @@ func (pea *PublicEthAPI) GetRawTransactionByBlockNumberAndIndex(ctx context.Cont
if block, _ := pea.B.BlockByNumber(ctx, blockNr); block != nil { if block, _ := pea.B.BlockByNumber(ctx, blockNr); block != nil {
return newRPCRawTransactionFromBlockIndex(block, uint64(index)) return newRPCRawTransactionFromBlockIndex(block, uint64(index))
} }
if pea.proxyOnError { if pea.rpc != nil {
var tx hexutil.Bytes var tx hexutil.Bytes
if err := pea.rpc.CallContext(ctx, &tx, "eth_getRawTransactionByBlockNumberAndIndex", blockNr, index); tx != nil && err == nil { if err := pea.rpc.CallContext(ctx, &tx, "eth_getRawTransactionByBlockNumberAndIndex", blockNr, index); tx != nil && err == nil {
go pea.writeStateDiffAt(blockNr.Int64()) go pea.writeStateDiffAt(blockNr.Int64())
@ -421,7 +361,7 @@ func (pea *PublicEthAPI) GetRawTransactionByBlockHashAndIndex(ctx context.Contex
if block, _ := pea.B.BlockByHash(ctx, blockHash); block != nil { if block, _ := pea.B.BlockByHash(ctx, blockHash); block != nil {
return newRPCRawTransactionFromBlockIndex(block, uint64(index)) return newRPCRawTransactionFromBlockIndex(block, uint64(index))
} }
if pea.proxyOnError { if pea.rpc != nil {
var tx hexutil.Bytes var tx hexutil.Bytes
if err := pea.rpc.CallContext(ctx, &tx, "eth_getRawTransactionByBlockHashAndIndex", blockHash, index); tx != nil && err == nil { if err := pea.rpc.CallContext(ctx, &tx, "eth_getRawTransactionByBlockHashAndIndex", blockHash, index); tx != nil && err == nil {
go pea.writeStateDiffFor(blockHash) go pea.writeStateDiffFor(blockHash)
@ -436,14 +376,9 @@ func (pea *PublicEthAPI) GetRawTransactionByBlockHashAndIndex(ctx context.Contex
func (pea *PublicEthAPI) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) { func (pea *PublicEthAPI) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) {
tx, blockHash, blockNumber, index, err := pea.B.GetTransaction(ctx, hash) tx, blockHash, blockNumber, index, err := pea.B.GetTransaction(ctx, hash)
if tx != nil && err == nil { if tx != nil && err == nil {
header, err := pea.B.HeaderByHash(ctx, blockHash) return NewRPCTransaction(tx, blockHash, blockNumber, index), nil
if err != nil {
return nil, err
}
return NewRPCTransaction(tx, blockHash, blockNumber, index, header.BaseFee), nil
} }
if pea.proxyOnError { if pea.rpc != nil {
var tx *RPCTransaction var tx *RPCTransaction
if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByHash", hash); tx != nil && err == nil { if err := pea.rpc.CallContext(ctx, &tx, "eth_getTransactionByHash", hash); tx != nil && err == nil {
go pea.writeStateDiffFor(hash) go pea.writeStateDiffFor(hash)
@ -460,7 +395,7 @@ func (pea *PublicEthAPI) GetRawTransactionByHash(ctx context.Context, hash commo
if tx != nil && err == nil { if tx != nil && err == nil {
return rlp.EncodeToBytes(tx) return rlp.EncodeToBytes(tx)
} }
if pea.proxyOnError { if pea.rpc != nil {
var tx hexutil.Bytes var tx hexutil.Bytes
if err := pea.rpc.CallContext(ctx, &tx, "eth_getRawTransactionByHash", hash); tx != nil && err == nil { if err := pea.rpc.CallContext(ctx, &tx, "eth_getRawTransactionByHash", hash); tx != nil && err == nil {
go pea.writeStateDiffFor(hash) go pea.writeStateDiffFor(hash)
@ -482,7 +417,7 @@ func (pea *PublicEthAPI) GetTransactionReceipt(ctx context.Context, hash common.
if receipt != nil && err == nil { if receipt != nil && err == nil {
return receipt, nil return receipt, nil
} }
if pea.proxyOnError { if pea.rpc != nil {
if receipt := pea.remoteGetTransactionReceipt(ctx, hash); receipt != nil { if receipt := pea.remoteGetTransactionReceipt(ctx, hash); receipt != nil {
go pea.writeStateDiffFor(hash) go pea.writeStateDiffFor(hash)
return receipt, nil return receipt, nil
@ -504,14 +439,6 @@ func (pea *PublicEthAPI) localGetTransactionReceipt(ctx context.Context, hash co
if err != nil { if err != nil {
return nil, err return nil, err
} }
block, err := pea.B.BlockByHash(ctx, blockHash)
if err != nil {
return nil, err
}
err = receipts.DeriveFields(pea.B.Config.ChainConfig, blockHash, blockNumber, block.Transactions())
if err != nil {
return nil, err
}
if len(receipts) <= int(index) { if len(receipts) <= int(index) {
return nil, nil return nil, nil
} }
@ -578,30 +505,30 @@ func (pea *PublicEthAPI) remoteGetTransactionReceipt(ctx context.Context, hash c
// GetLogs returns logs matching the given argument that are stored within the state. // GetLogs returns logs matching the given argument that are stored within the state.
// //
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs
func (pea *PublicEthAPI) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([]*types.Log, error) { func (pea *PublicEthAPI) GetLogs(ctx context.Context, crit ethereum.FilterQuery) ([]*types.Log, error) {
logs, err := pea.localGetLogs(crit) logs, err := pea.localGetLogs(ctx, crit)
if err != nil && pea.proxyOnError { if err != nil && pea.rpc != nil {
var res []*types.Log if arg, err := toFilterArg(crit); err == nil {
if err := pea.rpc.CallContext(ctx, &res, "eth_getLogs", crit); err == nil { var res []*types.Log
go pea.writeStateDiffWithCriteria(crit) if err := pea.rpc.CallContext(ctx, &res, "eth_getLogs", arg); err == nil {
return res, nil go pea.writeStateDiffWithCriteria(crit)
return res, nil
}
} }
} }
return logs, err return logs, err
} }
func (pea *PublicEthAPI) localGetLogs(crit filters.FilterCriteria) ([]*types.Log, error) { func (pea *PublicEthAPI) localGetLogs(ctx context.Context, crit ethereum.FilterQuery) ([]*types.Log, error) {
// TODO: this can be optimized away from using the old cid retriever and ipld fetcher interfaces // TODO: this can be optimized away from using the old cid retriever and ipld fetcher interfaces
// Convert FilterQuery into ReceiptFilter // Convert FilterQuery into ReceiptFilter
addrStrs := make([]string, len(crit.Addresses)) addrStrs := make([]string, len(crit.Addresses))
for i, addr := range crit.Addresses { for i, addr := range crit.Addresses {
addrStrs[i] = addr.String() addrStrs[i] = addr.String()
} }
topicStrSets := make([][]string, 4)
topicStrSets := make([][]string, len(crit.Topics))
for i, topicSet := range crit.Topics { for i, topicSet := range crit.Topics {
if i > 3 { if i > 3 {
topicStrSets = topicStrSets[:4]
// don't allow more than 4 topics // don't allow more than 4 topics
break break
} }
@ -630,16 +557,21 @@ func (pea *PublicEthAPI) localGetLogs(crit filters.FilterCriteria) ([]*types.Log
} }
}() }()
// If we have a blockHash to filter on, fire off single retrieval query // If we have a blockhash to filter on, fire off single retrieval query
if crit.BlockHash != nil { if crit.BlockHash != nil {
filteredLogs, err := pea.B.Retriever.RetrieveFilteredLog(tx, filter, 0, crit.BlockHash) rctCIDs, err := pea.B.Retriever.RetrieveRctCIDs(tx, filter, 0, crit.BlockHash, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
rctIPLDs, err := pea.B.Fetcher.FetchRcts(tx, rctCIDs)
return decomposeLogs(filteredLogs) if err != nil {
return nil, err
}
if err := tx.Commit(); err != nil {
return nil, err
}
return extractLogsOfInterest(rctIPLDs, filter.Topics)
} }
// Otherwise, create block range from criteria // Otherwise, create block range from criteria
// nil values are filled in; to request a single block have both ToBlock and FromBlock equal that number // nil values are filled in; to request a single block have both ToBlock and FromBlock equal that number
startingBlock := crit.FromBlock startingBlock := crit.FromBlock
@ -647,7 +579,6 @@ func (pea *PublicEthAPI) localGetLogs(crit filters.FilterCriteria) ([]*types.Log
if startingBlock == nil { if startingBlock == nil {
startingBlock = common.Big0 startingBlock = common.Big0
} }
if endingBlock == nil { if endingBlock == nil {
endingBlockInt, err := pea.B.Retriever.RetrieveLastBlockNumber() endingBlockInt, err := pea.B.Retriever.RetrieveLastBlockNumber()
if err != nil { if err != nil {
@ -655,28 +586,24 @@ func (pea *PublicEthAPI) localGetLogs(crit filters.FilterCriteria) ([]*types.Log
} }
endingBlock = big.NewInt(endingBlockInt) endingBlock = big.NewInt(endingBlockInt)
} }
start := startingBlock.Int64() start := startingBlock.Int64()
end := endingBlock.Int64() end := endingBlock.Int64()
var logs []*types.Log allRctCIDs := make([]eth.ReceiptModel, 0)
for i := start; i <= end; i++ { for i := start; i <= end; i++ {
filteredLogs, err := pea.B.Retriever.RetrieveFilteredLog(tx, filter, i, nil) rctCIDs, err := pea.B.Retriever.RetrieveRctCIDs(tx, filter, i, nil, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
allRctCIDs = append(allRctCIDs, rctCIDs...)
logCIDs, err := decomposeLogs(filteredLogs) }
if err != nil { rctIPLDs, err := pea.B.Fetcher.FetchRcts(tx, allRctCIDs)
return nil, err if err != nil {
} return nil, err
logs = append(logs, logCIDs...)
} }
if err := tx.Commit(); err != nil { if err := tx.Commit(); err != nil {
return nil, err return nil, err
} }
logs, err := extractLogsOfInterest(rctIPLDs, filter.Topics)
return logs, err // need to return err variable so that we return the err = tx.Commit() assignment in the defer return logs, err // need to return err variable so that we return the err = tx.Commit() assignment in the defer
} }
@ -694,17 +621,13 @@ func (pea *PublicEthAPI) GetBalance(ctx context.Context, address common.Address,
if bal != nil && err == nil { if bal != nil && err == nil {
return bal, nil return bal, nil
} }
if pea.proxyOnError { if pea.rpc != nil {
var res *hexutil.Big var res *hexutil.Big
if err := pea.rpc.CallContext(ctx, &res, "eth_getBalance", address, blockNrOrHash); res != nil && err == nil { if err := pea.rpc.CallContext(ctx, &res, "eth_getBalance", address, blockNrOrHash); res != nil && err == nil {
go pea.writeStateDiffAtOrFor(blockNrOrHash) go pea.writeStateDiffAtOrFor(blockNrOrHash)
return res, nil return res, nil
} }
} }
if err == sql.ErrNoRows {
return (*hexutil.Big)(big.NewInt(0)), nil
}
return nil, err return nil, err
} }
@ -722,28 +645,15 @@ func (pea *PublicEthAPI) localGetBalance(ctx context.Context, address common.Add
func (pea *PublicEthAPI) GetStorageAt(ctx context.Context, address common.Address, key string, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { func (pea *PublicEthAPI) GetStorageAt(ctx context.Context, address common.Address, key string, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) {
storageVal, err := pea.B.GetStorageByNumberOrHash(ctx, address, common.HexToHash(key), blockNrOrHash) storageVal, err := pea.B.GetStorageByNumberOrHash(ctx, address, common.HexToHash(key), blockNrOrHash)
if storageVal != nil && err == nil { if storageVal != nil && err == nil {
var value common.Hash return storageVal, nil
_, content, _, err := rlp.Split(storageVal)
if err == io.ErrUnexpectedEOF {
return hexutil.Bytes{}, nil
}
if err != nil {
return nil, err
}
value.SetBytes(content)
return value[:], nil
} }
if pea.proxyOnError { if pea.rpc != nil {
var res hexutil.Bytes var res hexutil.Bytes
if err := pea.rpc.CallContext(ctx, &res, "eth_getStorageAt", address, key, blockNrOrHash); res != nil && err == nil { if err := pea.rpc.CallContext(ctx, &res, "eth_getStorageAt", address, key, blockNrOrHash); res != nil && err == nil {
go pea.writeStateDiffAtOrFor(blockNrOrHash) go pea.writeStateDiffAtOrFor(blockNrOrHash)
return res, nil return res, nil
} }
} }
if err == sql.ErrNoRows {
return make([]byte, 32), nil
}
return nil, err return nil, err
} }
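The rlp.Split call in GetStorageAt above exists because storage slot values sit RLP-encoded inside the state leaf; the raw leaf bytes must be unwrapped and left-padded to a 32-byte word before being returned. A minimal sketch of that unwrapping using only go-ethereum's rlp and common packages; the slot value 0x2a is an arbitrary example.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// A storage slot holding the value 42 is persisted as the RLP encoding of 0x2a.
	stored, err := rlp.EncodeToBytes([]byte{0x2a})
	if err != nil {
		log.Fatal(err)
	}
	// Same unwrap as GetStorageAt: split off the RLP header and keep the content.
	_, content, _, err := rlp.Split(stored)
	if err != nil {
		log.Fatal(err)
	}
	// Left-pad to a full 32-byte word, which is what eth_getStorageAt returns.
	var value common.Hash
	value.SetBytes(content)
	fmt.Printf("returned word: %x\n", value)
}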
@ -753,17 +663,13 @@ func (pea *PublicEthAPI) GetCode(ctx context.Context, address common.Address, bl
if code != nil && err == nil { if code != nil && err == nil {
return code, nil return code, nil
} }
if pea.proxyOnError { if pea.rpc != nil {
var res hexutil.Bytes var res hexutil.Bytes
if err := pea.rpc.CallContext(ctx, &res, "eth_getCode", address, blockNrOrHash); res != nil && err == nil { if err := pea.rpc.CallContext(ctx, &res, "eth_getCode", address, blockNrOrHash); res != nil && err == nil {
go pea.writeStateDiffAtOrFor(blockNrOrHash) go pea.writeStateDiffAtOrFor(blockNrOrHash)
return res, nil return res, nil
} }
} }
if err == sql.ErrNoRows {
return code, nil
}
return nil, err return nil, err
} }
@ -773,7 +679,7 @@ func (pea *PublicEthAPI) GetProof(ctx context.Context, address common.Address, s
if proof != nil && err == nil { if proof != nil && err == nil {
return proof, nil return proof, nil
} }
if pea.proxyOnError { if pea.rpc != nil {
var res *AccountResult var res *AccountResult
if err := pea.rpc.CallContext(ctx, &res, "eth_getProof", address, storageKeys, blockNrOrHash); res != nil && err == nil { if err := pea.rpc.CallContext(ctx, &res, "eth_getProof", address, storageKeys, blockNrOrHash); res != nil && err == nil {
go pea.writeStateDiffAtOrFor(blockNrOrHash) go pea.writeStateDiffAtOrFor(blockNrOrHash)
@ -832,59 +738,50 @@ func (pea *PublicEthAPI) localGetProof(ctx context.Context, address common.Addre
}, state.Error() }, state.Error()
} }
// revertError is an API error that encompasses an EVM revert with JSON error // Call executes the given transaction on the state for the given block number.
// code and a binary data blob. //
type revertError struct { // Additionally, the caller can specify a batch of contracts for field overriding.
error //
reason string // revert reason hex encoded // Note, this function doesn't make any changes in the state/blockchain and is
} // useful to execute and retrieve values.
func (pea *PublicEthAPI) Call(ctx context.Context, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *map[common.Address]account) (hexutil.Bytes, error) {
// ErrorCode returns the JSON error code for a revert. var accounts map[common.Address]account
// See: https://github.com/ethereum/wiki/wiki/JSON-RPC-Error-Codes-Improvement-Proposal if overrides != nil {
func (e *revertError) ErrorCode() int { accounts = *overrides
return 3
}
// ErrorData returns the hex encoded revert reason.
func (e *revertError) ErrorData() interface{} {
return e.reason
}
func newRevertError(result *core.ExecutionResult) *revertError {
reason, errUnpack := abi.UnpackRevert(result.Revert())
err := errors.New("execution reverted")
if errUnpack == nil {
err = fmt.Errorf("execution reverted: %v", reason)
} }
return &revertError{ res, _, failed, err := DoCall(ctx, pea.B, args, blockNrOrHash, accounts, 5*time.Second, pea.B.Config.RPCGasCap)
error: err, if (failed || err != nil) && pea.rpc != nil {
reason: hexutil.Encode(result.Revert()), var hex hexutil.Bytes
if err := pea.rpc.CallContext(ctx, &hex, "eth_call", args, blockNrOrHash, overrides); hex != nil && err == nil {
go pea.writeStateDiffAtOrFor(blockNrOrHash)
return hex, nil
}
} }
if failed && err == nil {
return nil, errors.New("eth_call failed without error")
}
return (hexutil.Bytes)(res), err
} }
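Call above accepts an optional per-account override set that applies only for the duration of the call. A rough sketch of exercising it over raw JSON-RPC with go-ethereum's rpc client; the override field names follow the OverrideAccount JSON tags shown further down in this file, while the endpoint URL, contract address, and calldata are placeholders. Note that "state" and "stateDiff" cannot be combined for the same account.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8081") // placeholder server endpoint
	if err != nil {
		log.Fatal(err)
	}
	call := map[string]interface{}{
		"to":   "0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f", // placeholder contract address
		"data": "0x06fdde03",                                 // placeholder calldata
	}
	// Override the callee's balance and one storage slot for this call only.
	overrides := map[string]interface{}{
		"0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f": map[string]interface{}{
			"balance": "0xde0b6b3a7640000",
			"stateDiff": map[string]string{
				"0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000000000000000000000000000000000000000000001",
			},
		},
	}
	var result hexutil.Bytes
	if err := client.CallContext(context.Background(), &result, "eth_call", call, "latest", overrides); err != nil {
		log.Fatal(err)
	}
	fmt.Println("eth_call returned:", result.String())
}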
// OverrideAccount indicates the overriding fields of account during the execution func DoCall(ctx context.Context, b *Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides map[common.Address]account, timeout time.Duration, globalGasCap *big.Int) ([]byte, uint64, bool, error) {
// of a message call. defer func(start time.Time) {
// Note, state and stateDiff can't be specified at the same time. If state is logrus.Debugf("Executing EVM call finished %s runtime %s", time.Now().String(), time.Since(start).String())
// set, message execution will only use the data in the given state. Otherwise }(time.Now())
// if stateDiff is set, all diffs will be applied first and then execute the call state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
// message. if state == nil || err != nil {
type OverrideAccount struct { return nil, 0, false, err
Nonce *hexutil.Uint64 `json:"nonce"`
Code *hexutil.Bytes `json:"code"`
Balance **hexutil.Big `json:"balance"`
State *map[common.Hash]common.Hash `json:"state"`
StateDiff *map[common.Hash]common.Hash `json:"stateDiff"`
}
// StateOverride is the collection of overridden accounts.
type StateOverride map[common.Address]OverrideAccount
// Apply overrides the fields of specified accounts into the given state.
func (diff *StateOverride) Apply(state *state.StateDB) error {
if diff == nil {
return nil
} }
for addr, account := range *diff { // Set sender address or use a default if none specified
var addr common.Address
if args.From == nil {
if b.Config.DefaultSender != nil {
addr = *b.Config.DefaultSender
}
} else {
addr = *args.From
}
// Override the fields of specified contracts before execution.
for addr, account := range overrides {
// Override account nonce. // Override account nonce.
if account.Nonce != nil { if account.Nonce != nil {
state.SetNonce(addr, uint64(*account.Nonce)) state.SetNonce(addr, uint64(*account.Nonce))
@ -898,7 +795,7 @@ func (diff *StateOverride) Apply(state *state.StateDB) error {
state.SetBalance(addr, (*big.Int)(*account.Balance)) state.SetBalance(addr, (*big.Int)(*account.Balance))
} }
if account.State != nil && account.StateDiff != nil { if account.State != nil && account.StateDiff != nil {
return fmt.Errorf("account %s has both 'state' and 'stateDiff'", addr.Hex()) return nil, 0, false, fmt.Errorf("account %s has both 'state' and 'stateDiff'", addr.Hex())
} }
// Replace entire state if caller requires. // Replace entire state if caller requires.
if account.State != nil { if account.State != nil {
@ -911,56 +808,32 @@ func (diff *StateOverride) Apply(state *state.StateDB) error {
} }
} }
} }
return nil // Set default gas & gas price if none were set
} gas := uint64(math.MaxUint64 / 2)
if args.Gas != nil {
// Call executes the given transaction on the state for the given block number. gas = uint64(*args.Gas)
// }
// Additionally, the caller can specify a batch of contracts for field overriding. if globalGasCap != nil && globalGasCap.Uint64() < gas {
// logrus.Warnf("Caller gas above allowance, capping; requested: %d, cap: %d", gas, globalGasCap)
// Note, this function doesn't make any changes in the state/blockchain and is gas = globalGasCap.Uint64()
// useful to execute and retrieve values. }
func (pea *PublicEthAPI) Call(ctx context.Context, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Bytes, error) { gasPrice := new(big.Int).SetUint64(params.GWei)
if pea.forwardEthCalls { if args.GasPrice != nil {
var hex hexutil.Bytes gasPrice = args.GasPrice.ToInt()
err := pea.rpc.CallContext(ctx, &hex, "eth_call", args, blockNrOrHash, overrides)
return hex, err
} }
result, err := DoCall(ctx, pea.B, args, blockNrOrHash, overrides, 5*time.Second, pea.B.Config.RPCGasCap.Uint64()) value := new(big.Int)
if args.Value != nil {
// If the result contains a revert reason, try to unpack and return it. value = args.Value.ToInt()
if err == nil {
if len(result.Revert()) > 0 {
err = newRevertError(result)
} else if result.Err != nil {
err = result.Err
}
} }
if err != nil && pea.proxyOnError { var data []byte
var hex hexutil.Bytes if args.Data != nil {
if err := pea.rpc.CallContext(ctx, &hex, "eth_call", args, blockNrOrHash, overrides); hex != nil && err == nil { data = []byte(*args.Data)
go pea.writeStateDiffAtOrFor(blockNrOrHash)
return hex, nil
}
}
return result.Return(), err
}
func DoCall(ctx context.Context, b *Backend, args CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) {
defer func(start time.Time) {
logrus.Debugf("Executing EVM call finished %s runtime %s", time.Now().String(), time.Since(start).String())
}(time.Now())
state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
if state == nil || err != nil {
return nil, err
} }
if err := overrides.Apply(state); err != nil { // Create new call message
return nil, err msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, data, false)
}
// Setup context so it may be cancelled when the call has completed // Setup context so it may be cancelled when the call has completed
// or, in case of unmetered gas, setup a context with a timeout. // or, in case of unmetered gas, setup a context with a timeout.
@ -975,16 +848,10 @@ func DoCall(ctx context.Context, b *Backend, args CallArgs, blockNrOrHash rpc.Bl
defer cancel() defer cancel()
// Get a new instance of the EVM. // Get a new instance of the EVM.
msg, err := args.ToMessage(globalGasCap, header.BaseFee) evm, err := b.GetEVM(ctx, msg, state, header)
if err != nil { if err != nil {
return nil, err return nil, 0, false, err
} }
evm, vmError, err := b.GetEVM(ctx, msg, state, header)
if err != nil {
return nil, err
}
// Wait for the context to be done and cancel the evm. Even if the // Wait for the context to be done and cancel the evm. Even if the
// EVM has finished, cancelling may be done (repeatedly) // EVM has finished, cancelling may be done (repeatedly)
go func() { go func() {
@ -992,21 +859,18 @@ func DoCall(ctx context.Context, b *Backend, args CallArgs, blockNrOrHash rpc.Bl
evm.Cancel() evm.Cancel()
}() }()
// Execute the message. // Setup the gas pool (also for unmetered requests)
// and apply the message.
gp := new(core.GasPool).AddGas(math.MaxUint64) gp := new(core.GasPool).AddGas(math.MaxUint64)
result, err := core.ApplyMessage(evm, msg, gp) result, err := core.ApplyMessage(evm, msg, gp)
if err := vmError(); err != nil { if err != nil {
return nil, err return nil, 0, false, fmt.Errorf("execution failed: %v", err)
} }
// If the timer caused an abort, return an appropriate error message // If the timer caused an abort, return an appropriate error message
if evm.Cancelled() { if evm.Cancelled() {
return nil, fmt.Errorf("execution aborted (timeout = %v)", timeout) return nil, 0, false, fmt.Errorf("execution aborted (timeout = %v)", timeout)
} }
if err != nil { return result.Return(), result.UsedGas, result.Failed(), err
return result, fmt.Errorf("err: %w (supplied gas %d)", err, msg.Gas())
}
return result, nil
} }
// writeStateDiffAtOrFor calls out to the proxy statediffing geth client to fill in a gap in the index // writeStateDiffAtOrFor calls out to the proxy statediffing geth client to fill in a gap in the index
@ -1025,7 +889,7 @@ func (pea *PublicEthAPI) writeStateDiffAtOrFor(blockNrOrHash rpc.BlockNumberOrHa
} }
// writeStateDiffWithCriteria calls out to the proxy statediffing geth client to fill in a gap in the index // writeStateDiffWithCriteria calls out to the proxy statediffing geth client to fill in a gap in the index
func (pea *PublicEthAPI) writeStateDiffWithCriteria(crit filters.FilterCriteria) { func (pea *PublicEthAPI) writeStateDiffWithCriteria(crit ethereum.FilterQuery) {
// short circuit right away if the proxy doesn't support diffing // short circuit right away if the proxy doesn't support diffing
if !pea.supportsStateDiff { if !pea.supportsStateDiff {
return return
@ -1130,42 +994,3 @@ func toHexSlice(b [][]byte) []string {
} }
return r return r
} }
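The writeStateDiffAtOrFor and writeStateDiffWithCriteria helpers above ask the proxy statediffing geth node to backfill a gap in the index. A rough sketch of the equivalent direct call, under the assumption that the proxy exposes a statediff_writeStateDiffAt method taking a block number and a params object; the method name, the params shape, and the URL are assumptions and are not confirmed by this diff.

package main

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	proxy, err := rpc.Dial("http://localhost:8545") // placeholder statediffing geth endpoint
	if err != nil {
		log.Fatal(err)
	}
	// Assumed params shape; the statediffing node defines what it actually accepts.
	params := map[string]interface{}{
		"intermediateStateNodes":   true,
		"intermediateStorageNodes": true,
		"includeBlock":             true,
		"includeReceipts":          true,
		"includeTD":                true,
		"includeCode":              true,
	}
	var result interface{}
	if err := proxy.CallContext(context.Background(), &result, "statediff_writeStateDiffAt", 1000000, params); err != nil {
		log.Fatal(err)
	}
	log.Println("requested statediff for block 1000000")
}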
// decomposeLogs returns logs from LogResult rows.
func decomposeLogs(logCIDs []LogResult) ([]*types.Log, error) {
logs := make([]*types.Log, len(logCIDs))
for i, l := range logCIDs {
topics := make([]common.Hash, 0)
if l.Topic0 != "" {
topics = append(topics, common.HexToHash(l.Topic0))
}
if l.Topic1 != "" {
topics = append(topics, common.HexToHash(l.Topic1))
}
if l.Topic2 != "" {
topics = append(topics, common.HexToHash(l.Topic2))
}
if l.Topic3 != "" {
topics = append(topics, common.HexToHash(l.Topic3))
}
// TODO: should we convert string to uint ?
blockNum, err := strconv.ParseUint(l.BlockNumber, 10, 64)
if err != nil {
return nil, err
}
logs[i] = &types.Log{
Address: common.HexToAddress(l.Address),
Topics: topics,
Data: l.Data,
BlockNumber: blockNum,
TxHash: common.HexToHash(l.TxHash),
TxIndex: uint(l.TxnIndex),
BlockHash: common.HexToHash(l.BlockHash),
Index: uint(l.Index),
}
}
return logs, nil
}
View File
@ -18,40 +18,34 @@ package eth_test
import ( import (
"context" "context"
"math/big"
"strconv" "strconv"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/jmoiron/sqlx"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth" eth2 "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers" "github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared" "github.com/vulcanize/ipld-eth-indexer/pkg/shared"
ethServerShared "github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
"github.com/vulcanize/ipld-eth-server/pkg/eth"
"github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers"
) )
var ( var (
randomAddr = common.HexToAddress("0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f") randomAddr = common.HexToAddress("0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f")
randomHash = crypto.Keccak256Hash(randomAddr.Bytes()) randomHash = crypto.Keccak256Hash(randomAddr.Bytes())
number = rpc.BlockNumber(test_helpers.BlockNumber.Int64()) number = rpc.BlockNumber(test_helpers.BlockNumber.Int64())
londonBlockNum = rpc.BlockNumber(test_helpers.LondonBlockNum.Int64()) wrongNumber = rpc.BlockNumber(number + 1)
wrongNumber = number + 1 blockHash = test_helpers.MockBlock.Header().Hash()
blockHash = test_helpers.MockBlock.Header().Hash() ctx = context.Background()
baseFee = test_helpers.MockLondonBlock.BaseFee() expectedBlock = map[string]interface{}{
ctx = context.Background()
expectedBlock = map[string]interface{}{
"number": (*hexutil.Big)(test_helpers.MockBlock.Number()), "number": (*hexutil.Big)(test_helpers.MockBlock.Number()),
"hash": test_helpers.MockBlock.Hash(), "hash": test_helpers.MockBlock.Hash(),
"parentHash": test_helpers.MockBlock.ParentHash(), "parentHash": test_helpers.MockBlock.ParentHash(),
@ -131,14 +125,13 @@ var (
"receiptsRoot": test_helpers.MockUncles[1].ReceiptHash, "receiptsRoot": test_helpers.MockUncles[1].ReceiptHash,
"uncles": []common.Hash{}, "uncles": []common.Hash{},
} }
expectedTransaction = eth.NewRPCTransaction(test_helpers.MockTransactions[0], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 0, test_helpers.MockBlock.BaseFee()) expectedTransaction = eth.NewRPCTransaction(test_helpers.MockTransactions[0], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 0)
expectedTransaction2 = eth.NewRPCTransaction(test_helpers.MockTransactions[1], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 1, test_helpers.MockBlock.BaseFee()) expectedTransaction2 = eth.NewRPCTransaction(test_helpers.MockTransactions[1], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 1)
expectedTransaction3 = eth.NewRPCTransaction(test_helpers.MockTransactions[2], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 2, test_helpers.MockBlock.BaseFee()) expectedTransaction3 = eth.NewRPCTransaction(test_helpers.MockTransactions[2], test_helpers.MockBlock.Hash(), test_helpers.MockBlock.NumberU64(), 2)
expectedLondonTransaction = eth.NewRPCTransaction(test_helpers.MockLondonTransactions[0], test_helpers.MockLondonBlock.Hash(), test_helpers.MockLondonBlock.NumberU64(), 0, test_helpers.MockLondonBlock.BaseFee()) expectRawTx, _ = rlp.EncodeToBytes(test_helpers.MockTransactions[0])
expectRawTx, _ = rlp.EncodeToBytes(test_helpers.MockTransactions[0]) expectRawTx2, _ = rlp.EncodeToBytes(test_helpers.MockTransactions[1])
expectRawTx2, _ = rlp.EncodeToBytes(test_helpers.MockTransactions[1]) expectRawTx3, _ = rlp.EncodeToBytes(test_helpers.MockTransactions[2])
expectRawTx3, _ = rlp.EncodeToBytes(test_helpers.MockTransactions[2]) expectedReceipt = map[string]interface{}{
expectedReceipt = map[string]interface{}{
"blockHash": blockHash, "blockHash": blockHash,
"blockNumber": hexutil.Uint64(uint64(number.Int64())), "blockNumber": hexutil.Uint64(uint64(number.Int64())),
"transactionHash": expectedTransaction.Hash, "transactionHash": expectedTransaction.Hash,
@ -150,7 +143,7 @@ var (
"contractAddress": nil, "contractAddress": nil,
"logs": test_helpers.MockReceipts[0].Logs, "logs": test_helpers.MockReceipts[0].Logs,
"logsBloom": test_helpers.MockReceipts[0].Bloom, "logsBloom": test_helpers.MockReceipts[0].Bloom,
"status": hexutil.Uint(test_helpers.MockReceipts[0].Status), "root": hexutil.Bytes(test_helpers.MockReceipts[0].PostState),
} }
expectedReceipt2 = map[string]interface{}{ expectedReceipt2 = map[string]interface{}{
"blockHash": blockHash, "blockHash": blockHash,
@ -175,7 +168,7 @@ var (
"to": expectedTransaction3.To, "to": expectedTransaction3.To,
"gasUsed": hexutil.Uint64(test_helpers.MockReceipts[2].GasUsed), "gasUsed": hexutil.Uint64(test_helpers.MockReceipts[2].GasUsed),
"cumulativeGasUsed": hexutil.Uint64(test_helpers.MockReceipts[2].CumulativeGasUsed), "cumulativeGasUsed": hexutil.Uint64(test_helpers.MockReceipts[2].CumulativeGasUsed),
"contractAddress": test_helpers.ContractAddress, "contractAddress": nil,
"logs": test_helpers.MockReceipts[2].Logs, "logs": test_helpers.MockReceipts[2].Logs,
"logsBloom": test_helpers.MockReceipts[2].Bloom, "logsBloom": test_helpers.MockReceipts[2].Bloom,
"root": hexutil.Bytes(test_helpers.MockReceipts[2].PostState), "root": hexutil.Bytes(test_helpers.MockReceipts[2].PostState),
@ -184,75 +177,32 @@ var (
var _ = Describe("API", func() { var _ = Describe("API", func() {
var ( var (
db *sqlx.DB db *postgres.DB
api *eth.PublicEthAPI api *eth.PublicEthAPI
chainConfig = params.TestChainConfig
) )
// Test db setup, rather than using BeforeEach we only need to set up once since the tests do not mutate the database // Test db setup, rather than using BeforeEach we only need to set up once since the tests do not mutate the database
// Note: if you focus one of the tests be sure to focus this and the deferred It() // Note: if you focus one of the tests be sure to focus this and the deferred It()
It("test init", func() { It("test init", func() {
var ( var err error
err error db, err = shared.SetupDB()
tx interfaces.Batch
)
db = shared.SetupDB()
indexAndPublisher := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
backend, err := eth.NewEthBackend(db, &eth.Config{
ChainConfig: chainConfig,
VMConfig: vm.Config{},
RPCGasCap: big.NewInt(10000000000), // Max gas capacity for a rpc call.
GroupCacheConfig: &ethServerShared.GroupCacheConfig{
StateDB: ethServerShared.GroupConfig{
Name: "api_test",
CacheSizeInMB: 8,
CacheExpiryInMins: 60,
LogStatsIntervalInSecs: 0,
},
},
})
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
api, _ = eth.NewPublicEthAPI(backend, nil, false, false, false) indexAndPublisher := eth2.NewIPLDPublisher(db)
tx, err = indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty()) backend, err := eth.NewEthBackend(db, &eth.Config{})
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
api = eth.NewPublicEthAPI(backend, nil, false)
ccHash := sdtypes.CodeAndCodeHash{ err = indexAndPublisher.Publish(test_helpers.MockConvertedPayload)
Hash: test_helpers.ContractCodeHash,
Code: test_helpers.ContractCode,
}
err = indexAndPublisher.PushCodeAndCodeHash(tx, ccHash)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = publishCode(db, test_helpers.ContractCodeHash, test_helpers.ContractCode)
for _, node := range test_helpers.MockStateNodes {
err = indexAndPublisher.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
Expect(err).ToNot(HaveOccurred())
}
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
uncles := test_helpers.MockBlock.Uncles() uncles := test_helpers.MockBlock.Uncles()
uncleHashes := make([]common.Hash, len(uncles)) uncleHashes := make([]common.Hash, len(uncles))
for i, uncle := range uncles { for i, uncle := range uncles {
uncleHashes[i] = uncle.Hash() uncleHashes[i] = uncle.Hash()
} }
expectedBlock["uncles"] = uncleHashes expectedBlock["uncles"] = uncleHashes
// setting chain config to for london block
chainConfig.LondonBlock = big.NewInt(2)
indexAndPublisher = shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
tx, err = indexAndPublisher.PushBlock(test_helpers.MockLondonBlock, test_helpers.MockLondonReceipts, test_helpers.MockLondonBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
}) })
// Single test db tear down at end of all tests // Single test db tear down at end of all tests
defer It("test teardown", func() { shared.TearDownDB(db) }) defer It("test teardown", func() { eth.TearDownDB(db) })
/* /*
Headers and blocks Headers and blocks
@ -270,6 +220,8 @@ var _ = Describe("API", func() {
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("sql: no rows in result set")) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
Expect(header).To(BeNil()) Expect(header).To(BeNil())
_, err = api.B.DB.Beginx()
Expect(err).ToNot(HaveOccurred())
}) })
}) })
@ -290,7 +242,7 @@ var _ = Describe("API", func() {
bn := api.BlockNumber() bn := api.BlockNumber()
ubn := (uint64)(bn) ubn := (uint64)(bn)
subn := strconv.FormatUint(ubn, 10) subn := strconv.FormatUint(ubn, 10)
Expect(subn).To(Equal(test_helpers.LondonBlockNum.String())) Expect(subn).To(Equal(test_helpers.BlockNumber.String()))
}) })
}) })
@ -319,20 +271,10 @@ var _ = Describe("API", func() {
Expect(val).To(Equal(block[key])) Expect(val).To(Equal(block[key]))
} }
}) })
It("Returns `nil` if a block cannot be found", func() { It("Throws an error if a block cannot be found", func() {
block, err := api.GetBlockByNumber(ctx, wrongNumber, false) _, err := api.GetBlockByNumber(ctx, wrongNumber, false)
Expect(err).ToNot(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(block).To(BeNil()) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
})
It("Fetch BaseFee from london block by block number, returns `nil` for legacy block", func() {
block, err := api.GetBlockByNumber(ctx, number, false)
Expect(err).ToNot(HaveOccurred())
_, ok := block["baseFee"]
Expect(ok).To(Equal(false))
block, err = api.GetBlockByNumber(ctx, londonBlockNum, false)
Expect(err).ToNot(HaveOccurred())
Expect(block["baseFee"].(*big.Int)).To(Equal(baseFee))
}) })
}) })
@ -361,19 +303,10 @@ var _ = Describe("API", func() {
Expect(val).To(Equal(block[key])) Expect(val).To(Equal(block[key]))
} }
}) })
It("Returns `nil` if a block cannot be found", func() { It("Throws an error if a block cannot be found", func() {
block, err := api.GetBlockByHash(ctx, randomHash, false) _, err := api.GetBlockByHash(ctx, randomHash, false)
Expect(err).ToNot(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(block).To(BeZero()) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
})
It("Fetch BaseFee from london block by block hash, returns `nil` for legacy block", func() {
block, err := api.GetBlockByHash(ctx, test_helpers.MockBlock.Hash(), true)
Expect(err).ToNot(HaveOccurred())
_, ok := block["baseFee"]
Expect(ok).To(Equal(false))
block, err = api.GetBlockByHash(ctx, test_helpers.MockLondonBlock.Hash(), false)
Expect(err).ToNot(HaveOccurred())
Expect(block["baseFee"].(*big.Int)).To(Equal(baseFee))
}) })
}) })
@ -392,10 +325,10 @@ var _ = Describe("API", func() {
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(uncle2).To(Equal(expectedUncle2)) Expect(uncle2).To(Equal(expectedUncle2))
}) })
It("Returns `nil` if an block for block number cannot be found", func() { It("Throws an error if an block for blocknumber cannot be found", func() {
block, err := api.GetUncleByBlockNumberAndIndex(ctx, wrongNumber, 0) _, err := api.GetUncleByBlockNumberAndIndex(ctx, wrongNumber, 0)
Expect(err).ToNot(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(block).To(BeNil()) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
}) })
It("Returns `nil` if an uncle at the provided index does not exist for the block found for the provided block number", func() { It("Returns `nil` if an uncle at the provided index does not exist for the block found for the provided block number", func() {
uncle, err := api.GetUncleByBlockNumberAndIndex(ctx, number, 2) uncle, err := api.GetUncleByBlockNumberAndIndex(ctx, number, 2)
@ -413,10 +346,10 @@ var _ = Describe("API", func() {
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(uncle2).To(Equal(expectedUncle2)) Expect(uncle2).To(Equal(expectedUncle2))
}) })
It("Returns `nil` if a block for blockhash cannot be found", func() { It("Throws an error if an block for blockhash cannot be found", func() {
block, err := api.GetUncleByBlockHashAndIndex(ctx, randomHash, 0) _, err := api.GetUncleByBlockHashAndIndex(ctx, randomHash, 0)
Expect(err).ToNot(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(block).To(BeNil()) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
}) })
It("Returns `nil` if an uncle at the provided index does not exist for the block with the provided hash", func() { It("Returns `nil` if an uncle at the provided index does not exist for the block with the provided hash", func() {
uncle, err := api.GetUncleByBlockHashAndIndex(ctx, blockHash, 2) uncle, err := api.GetUncleByBlockHashAndIndex(ctx, blockHash, 2)
@ -428,7 +361,6 @@ var _ = Describe("API", func() {
Describe("eth_getUncleCountByBlockNumber", func() { Describe("eth_getUncleCountByBlockNumber", func() {
It("Retrieves the number of uncles for the canonical block with the provided number", func() { It("Retrieves the number of uncles for the canonical block with the provided number", func() {
count := api.GetUncleCountByBlockNumber(ctx, number) count := api.GetUncleCountByBlockNumber(ctx, number)
Expect(*count).NotTo(Equal(nil))
Expect(uint64(*count)).To(Equal(uint64(2))) Expect(uint64(*count)).To(Equal(uint64(2)))
}) })
}) })
@ -436,7 +368,6 @@ var _ = Describe("API", func() {
Describe("eth_getUncleCountByBlockHash", func() { Describe("eth_getUncleCountByBlockHash", func() {
It("Retrieves the number of uncles for the block with the provided hash", func() { It("Retrieves the number of uncles for the block with the provided hash", func() {
count := api.GetUncleCountByBlockHash(ctx, blockHash) count := api.GetUncleCountByBlockHash(ctx, blockHash)
Expect(*count).NotTo(Equal(nil))
Expect(uint64(*count)).To(Equal(uint64(2))) Expect(uint64(*count)).To(Equal(uint64(2)))
}) })
}) })
@ -471,14 +402,14 @@ var _ = Describe("API", func() {
Describe("eth_getBlockTransactionCountByNumber", func() { Describe("eth_getBlockTransactionCountByNumber", func() {
It("Retrieves the number of transactions in the canonical block with the provided number", func() { It("Retrieves the number of transactions in the canonical block with the provided number", func() {
count := api.GetBlockTransactionCountByNumber(ctx, number) count := api.GetBlockTransactionCountByNumber(ctx, number)
Expect(uint64(*count)).To(Equal(uint64(4))) Expect(uint64(*count)).To(Equal(uint64(3)))
}) })
}) })
Describe("eth_getBlockTransactionCountByHash", func() { Describe("eth_getBlockTransactionCountByHash", func() {
It("Retrieves the number of transactions in the block with the provided hash ", func() { It("Retrieves the number of transactions in the block with the provided hash ", func() {
count := api.GetBlockTransactionCountByHash(ctx, blockHash) count := api.GetBlockTransactionCountByHash(ctx, blockHash)
Expect(uint64(*count)).To(Equal(uint64(4))) Expect(uint64(*count)).To(Equal(uint64(3)))
}) })
}) })
@ -496,13 +427,6 @@ var _ = Describe("API", func() {
Expect(tx).ToNot(BeNil()) Expect(tx).ToNot(BeNil())
Expect(tx).To(Equal(expectedTransaction3)) Expect(tx).To(Equal(expectedTransaction3))
}) })
It("Retrieves the GasFeeCap and GasTipCap for dynamic transaction from the london block hash", func() {
tx := api.GetTransactionByBlockNumberAndIndex(ctx, londonBlockNum, 0)
Expect(tx).ToNot(BeNil())
Expect(tx.GasFeeCap).To(Equal((*hexutil.Big)(test_helpers.MockLondonTransactions[0].GasFeeCap())))
Expect(tx.GasTipCap).To(Equal((*hexutil.Big)(test_helpers.MockLondonTransactions[0].GasTipCap())))
Expect(tx).To(Equal(expectedLondonTransaction))
})
}) })
Describe("eth_getTransactionByBlockHashAndIndex", func() { Describe("eth_getTransactionByBlockHashAndIndex", func() {
@ -519,14 +443,6 @@ var _ = Describe("API", func() {
Expect(tx).ToNot(BeNil()) Expect(tx).ToNot(BeNil())
Expect(tx).To(Equal(expectedTransaction3)) Expect(tx).To(Equal(expectedTransaction3))
}) })
It("Retrieves the GasFeeCap and GasTipCap for dynamic transaction from the london block hash", func() {
tx := api.GetTransactionByBlockHashAndIndex(ctx, test_helpers.MockLondonBlock.Hash(), 0)
Expect(tx).ToNot(BeNil())
Expect(tx.GasFeeCap).To(Equal((*hexutil.Big)(test_helpers.MockLondonTransactions[0].GasFeeCap())))
Expect(tx.GasTipCap).To(Equal((*hexutil.Big)(test_helpers.MockLondonTransactions[0].GasTipCap())))
Expect(tx).To(Equal(expectedLondonTransaction))
})
}) })
Describe("eth_getRawTransactionByBlockNumberAndIndex", func() { Describe("eth_getRawTransactionByBlockNumberAndIndex", func() {
@ -638,16 +554,10 @@ var _ = Describe("API", func() {
Describe("eth_getLogs", func() { Describe("eth_getLogs", func() {
It("Retrieves receipt logs that match the provided topics within the provided range", func() { It("Retrieves receipt logs that match the provided topics within the provided range", func() {
crit := filters.FilterCriteria{ crit := ethereum.FilterQuery{
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{ {
common.HexToHash("0x0c"), common.HexToHash("0x04"),
},
{
common.HexToHash("0x0a"),
},
{
common.HexToHash("0x0b"),
}, },
}, },
FromBlock: test_helpers.MockBlock.Number(), FromBlock: test_helpers.MockBlock.Number(),
@ -655,62 +565,10 @@ var _ = Describe("API", func() {
} }
logs, err := api.GetLogs(ctx, crit) logs, err := api.GetLogs(ctx, crit)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(0))
crit = filters.FilterCriteria{
Topics: [][]common.Hash{
{
common.HexToHash("0x08"),
},
{
common.HexToHash("0x0a"),
},
{
common.HexToHash("0x0c"),
},
},
FromBlock: test_helpers.MockBlock.Number(),
ToBlock: test_helpers.MockBlock.Number(),
}
logs, err = api.GetLogs(ctx, crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(0))
crit = filters.FilterCriteria{
Topics: [][]common.Hash{
{
common.HexToHash("0x09"),
},
{
common.HexToHash("0x0a"),
},
{
common.HexToHash("0x0b"),
},
},
FromBlock: test_helpers.MockBlock.Number(),
ToBlock: test_helpers.MockBlock.Number(),
}
logs, err = api.GetLogs(ctx, crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog4}))
crit = filters.FilterCriteria{
Topics: [][]common.Hash{
{
common.HexToHash("0x04"),
},
},
FromBlock: test_helpers.MockBlock.Number(),
ToBlock: test_helpers.MockBlock.Number(),
}
logs, err = api.GetLogs(ctx, crit)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(1)) Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{ {
common.HexToHash("0x04"), common.HexToHash("0x04"),
@ -725,7 +583,7 @@ var _ = Describe("API", func() {
Expect(len(logs)).To(Equal(2)) Expect(len(logs)).To(Equal(2))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{ {
common.HexToHash("0x04"), common.HexToHash("0x04"),
@ -740,7 +598,7 @@ var _ = Describe("API", func() {
Expect(len(logs)).To(Equal(1)) Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{ {
common.HexToHash("0x04"), common.HexToHash("0x04"),
@ -756,7 +614,7 @@ var _ = Describe("API", func() {
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(0)) Expect(len(logs)).To(Equal(0))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{ {
common.HexToHash("0x04"), common.HexToHash("0x04"),
@ -773,7 +631,7 @@ var _ = Describe("API", func() {
Expect(len(logs)).To(Equal(1)) Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{ {
common.HexToHash("0x05"), common.HexToHash("0x05"),
@ -790,7 +648,7 @@ var _ = Describe("API", func() {
Expect(len(logs)).To(Equal(1)) Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{ {
common.HexToHash("0x05"), common.HexToHash("0x05"),
@ -808,7 +666,7 @@ var _ = Describe("API", func() {
Expect(len(logs)).To(Equal(1)) Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{ {
common.HexToHash("0x04"), common.HexToHash("0x04"),
@ -827,7 +685,7 @@ var _ = Describe("API", func() {
Expect(len(logs)).To(Equal(2)) Expect(len(logs)).To(Equal(2))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{}, {},
{ {
@ -842,7 +700,7 @@ var _ = Describe("API", func() {
Expect(len(logs)).To(Equal(1)) Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{}, {},
{ {
@ -857,20 +715,20 @@ var _ = Describe("API", func() {
Expect(len(logs)).To(Equal(1)) Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
Topics: [][]common.Hash{}, Topics: [][]common.Hash{},
FromBlock: test_helpers.MockBlock.Number(), FromBlock: test_helpers.MockBlock.Number(),
ToBlock: test_helpers.MockBlock.Number(), ToBlock: test_helpers.MockBlock.Number(),
} }
logs, err = api.GetLogs(ctx, crit) logs, err = api.GetLogs(ctx, crit)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(6)) Expect(len(logs)).To(Equal(2))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2, test_helpers.MockLog3, test_helpers.MockLog4, test_helpers.MockLog5, test_helpers.MockLog6})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
}) })
It("Uses the provided blockhash if one is provided", func() { It("Uses the provided blockhash if one is provided", func() {
hash := test_helpers.MockBlock.Hash() hash := test_helpers.MockBlock.Hash()
crit := filters.FilterCriteria{ crit := ethereum.FilterQuery{
BlockHash: &hash, BlockHash: &hash,
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{}, {},
@ -884,7 +742,7 @@ var _ = Describe("API", func() {
Expect(len(logs)).To(Equal(1)) Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
BlockHash: &hash, BlockHash: &hash,
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{ {
@ -900,7 +758,7 @@ var _ = Describe("API", func() {
Expect(len(logs)).To(Equal(1)) Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
BlockHash: &hash, BlockHash: &hash,
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{}, {},
@ -914,7 +772,7 @@ var _ = Describe("API", func() {
Expect(len(logs)).To(Equal(1)) Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
BlockHash: &hash, BlockHash: &hash,
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{ {
@ -930,7 +788,7 @@ var _ = Describe("API", func() {
Expect(len(logs)).To(Equal(1)) Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
BlockHash: &hash, BlockHash: &hash,
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{ {
@ -945,7 +803,7 @@ var _ = Describe("API", func() {
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(0)) Expect(len(logs)).To(Equal(0))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
BlockHash: &hash, BlockHash: &hash,
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{ {
@ -962,7 +820,7 @@ var _ = Describe("API", func() {
Expect(len(logs)).To(Equal(1)) Expect(len(logs)).To(Equal(1))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog2}))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
BlockHash: &hash, BlockHash: &hash,
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{ {
@ -976,7 +834,7 @@ var _ = Describe("API", func() {
Expect(len(logs)).To(Equal(2)) Expect(len(logs)).To(Equal(2))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
BlockHash: &hash, BlockHash: &hash,
Topics: [][]common.Hash{ Topics: [][]common.Hash{
{ {
@ -994,19 +852,19 @@ var _ = Describe("API", func() {
Expect(len(logs)).To(Equal(2)) Expect(len(logs)).To(Equal(2))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
BlockHash: &hash, BlockHash: &hash,
Topics: [][]common.Hash{}, Topics: [][]common.Hash{},
} }
logs, err = api.GetLogs(ctx, crit) logs, err = api.GetLogs(ctx, crit)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(6)) Expect(len(logs)).To(Equal(2))
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2, test_helpers.MockLog3, test_helpers.MockLog4, test_helpers.MockLog5, test_helpers.MockLog6})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
}) })
It("Filters on contract address if any are provided", func() { It("Filters on contract address if any are provided", func() {
hash := test_helpers.MockBlock.Hash() hash := test_helpers.MockBlock.Hash()
crit := filters.FilterCriteria{ crit := ethereum.FilterQuery{
BlockHash: &hash, BlockHash: &hash,
Addresses: []common.Address{ Addresses: []common.Address{
test_helpers.Address, test_helpers.Address,
@ -1028,7 +886,7 @@ var _ = Describe("API", func() {
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1}))
hash = test_helpers.MockBlock.Hash() hash = test_helpers.MockBlock.Hash()
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
BlockHash: &hash, BlockHash: &hash,
Addresses: []common.Address{ Addresses: []common.Address{
test_helpers.Address, test_helpers.Address,
@ -1051,7 +909,7 @@ var _ = Describe("API", func() {
Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2})) Expect(logs).To(Equal([]*types.Log{test_helpers.MockLog1, test_helpers.MockLog2}))
hash = test_helpers.MockBlock.Hash() hash = test_helpers.MockBlock.Hash()
crit = filters.FilterCriteria{ crit = ethereum.FilterQuery{
BlockHash: &hash, BlockHash: &hash,
Addresses: []common.Address{ Addresses: []common.Address{
test_helpers.Address, test_helpers.Address,
@ -1090,17 +948,8 @@ var _ = Describe("API", func() {
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0))) Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
}) })
It("Retrieves the eth balance for the non-existing account address at the block with the provided hash", func() { It("Throws an error for an account it cannot find the balance for", func() {
bal, err := api.GetBalance(ctx, randomAddr, rpc.BlockNumberOrHashWithHash(blockHash, true)) _, err := api.GetBalance(ctx, randomAddr, rpc.BlockNumberOrHashWithHash(blockHash, true))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
})
It("Throws an error for an account of a non-existing block hash", func() {
_, err := api.GetBalance(ctx, test_helpers.AccountAddresss, rpc.BlockNumberOrHashWithHash(randomHash, true))
Expect(err).To(HaveOccurred())
})
It("Throws an error for an account of a non-existing block number", func() {
_, err := api.GetBalance(ctx, test_helpers.AccountAddresss, rpc.BlockNumberOrHashWithNumber(wrongNumber))
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
}) })
}) })
@ -1116,10 +965,26 @@ var _ = Describe("API", func() {
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode))) Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
}) })
It("Returns `nil` for an account it cannot find the code for", func() { It("Throws an error for an account it cannot find the code for", func() {
code, err := api.GetCode(ctx, randomAddr, rpc.BlockNumberOrHashWithHash(blockHash, true)) _, err := api.GetCode(ctx, randomAddr, rpc.BlockNumberOrHashWithHash(blockHash, true))
Expect(err).ToNot(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(code).To(BeEmpty())
}) })
}) })
}) })
func publishCode(db *postgres.DB, codeHash common.Hash, code []byte) error {
tx, err := db.Beginx()
if err != nil {
return err
}
mhKey, err := shared.MultihashKeyFromKeccak256(codeHash)
if err != nil {
tx.Rollback()
return err
}
if err := shared.PublishDirect(tx, mhKey, code); err != nil {
tx.Rollback()
return err
}
return tx.Commit()
}
View File
@ -18,14 +18,13 @@ package eth
import ( import (
"context" "context"
"database/sql"
"errors" "errors"
"fmt" "fmt"
"math/big" "math/big"
"time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
@ -39,62 +38,49 @@ import (
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/jmoiron/sqlx"
log "github.com/sirupsen/logrus"
validator "github.com/vulcanize/eth-ipfs-state-validator/v3/pkg"
ipfsethdb "github.com/vulcanize/ipfs-ethdb/v3/postgres"
ethServerShared "github.com/ethereum/go-ethereum/statediff/indexer/shared" "github.com/vulcanize/ipfs-ethdb"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
shared2 "github.com/vulcanize/ipld-eth-indexer/pkg/shared"
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared" "github.com/vulcanize/ipld-eth-server/pkg/shared"
) )
var ( var (
errPendingBlockNumber = errors.New("pending block number not supported") errPendingBlockNumber = errors.New("pending block number not supported")
errNegativeBlockNumber = errors.New("negative block number not supported") errNegativeBlockNumber = errors.New("negative block number not supported")
errHeaderHashNotFound = errors.New("header for hash not found")
errHeaderNotFound = errors.New("header not found")
// errMissingSignature is returned if a block's extra-data section doesn't seem
// to contain a 65 byte secp256k1 signature.
errMissingSignature = errors.New("extra-data 65 byte signature suffix missing")
) )
const ( const (
RetrieveCanonicalBlockHashByNumber = `SELECT block_hash FROM eth.header_cids RetrieveCanonicalBlockHashByNumber = `SELECT block_hash FROM eth.header_cids
INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key) INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
WHERE block_hash = (SELECT canonical_header_hash($1))` WHERE id = (SELECT canonical_header_id($1))`
RetrieveCanonicalHeaderByNumber = `SELECT cid, data FROM eth.header_cids RetrieveCanonicalHeaderByNumber = `SELECT cid, data FROM eth.header_cids
INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key) INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
WHERE block_hash = (SELECT canonical_header_hash($1))` WHERE id = (SELECT canonical_header_id($1))`
RetrieveTD = `SELECT CAST(td as Text) FROM eth.header_cids RetrieveTD = `SELECT td FROM eth.header_cids
WHERE header_cids.block_hash = $1` WHERE header_cids.block_hash = $1`
RetrieveRPCTransaction = `SELECT blocks.data, block_hash, block_number, index FROM public.blocks, eth.transaction_cids, eth.header_cids RetrieveRPCTransaction = `SELECT blocks.data, block_hash, block_number, index FROM public.blocks, eth.transaction_cids, eth.header_cids
WHERE blocks.key = transaction_cids.mh_key WHERE blocks.key = transaction_cids.mh_key
AND transaction_cids.header_id = header_cids.block_hash AND transaction_cids.header_id = header_cids.id
AND transaction_cids.tx_hash = $1` AND transaction_cids.tx_hash = $1`
RetrieveCodeHashByLeafKeyAndBlockHash = `SELECT code_hash FROM eth.state_accounts, eth.state_cids, eth.header_cids RetrieveCodeHashByLeafKeyAndBlockHash = `SELECT code_hash FROM eth.state_accounts, eth.state_cids, eth.header_cids
WHERE state_accounts.header_id = state_cids.header_id AND state_accounts.state_path = state_cids.state_path WHERE state_accounts.state_id = state_cids.id
AND state_cids.header_id = header_cids.block_hash AND state_cids.header_id = header_cids.id
AND state_leaf_key = $1 AND state_leaf_key = $1
AND block_number <= (SELECT block_number AND block_number <= (SELECT block_number
FROM eth.header_cids FROM eth.header_cids
WHERE block_hash = $2) WHERE block_hash = $2)
AND header_cids.block_hash = (SELECT canonical_header_hash(block_number)) AND header_cids.id = (SELECT canonical_header_id(block_number))
ORDER BY block_number DESC ORDER BY block_number DESC
LIMIT 1` LIMIT 1`
RetrieveCodeByMhKey = `SELECT data FROM public.blocks WHERE key = $1` RetrieveCodeByMhKey = `SELECT data FROM public.blocks WHERE key = $1`
) )
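The two code-related queries above resolve an account's code hash at a canonical block and then dereference the code blob through public.blocks. A rough in-package sketch of how they might be combined, assuming the surrounding eth package, a *sqlx.DB handle, go-ethereum's common and crypto packages, and a MultihashKeyFromKeccak256 helper like the one used elsewhere in this diff; error handling is abbreviated and this is not necessarily how the backend itself wires them together.

// Assumes: *sqlx.DB, go-ethereum common and crypto, and a shared.MultihashKeyFromKeccak256 helper.
func getCodeSketch(db *sqlx.DB, address common.Address, blockHash common.Hash) ([]byte, error) {
	// The state leaf key for an account is the keccak256 hash of its address.
	leafKey := crypto.Keccak256Hash(address.Bytes())
	var codeHash common.Hash
	// Resolve the account's code hash at (or before) the given canonical block.
	if err := db.Get(&codeHash, RetrieveCodeHashByLeafKeyAndBlockHash, leafKey.Hex(), blockHash.Hex()); err != nil {
		return nil, err
	}
	// Translate the code hash into the multihash key used by public.blocks.
	mhKey, err := shared.MultihashKeyFromKeccak256(codeHash) // assumed helper location
	if err != nil {
		return nil, err
	}
	var code []byte
	// Dereference the raw code blob.
	if err := db.Get(&code, RetrieveCodeByMhKey, mhKey); err != nil {
		return nil, err
	}
	return code, nil
}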
const (
StateDBGroupCacheName = "statedb"
)
type Backend struct { type Backend struct {
// underlying postgres db // underlying postgres db
DB *sqlx.DB DB *postgres.DB
// postgres db interfaces // postgres db interfaces
Retriever *CIDRetriever Retriever *CIDRetriever
@ -109,30 +95,15 @@ type Backend struct {
} }
type Config struct { type Config struct {
ChainConfig *params.ChainConfig ChainConfig *params.ChainConfig
VMConfig vm.Config VmConfig vm.Config
DefaultSender *common.Address DefaultSender *common.Address
RPCGasCap *big.Int RPCGasCap *big.Int
GroupCacheConfig *shared.GroupCacheConfig
} }
func NewEthBackend(db *sqlx.DB, c *Config) (*Backend, error) { func NewEthBackend(db *postgres.DB, c *Config) (*Backend, error) {
gcc := c.GroupCacheConfig
groupName := gcc.StateDB.Name
if groupName == "" {
groupName = StateDBGroupCacheName
}
r := NewCIDRetriever(db) r := NewCIDRetriever(db)
ethDB := ipfsethdb.NewDatabase(db, ipfsethdb.CacheConfig{ ethDB := ipfsethdb.NewDatabase(db.DB)
Name: groupName,
Size: gcc.StateDB.CacheSizeInMB * 1024 * 1024,
ExpiryDuration: time.Minute * time.Duration(gcc.StateDB.CacheExpiryInMins),
})
logStateDBStatsOnTimer(ethDB.(*ipfsethdb.Database), gcc)
return &Backend{ return &Backend{
DB: db, DB: db,
Retriever: r, Retriever: r,
@ -203,11 +174,7 @@ func (b *Backend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.Bl
if header == nil { if header == nil {
return nil, errors.New("header for hash not found") return nil, errors.New("header for hash not found")
} }
canonicalHash, err := b.GetCanonicalHash(header.Number.Uint64()) if blockNrOrHash.RequireCanonical && b.GetCanonicalHash(header.Number.Uint64()) != hash {
if err != nil {
return nil, err
}
if blockNrOrHash.RequireCanonical && canonicalHash != hash {
return nil, errors.New("hash is not currently canonical") return nil, errors.New("hash is not currently canonical")
} }
return header, nil return header, nil
@ -230,9 +197,9 @@ func (b *Backend) GetTd(blockHash common.Hash) (*big.Int, error) {
} }
// CurrentBlock returns the current block // CurrentBlock returns the current block
func (b *Backend) CurrentBlock() (*types.Block, error) { func (b *Backend) CurrentBlock() *types.Block {
block, err := b.BlockByNumber(context.Background(), rpc.LatestBlockNumber) block, _ := b.BlockByNumber(context.Background(), rpc.LatestBlockNumber)
return block, err return block
} }
// BlockByNumberOrHash returns block by number or hash // BlockByNumberOrHash returns block by number or hash
@ -248,11 +215,7 @@ func (b *Backend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.Blo
if header == nil { if header == nil {
return nil, errors.New("header for hash not found") return nil, errors.New("header for hash not found")
} }
canonicalHash, err := b.GetCanonicalHash(header.Number.Uint64()) if blockNrOrHash.RequireCanonical && b.GetCanonicalHash(header.Number.Uint64()) != hash {
if err != nil {
return nil, err
}
if blockNrOrHash.RequireCanonical && canonicalHash != hash {
return nil, errors.New("hash is not currently canonical") return nil, errors.New("hash is not currently canonical")
} }
block, err := b.BlockByHash(ctx, hash) block, err := b.BlockByHash(ctx, hash)
@ -290,20 +253,14 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
return nil, errNegativeBlockNumber return nil, errNegativeBlockNumber
} }
// Get the canonical hash // Get the canonical hash
canonicalHash, err := b.GetCanonicalHash(uint64(number)) canonicalHash := b.GetCanonicalHash(uint64(number))
if err != nil { if err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, err return nil, err
} }
// Retrieve all the CIDs for the block // Retrieve all the CIDs for the block
// TODO: optimize this by retrieving iplds directly rather than the cids first (this is a remnant from when we fetched iplds through the ipfs blockservice interface) // TODO: optimize this by retrieving iplds directly rather than the cids first (this is a remnant from when we fetched iplds through the ipfs blockservice interface)
headerCID, uncleCIDs, txCIDs, rctCIDs, err := b.Retriever.RetrieveBlockByHash(canonicalHash) headerCID, uncleCIDs, txCIDs, rctCIDs, err := b.Retriever.RetrieveBlockByHash(canonicalHash)
if err != nil { if err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, err return nil, err
} }
@ -324,65 +281,49 @@ func (b *Backend) BlockByNumber(ctx context.Context, blockNumber rpc.BlockNumber
}() }()
// Fetch and decode the header IPLD // Fetch and decode the header IPLD
var headerIPLD models.IPLDModel headerIPLD, err := b.Fetcher.FetchHeader(tx, headerCID)
headerIPLD, err = b.Fetcher.FetchHeader(tx, headerCID)
if err != nil { if err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, err return nil, err
} }
var header types.Header var header types.Header
err = rlp.DecodeBytes(headerIPLD.Data, &header) if err := rlp.DecodeBytes(headerIPLD.Data, &header); err != nil {
if err != nil {
return nil, err return nil, err
} }
// Fetch and decode the uncle IPLDs // Fetch and decode the uncle IPLDs
var uncleIPLDs []models.IPLDModel uncleIPLDs, err := b.Fetcher.FetchUncles(tx, uncleCIDs)
uncleIPLDs, err = b.Fetcher.FetchUncles(tx, uncleCIDs)
if err != nil { if err != nil {
return nil, err return nil, err
} }
var uncles []*types.Header var uncles []*types.Header
for _, uncleIPLD := range uncleIPLDs { for _, uncleIPLD := range uncleIPLDs {
var uncle types.Header var uncle types.Header
err = rlp.DecodeBytes(uncleIPLD.Data, &uncle) if err := rlp.DecodeBytes(uncleIPLD.Data, &uncle); err != nil {
if err != nil {
return nil, err return nil, err
} }
uncles = append(uncles, &uncle) uncles = append(uncles, &uncle)
} }
// Fetch and decode the transaction IPLDs // Fetch and decode the transaction IPLDs
var txIPLDs []models.IPLDModel txIPLDs, err := b.Fetcher.FetchTrxs(tx, txCIDs)
txIPLDs, err = b.Fetcher.FetchTrxs(tx, txCIDs)
if err != nil { if err != nil {
return nil, err return nil, err
} }
var transactions []*types.Transaction var transactions []*types.Transaction
for _, txIPLD := range txIPLDs { for _, txIPLD := range txIPLDs {
var transaction types.Transaction var transaction types.Transaction
err = transaction.UnmarshalBinary(txIPLD.Data) if err := rlp.DecodeBytes(txIPLD.Data, &transaction); err != nil {
if err != nil {
return nil, err return nil, err
} }
transactions = append(transactions, &transaction) transactions = append(transactions, &transaction)
} }
// Fetch and decode the receipt IPLDs // Fetch and decode the receipt IPLDs
var rctIPLDs []models.IPLDModel rctIPLDs, err := b.Fetcher.FetchRcts(tx, rctCIDs)
rctIPLDs, err = b.Fetcher.FetchRcts(tx, rctCIDs)
if err != nil { if err != nil {
return nil, err return nil, err
} }
var receipts []*types.Receipt var receipts []*types.Receipt
for _, rctIPLD := range rctIPLDs { for _, rctIPLD := range rctIPLDs {
var receipt types.Receipt var receipt types.Receipt
nodeVal, err := DecodeLeafNode(rctIPLD.Data) if err := rlp.DecodeBytes(rctIPLD.Data, &receipt); err != nil {
if err != nil {
return nil, err
}
err = receipt.UnmarshalBinary(nodeVal)
if err != nil {
return nil, err return nil, err
} }
receipts = append(receipts, &receipt) receipts = append(receipts, &receipt)
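
On the master side each receipt IPLD is first unwrapped from its receipt-trie leaf node (DecodeLeafNode) and then decoded with receipt.UnmarshalBinary, which understands EIP-2718 typed receipts; the v0.3.1-alpha side decodes with plain rlp.DecodeBytes, which only covers legacy receipts. A minimal, self-contained sketch of the typed round trip, assuming go-ethereum v1.10+ (all field values are made up for illustration):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/types"
)

func main() {
    // Build a post-Berlin (typed) receipt; the values are illustrative only.
    rct := &types.Receipt{
        Type:              types.DynamicFeeTxType,
        Status:            types.ReceiptStatusSuccessful,
        CumulativeGasUsed: 21000,
        Logs:              []*types.Log{},
    }
    // MarshalBinary produces the EIP-2718 envelope (type byte + RLP payload).
    enc, err := rct.MarshalBinary()
    if err != nil {
        panic(err)
    }
    // UnmarshalBinary round-trips typed receipts; plain RLP decoding would choke
    // on the leading type byte.
    var decoded types.Receipt
    if err := decoded.UnmarshalBinary(enc); err != nil {
        panic(err)
    }
    fmt.Println("decoded receipt status:", decoded.Status)
}
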
@ -397,9 +338,6 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
// Retrieve all the CIDs for the block // Retrieve all the CIDs for the block
headerCID, uncleCIDs, txCIDs, rctCIDs, err := b.Retriever.RetrieveBlockByHash(hash) headerCID, uncleCIDs, txCIDs, rctCIDs, err := b.Retriever.RetrieveBlockByHash(hash)
if err != nil { if err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, err return nil, err
} }
@ -420,74 +358,49 @@ func (b *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo
}() }()
// Fetch and decode the header IPLD // Fetch and decode the header IPLD
var headerIPLD models.IPLDModel headerIPLD, err := b.Fetcher.FetchHeader(tx, headerCID)
headerIPLD, err = b.Fetcher.FetchHeader(tx, headerCID)
if err != nil { if err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, err return nil, err
} }
var header types.Header var header types.Header
err = rlp.DecodeBytes(headerIPLD.Data, &header) if err := rlp.DecodeBytes(headerIPLD.Data, &header); err != nil {
if err != nil {
return nil, err return nil, err
} }
// Fetch and decode the uncle IPLDs // Fetch and decode the uncle IPLDs
var uncleIPLDs []models.IPLDModel uncleIPLDs, err := b.Fetcher.FetchUncles(tx, uncleCIDs)
uncleIPLDs, err = b.Fetcher.FetchUncles(tx, uncleCIDs)
if err != nil { if err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, err return nil, err
} }
var uncles []*types.Header var uncles []*types.Header
for _, uncleIPLD := range uncleIPLDs { for _, uncleIPLD := range uncleIPLDs {
var uncle types.Header var uncle types.Header
err = rlp.DecodeBytes(uncleIPLD.Data, &uncle) if err := rlp.DecodeBytes(uncleIPLD.Data, &uncle); err != nil {
if err != nil {
return nil, err return nil, err
} }
uncles = append(uncles, &uncle) uncles = append(uncles, &uncle)
} }
// Fetch and decode the transaction IPLDs // Fetch and decode the transaction IPLDs
var txIPLDs []models.IPLDModel txIPLDs, err := b.Fetcher.FetchTrxs(tx, txCIDs)
txIPLDs, err = b.Fetcher.FetchTrxs(tx, txCIDs)
if err != nil { if err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, err return nil, err
} }
var transactions []*types.Transaction var transactions []*types.Transaction
for _, txIPLD := range txIPLDs { for _, txIPLD := range txIPLDs {
var transaction types.Transaction var transaction types.Transaction
err = transaction.UnmarshalBinary(txIPLD.Data) if err := rlp.DecodeBytes(txIPLD.Data, &transaction); err != nil {
if err != nil {
return nil, err return nil, err
} }
transactions = append(transactions, &transaction) transactions = append(transactions, &transaction)
} }
// Fetch and decode the receipt IPLDs // Fetch and decode the receipt IPLDs
var rctIPLDs []models.IPLDModel rctIPLDs, err := b.Fetcher.FetchRcts(tx, rctCIDs)
rctIPLDs, err = b.Fetcher.FetchRcts(tx, rctCIDs)
if err != nil { if err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, err return nil, err
} }
var receipts []*types.Receipt var receipts []*types.Receipt
for _, rctIPLD := range rctIPLDs { for _, rctIPLD := range rctIPLDs {
var receipt types.Receipt var receipt types.Receipt
nodeVal, err := DecodeLeafNode(rctIPLD.Data) if err := rlp.DecodeBytes(rctIPLD.Data, &receipt); err != nil {
if err != nil {
return nil, err
}
err = receipt.UnmarshalBinary(nodeVal)
if err != nil {
return nil, err return nil, err
} }
receipts = append(receipts, &receipt) receipts = append(receipts, &receipt)
@ -509,7 +422,7 @@ func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*type
return nil, common.Hash{}, 0, 0, err return nil, common.Hash{}, 0, 0, err
} }
var transaction types.Transaction var transaction types.Transaction
if err := transaction.UnmarshalBinary(tempTxStruct.Data); err != nil { if err := rlp.DecodeBytes(tempTxStruct.Data, &transaction); err != nil {
return nil, common.Hash{}, 0, 0, err return nil, common.Hash{}, 0, 0, err
} }
return &transaction, common.HexToHash(tempTxStruct.BlockHash), tempTxStruct.BlockNumber, tempTxStruct.Index, nil return &transaction, common.HexToHash(tempTxStruct.BlockHash), tempTxStruct.BlockNumber, tempTxStruct.Index, nil
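
The same split applies to transactions: master decodes the raw bytes with tx.UnmarshalBinary (legacy and typed envelopes), while v0.3.1-alpha uses rlp.DecodeBytes (legacy only). A hedged sketch of the typed-envelope round trip, assuming a go-ethereum version with EIP-1559 support; the key and values are throwaway illustrations:

package main

import (
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/crypto"
)

func main() {
    key, _ := crypto.GenerateKey() // throwaway key, for illustration only
    to := common.HexToAddress("0x0000000000000000000000000000000000000001")
    signer := types.LatestSignerForChainID(big.NewInt(1))

    // A typed (EIP-1559) transaction; all values are illustrative.
    tx, err := types.SignNewTx(key, signer, &types.DynamicFeeTx{
        ChainID:   big.NewInt(1),
        Nonce:     0,
        GasTipCap: big.NewInt(1),
        GasFeeCap: big.NewInt(100),
        Gas:       21000,
        To:        &to,
        Value:     big.NewInt(0),
    })
    if err != nil {
        panic(err)
    }

    enc, _ := tx.MarshalBinary() // EIP-2718 envelope: type byte + RLP payload

    // UnmarshalBinary accepts both legacy and typed envelopes;
    // rlp.DecodeBytes(enc, &decoded) would only succeed for legacy transactions.
    var decoded types.Transaction
    if err := decoded.UnmarshalBinary(enc); err != nil {
        panic(err)
    }
    fmt.Println("round-tripped tx hash:", decoded.Hash())
}
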
@ -517,17 +430,16 @@ func (b *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*type
// GetReceipts retrieves receipts for the provided block hash // GetReceipts retrieves receipts for the provided block hash
func (b *Backend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { func (b *Backend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
_, receiptBytes, txs, err := b.IPLDRetriever.RetrieveReceiptsByBlockHash(hash) _, receiptBytes, err := b.IPLDRetriever.RetrieveReceiptsByBlockHash(hash)
if err != nil { if err != nil {
return nil, err return nil, err
} }
rcts := make(types.Receipts, len(receiptBytes)) rcts := make(types.Receipts, len(receiptBytes))
for i, rctBytes := range receiptBytes { for i, rctBytes := range receiptBytes {
rct := new(types.Receipt) rct := new(types.Receipt)
if err := rct.UnmarshalBinary(rctBytes); err != nil { if err := rlp.DecodeBytes(rctBytes, rct); err != nil {
return nil, err return nil, err
} }
rct.TxHash = txs[i]
rcts[i] = rct rcts[i] = rct
} }
return rcts, nil return rcts, nil
@ -535,7 +447,7 @@ func (b *Backend) GetReceipts(ctx context.Context, hash common.Hash) (types.Rece
// GetLogs returns all the logs for the given block hash // GetLogs returns all the logs for the given block hash
func (b *Backend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) { func (b *Backend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
_, receiptBytes, txs, err := b.IPLDRetriever.RetrieveReceiptsByBlockHash(hash) _, receiptBytes, err := b.IPLDRetriever.RetrieveReceiptsByBlockHash(hash)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -545,11 +457,6 @@ func (b *Backend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log
if err := rlp.DecodeBytes(rctBytes, &rct); err != nil { if err := rlp.DecodeBytes(rctBytes, &rct); err != nil {
return nil, err return nil, err
} }
for _, log := range rct.Logs {
log.TxHash = txs[i]
}
logs[i] = rct.Logs logs[i] = rct.Logs
} }
return logs, nil return logs, nil
@ -568,11 +475,7 @@ func (b *Backend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHas
if header == nil { if header == nil {
return nil, nil, errors.New("header for hash not found") return nil, nil, errors.New("header for hash not found")
} }
canonicalHash, err := b.GetCanonicalHash(header.Number.Uint64()) if blockNrOrHash.RequireCanonical && b.GetCanonicalHash(header.Number.Uint64()) != hash {
if err != nil {
return nil, nil, err
}
if blockNrOrHash.RequireCanonical && canonicalHash != hash {
return nil, nil, errors.New("hash is not currently canonical") return nil, nil, errors.New("hash is not currently canonical")
} }
stateDb, err := state.New(header.Root, b.StateDatabase, nil) stateDb, err := state.New(header.Root, b.StateDatabase, nil)
@ -600,12 +503,12 @@ func (b *Backend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNu
} }
// GetCanonicalHash gets the canonical hash for the provided number, if there is one // GetCanonicalHash gets the canonical hash for the provided number, if there is one
func (b *Backend) GetCanonicalHash(number uint64) (common.Hash, error) { func (b *Backend) GetCanonicalHash(number uint64) common.Hash {
var hashResult string var hashResult string
if err := b.DB.Get(&hashResult, RetrieveCanonicalBlockHashByNumber, number); err != nil { if err := b.DB.Get(&hashResult, RetrieveCanonicalBlockHashByNumber, number); err != nil {
return common.Hash{}, err return common.Hash{}
} }
return common.HexToHash(hashResult), nil return common.HexToHash(hashResult)
} }
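
On master the canonical-hash lookup surfaces the database error so callers can tell sql.ErrNoRows apart from real failures; the v0.3.1-alpha version swallows it and returns the zero hash. A standalone sketch of the same lookup pattern with sqlx; the query text and connection string here are placeholders, not the server's actual RetrieveCanonicalBlockHashByNumber statement:

package main

import (
    "database/sql"
    "fmt"

    "github.com/jmoiron/sqlx"
    _ "github.com/lib/pq"
)

// Placeholder for the server's canonical-hash query; the real statement differs.
const retrieveCanonicalBlockHashByNumber = `SELECT block_hash FROM eth.header_cids WHERE block_number = $1 LIMIT 1`

func canonicalHash(db *sqlx.DB, number uint64) (string, error) {
    var hash string
    if err := db.Get(&hash, retrieveCanonicalBlockHashByNumber, number); err != nil {
        // Returning the error keeps sql.ErrNoRows distinguishable from real failures.
        return "", err
    }
    return hash, nil
}

func main() {
    // Placeholder DSN; adjust for the local database.
    db, err := sqlx.Connect("postgres", "postgres://vdbm:password@localhost:8077/vulcanize_public?sslmode=disable")
    if err != nil {
        panic(err)
    }
    hash, err := canonicalHash(db, 1)
    if err == sql.ErrNoRows {
        fmt.Println("no canonical hash at this height")
        return
    } else if err != nil {
        panic(err)
    }
    fmt.Println("canonical hash:", hash)
}
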
type rowResult struct { type rowResult struct {
@ -620,17 +523,17 @@ func (b *Backend) GetCanonicalHeader(number uint64) (string, []byte, error) {
} }
// GetEVM constructs and returns a vm.EVM // GetEVM constructs and returns a vm.EVM
func (b *Backend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) { func (b *Backend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, error) {
vmError := func() error { return nil } state.SetBalance(msg.From(), math.MaxBig256)
vmctx := core.NewEVMBlockContext(header, b, nil)
txContext := core.NewEVMTxContext(msg) txContext := core.NewEVMTxContext(msg)
context := core.NewEVMBlockContext(header, b, nil) return vm.NewEVM(vmctx, txContext, state, b.Config.ChainConfig, b.Config.VmConfig), nil
return vm.NewEVM(context, txContext, state, b.Config.ChainConfig, b.Config.VMConfig), vmError, nil
} }
// GetAccountByNumberOrHash returns the account object for the provided address at the block corresponding to the provided number or hash // GetAccountByNumberOrHash returns the account object for the provided address at the block corresponding to the provided number or hash
func (b *Backend) GetAccountByNumberOrHash(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*types.StateAccount, error) { func (b *Backend) GetAccountByNumberOrHash(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*state.Account, error) {
if blockNr, ok := blockNrOrHash.Number(); ok { if blockNr, ok := blockNrOrHash.Number(); ok {
return b.GetAccountByNumber(ctx, address, blockNr) return b.GetAccountByNumber(ctx, address, uint64(blockNr.Int64()))
} }
if hash, ok := blockNrOrHash.Hash(); ok { if hash, ok := blockNrOrHash.Hash(); ok {
return b.GetAccountByHash(ctx, address, hash) return b.GetAccountByHash(ctx, address, hash)
@ -639,56 +542,28 @@ func (b *Backend) GetAccountByNumberOrHash(ctx context.Context, address common.A
} }
// GetAccountByNumber returns the account object for the provided address at the canonical block at the provided height // GetAccountByNumber returns the account object for the provided address at the canonical block at the provided height
func (b *Backend) GetAccountByNumber(ctx context.Context, address common.Address, blockNumber rpc.BlockNumber) (*types.StateAccount, error) { func (b *Backend) GetAccountByNumber(ctx context.Context, address common.Address, number uint64) (*state.Account, error) {
var err error hash := b.GetCanonicalHash(number)
number := blockNumber.Int64() if hash == (common.Hash{}) {
if blockNumber == rpc.LatestBlockNumber { return nil, fmt.Errorf("no canonical block hash found for provided height (%d)", number)
number, err = b.Retriever.RetrieveLastBlockNumber()
if err != nil {
return nil, err
}
} }
if blockNumber == rpc.EarliestBlockNumber {
number, err = b.Retriever.RetrieveFirstBlockNumber()
if err != nil {
return nil, err
}
}
if blockNumber == rpc.PendingBlockNumber {
return nil, errPendingBlockNumber
}
hash, err := b.GetCanonicalHash(uint64(number))
if err == sql.ErrNoRows {
return nil, errHeaderNotFound
} else if err != nil {
return nil, err
}
return b.GetAccountByHash(ctx, address, hash) return b.GetAccountByHash(ctx, address, hash)
} }
// GetAccountByHash returns the account object for the provided address at the block with the provided hash // GetAccountByHash returns the account object for the provided address at the block with the provided hash
func (b *Backend) GetAccountByHash(ctx context.Context, address common.Address, hash common.Hash) (*types.StateAccount, error) { func (b *Backend) GetAccountByHash(ctx context.Context, address common.Address, hash common.Hash) (*state.Account, error) {
_, err := b.HeaderByHash(context.Background(), hash)
if err == sql.ErrNoRows {
return nil, errHeaderHashNotFound
} else if err != nil {
return nil, err
}
_, accountRlp, err := b.IPLDRetriever.RetrieveAccountByAddressAndBlockHash(address, hash) _, accountRlp, err := b.IPLDRetriever.RetrieveAccountByAddressAndBlockHash(address, hash)
if err != nil { if err != nil {
return nil, err return nil, err
} }
acct := new(state.Account)
acct := new(types.StateAccount)
return acct, rlp.DecodeBytes(accountRlp, acct) return acct, rlp.DecodeBytes(accountRlp, acct)
} }
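
Both versions RLP-decode the account leaf into an account struct (types.StateAccount on master, state.Account on v0.3.1-alpha). A small round-trip sketch, assuming the go-ethereum version used here where Balance is a *big.Int; the account values are illustrative:

package main

import (
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/rlp"
)

func main() {
    // Illustrative account: empty storage root, code hash of empty code.
    acct := types.StateAccount{
        Nonce:    1,
        Balance:  big.NewInt(1e18),
        Root:     common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
        CodeHash: crypto.Keccak256(nil),
    }
    accountRLP, err := rlp.EncodeToBytes(&acct)
    if err != nil {
        panic(err)
    }
    // This mirrors the decode performed in GetAccountByHash.
    decoded := new(types.StateAccount)
    if err := rlp.DecodeBytes(accountRLP, decoded); err != nil {
        panic(err)
    }
    fmt.Printf("nonce=%d balance=%s\n", decoded.Nonce, decoded.Balance)
}
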
// GetCodeByNumberOrHash returns the byte code for the contract deployed at the provided address at the block with the provided hash or block number // GetCodeByNumberOrHash returns the byte code for the contract deployed at the provided address at the block with the provided hash or block number
func (b *Backend) GetCodeByNumberOrHash(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) ([]byte, error) { func (b *Backend) GetCodeByNumberOrHash(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) ([]byte, error) {
if blockNr, ok := blockNrOrHash.Number(); ok { if blockNr, ok := blockNrOrHash.Number(); ok {
return b.GetCodeByNumber(ctx, address, blockNr) return b.GetCodeByNumber(ctx, address, uint64(blockNr.Int64()))
} }
if hash, ok := blockNrOrHash.Hash(); ok { if hash, ok := blockNrOrHash.Hash(); ok {
return b.GetCodeByHash(ctx, address, hash) return b.GetCodeByHash(ctx, address, hash)
@ -697,28 +572,8 @@ func (b *Backend) GetCodeByNumberOrHash(ctx context.Context, address common.Addr
} }
// GetCodeByNumber returns the byte code for the contract deployed at the provided address at the canonical block with the provided block number // GetCodeByNumber returns the byte code for the contract deployed at the provided address at the canonical block with the provided block number
func (b *Backend) GetCodeByNumber(ctx context.Context, address common.Address, blockNumber rpc.BlockNumber) ([]byte, error) { func (b *Backend) GetCodeByNumber(ctx context.Context, address common.Address, number uint64) ([]byte, error) {
var err error hash := b.GetCanonicalHash(number)
number := blockNumber.Int64()
if blockNumber == rpc.LatestBlockNumber {
number, err = b.Retriever.RetrieveLastBlockNumber()
if err != nil {
return nil, err
}
}
if blockNumber == rpc.EarliestBlockNumber {
number, err = b.Retriever.RetrieveFirstBlockNumber()
if err != nil {
return nil, err
}
}
if blockNumber == rpc.PendingBlockNumber {
return nil, errPendingBlockNumber
}
hash, err := b.GetCanonicalHash(uint64(number))
if err != nil {
return nil, err
}
if hash == (common.Hash{}) { if hash == (common.Hash{}) {
return nil, fmt.Errorf("no canoncial block hash found for provided height (%d)", number) return nil, fmt.Errorf("no canoncial block hash found for provided height (%d)", number)
} }
@ -744,12 +599,10 @@ func (b *Backend) GetCodeByHash(ctx context.Context, address common.Address, has
err = tx.Commit() err = tx.Commit()
} }
}() }()
err = tx.Get(&codeHash, RetrieveCodeHashByLeafKeyAndBlockHash, leafKey.Hex(), hash.Hex()) if err := tx.Get(&codeHash, RetrieveCodeHashByLeafKeyAndBlockHash, leafKey.Hex(), hash.Hex()); err != nil {
if err != nil {
return nil, err return nil, err
} }
var mhKey string mhKey, err := shared2.MultihashKeyFromKeccak256(common.BytesToHash(codeHash))
mhKey, err = ethServerShared.MultihashKeyFromKeccak256(common.BytesToHash(codeHash))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -759,55 +612,28 @@ func (b *Backend) GetCodeByHash(ctx context.Context, address common.Address, has
} }
// GetStorageByNumberOrHash returns the storage value for the provided contract address and storage key at the block corresponding to the provided number or hash // GetStorageByNumberOrHash returns the storage value for the provided contract address and storage key at the block corresponding to the provided number or hash
func (b *Backend) GetStorageByNumberOrHash(ctx context.Context, address common.Address, key common.Hash, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { func (b *Backend) GetStorageByNumberOrHash(ctx context.Context, address common.Address, storageLeafKey common.Hash, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) {
if blockNr, ok := blockNrOrHash.Number(); ok { if blockNr, ok := blockNrOrHash.Number(); ok {
return b.GetStorageByNumber(ctx, address, key, blockNr) return b.GetStorageByNumber(ctx, address, storageLeafKey, uint64(blockNr.Int64()))
} }
if hash, ok := blockNrOrHash.Hash(); ok { if hash, ok := blockNrOrHash.Hash(); ok {
return b.GetStorageByHash(ctx, address, key, hash) return b.GetStorageByHash(ctx, address, storageLeafKey, hash)
} }
return nil, errors.New("invalid arguments; neither block nor hash specified") return nil, errors.New("invalid arguments; neither block nor hash specified")
} }
// GetStorageByNumber returns the storage value for the provided contract address and storage key at the block corresponding to the provided number // GetStorageByNumber returns the storage value for the provided contract address and storage key at the block corresponding to the provided number
func (b *Backend) GetStorageByNumber(ctx context.Context, address common.Address, key common.Hash, blockNumber rpc.BlockNumber) (hexutil.Bytes, error) { func (b *Backend) GetStorageByNumber(ctx context.Context, address common.Address, storageLeafKey common.Hash, number uint64) (hexutil.Bytes, error) {
var err error hash := b.GetCanonicalHash(number)
number := blockNumber.Int64() if hash == (common.Hash{}) {
if blockNumber == rpc.LatestBlockNumber { return nil, fmt.Errorf("no canonical block hash found for provided height (%d)", number)
number, err = b.Retriever.RetrieveLastBlockNumber()
if err != nil {
return nil, err
}
} }
if blockNumber == rpc.EarliestBlockNumber { return b.GetStorageByHash(ctx, address, storageLeafKey, hash)
number, err = b.Retriever.RetrieveFirstBlockNumber()
if err != nil {
return nil, err
}
}
if blockNumber == rpc.PendingBlockNumber {
return nil, errPendingBlockNumber
}
hash, err := b.GetCanonicalHash(uint64(number))
if err == sql.ErrNoRows {
return nil, errHeaderNotFound
} else if err != nil {
return nil, err
}
return b.GetStorageByHash(ctx, address, key, hash)
} }
// GetStorageByHash returns the storage value for the provided contract address and storage key at the block corresponding to the provided hash // GetStorageByHash returns the storage value for the provided contract address and storage key at the block corresponding to the provided hash
func (b *Backend) GetStorageByHash(ctx context.Context, address common.Address, key, hash common.Hash) (hexutil.Bytes, error) { func (b *Backend) GetStorageByHash(ctx context.Context, address common.Address, storageLeafKey, hash common.Hash) (hexutil.Bytes, error) {
_, err := b.HeaderByHash(context.Background(), hash) _, storageRlp, err := b.IPLDRetriever.RetrieveStorageAtByAddressAndStorageKeyAndBlockHash(address, storageLeafKey, hash)
if err == sql.ErrNoRows {
return nil, errHeaderHashNotFound
} else if err != nil {
return nil, err
}
_, _, storageRlp, err := b.IPLDRetriever.RetrieveStorageAtByAddressAndStorageSlotAndBlockHash(address, key, hash)
return storageRlp, err return storageRlp, err
} }
@ -826,11 +652,6 @@ func (b *Backend) GetHeader(hash common.Hash, height uint64) *types.Header {
return header return header
} }
// ValidateTrie validates the trie for the given stateRoot
func (b *Backend) ValidateTrie(stateRoot common.Hash) error {
return validator.NewValidator(nil, b.EthDB).ValidateTrie(stateRoot)
}
// RPCGasCap returns the configured gas cap for the rpc server // RPCGasCap returns the configured gas cap for the rpc server
func (b *Backend) RPCGasCap() *big.Int { func (b *Backend) RPCGasCap() *big.Int {
return b.Config.RPCGasCap return b.Config.RPCGasCap
@ -863,18 +684,3 @@ func (b *Backend) BloomStatus() (uint64, uint64) {
func (b *Backend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) { func (b *Backend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
panic("implement me") panic("implement me")
} }
func logStateDBStatsOnTimer(ethDB *ipfsethdb.Database, gcc *shared.GroupCacheConfig) {
// No stats logging if interval isn't a positive integer.
if gcc.StateDB.LogStatsIntervalInSecs <= 0 {
return
}
ticker := time.NewTicker(time.Duration(gcc.StateDB.LogStatsIntervalInSecs) * time.Second)
go func() {
for range ticker.C {
log.Infof("%s groupcache stats: %+v", StateDBGroupCacheName, ethDB.GetCacheStats())
}
}()
}

View File

@ -23,18 +23,20 @@ import (
"math/big" "math/big"
"github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
) )
// RPCMarshalHeader converts the given header to the RPC output. // RPCMarshalHeader converts the given header to the RPC output.
// This function is eth/internal so we have to make our own version here... // This function is eth/internal so we have to make our own version here...
func RPCMarshalHeader(head *types.Header) map[string]interface{} { func RPCMarshalHeader(head *types.Header) map[string]interface{} {
headerMap := map[string]interface{}{ return map[string]interface{}{
"number": (*hexutil.Big)(head.Number), "number": (*hexutil.Big)(head.Number),
"hash": head.Hash(), "hash": head.Hash(),
"parentHash": head.ParentHash, "parentHash": head.ParentHash,
@ -53,11 +55,6 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} {
"transactionsRoot": head.TxHash, "transactionsRoot": head.TxHash,
"receiptsRoot": head.ReceiptHash, "receiptsRoot": head.ReceiptHash,
} }
if head.BaseFee != nil {
headerMap["baseFee"] = head.BaseFee
}
return headerMap
} }
// RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are // RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are
@ -120,8 +117,8 @@ func RPCMarshalBlockWithUncleHashes(block *types.Block, uncleHashes []common.Has
} }
fields["transactions"] = transactions fields["transactions"] = transactions
} }
fields["uncles"] = uncleHashes fields["uncles"] = uncleHashes
return fields, nil return fields, nil
} }
@ -137,15 +134,14 @@ func NewRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransa
// NewRPCTransaction returns a transaction that will serialize to the RPC // NewRPCTransaction returns a transaction that will serialize to the RPC
// representation, with the given location metadata set (if available). // representation, with the given location metadata set (if available).
func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64, baseFee *big.Int) *RPCTransaction { func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64) *RPCTransaction {
var signer types.Signer var signer types.Signer = types.FrontierSigner{}
if tx.Protected() { if tx.Protected() {
signer = types.LatestSignerForChainID(tx.ChainId()) signer = types.NewEIP155Signer(tx.ChainId())
} else {
signer = types.HomesteadSigner{}
} }
from, _ := types.Sender(signer, tx) from, _ := types.Sender(signer, tx)
v, r, s := tx.RawSignatureValues() v, r, s := tx.RawSignatureValues()
result := &RPCTransaction{ result := &RPCTransaction{
From: from, From: from,
Gas: hexutil.Uint64(tx.Gas()), Gas: hexutil.Uint64(tx.Gas()),
@ -155,7 +151,6 @@ func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
Nonce: hexutil.Uint64(tx.Nonce()), Nonce: hexutil.Uint64(tx.Nonce()),
To: tx.To(), To: tx.To(),
Value: (*hexutil.Big)(tx.Value()), Value: (*hexutil.Big)(tx.Value()),
Type: hexutil.Uint64(tx.Type()),
V: (*hexutil.Big)(v), V: (*hexutil.Big)(v),
R: (*hexutil.Big)(r), R: (*hexutil.Big)(r),
S: (*hexutil.Big)(s), S: (*hexutil.Big)(s),
@ -165,26 +160,6 @@ func NewRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber)) result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber))
result.TransactionIndex = (*hexutil.Uint64)(&index) result.TransactionIndex = (*hexutil.Uint64)(&index)
} }
switch tx.Type() {
case types.AccessListTxType:
al := tx.AccessList()
result.Accesses = &al
result.ChainID = (*hexutil.Big)(tx.ChainId())
case types.DynamicFeeTxType:
al := tx.AccessList()
result.Accesses = &al
result.ChainID = (*hexutil.Big)(tx.ChainId())
result.GasFeeCap = (*hexutil.Big)(tx.GasFeeCap())
result.GasTipCap = (*hexutil.Big)(tx.GasTipCap())
// if the transaction has been mined, compute the effective gas price
if baseFee != nil && blockHash != (common.Hash{}) {
// price = min(tip, gasFeeCap - baseFee) + baseFee
price := math.BigMin(new(big.Int).Add(tx.GasTipCap(), baseFee), tx.GasFeeCap())
result.GasPrice = (*hexutil.Big)(price)
} else {
result.GasPrice = nil
}
}
return result return result
} }
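
For sender recovery, master picks the signer with types.LatestSignerForChainID (covering EIP-155, EIP-2930 and EIP-1559 transactions) and falls back to HomesteadSigner for unprotected transactions, whereas v0.3.1-alpha uses NewEIP155Signer with a Frontier fallback. A hedged sketch of signing and recovering a sender with the newer helper; the key is generated on the spot purely for illustration:

package main

import (
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/crypto"
)

func main() {
    key, err := crypto.GenerateKey() // throwaway key, illustration only
    if err != nil {
        panic(err)
    }
    chainID := big.NewInt(1)
    to := common.HexToAddress("0x0000000000000000000000000000000000000002")
    tx := types.NewTx(&types.LegacyTx{
        Nonce:    0,
        To:       &to,
        Value:    big.NewInt(0),
        Gas:      21000,
        GasPrice: big.NewInt(1),
    })

    // LatestSignerForChainID accepts every protected tx type for the chain ID;
    // an unprotected pre-EIP-155 tx would instead need types.HomesteadSigner{}.
    signer := types.LatestSignerForChainID(chainID)
    signed, err := types.SignTx(tx, signer, key)
    if err != nil {
        panic(err)
    }
    from, err := types.Sender(signer, signed)
    if err != nil {
        panic(err)
    }
    fmt.Println("recovered sender:", from.Hex(), "key address:", crypto.PubkeyToAddress(key.PublicKey).Hex())
}
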
@ -265,7 +240,55 @@ func newRPCTransactionFromBlockIndex(b *types.Block, index uint64) *RPCTransacti
if index >= uint64(len(txs)) { if index >= uint64(len(txs)) {
return nil return nil
} }
return NewRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index, b.BaseFee()) return NewRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index)
}
// extractLogsOfInterest returns logs from the receipt IPLD
func extractLogsOfInterest(rctIPLDs []ipfs.BlockModel, wantedTopics [][]string) ([]*types.Log, error) {
var logs []*types.Log
for _, rctIPLD := range rctIPLDs {
rctRLP := rctIPLD
var rct types.Receipt
if err := rlp.DecodeBytes(rctRLP.Data, &rct); err != nil {
return nil, err
}
for _, log := range rct.Logs {
if wantedLog(wantedTopics, log.Topics) {
logs = append(logs, log)
}
}
}
return logs, nil
}
// returns true if the log matches on the filter
func wantedLog(wantedTopics [][]string, actualTopics []common.Hash) bool {
// actualTopics will always have length <= 4
// wantedTopics will always have length 4
matches := 0
for i, actualTopic := range actualTopics {
// If we have topics in this filter slot, count as a match if the actualTopic matches one of the ones in this filter slot
if len(wantedTopics[i]) > 0 {
matches += sliceContainsHash(wantedTopics[i], actualTopic)
} else {
// Filter slot is empty, not matching any topics at this slot => counts as a match
matches++
}
}
if matches == len(actualTopics) {
return true
}
return false
}
// returns 1 if the slice contains the hash, 0 if it does not
func sliceContainsHash(slice []string, hash common.Hash) int {
for _, str := range slice {
if str == hash.String() {
return 1
}
}
return 0
} }
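
wantedLog treats the filter as four positional topic slots: an empty slot matches anything, and a non-empty slot matches when the log's topic at that position appears in the slot's list. The following standalone snippet restates that rule for illustration (it is not the package's code, just the same matching logic inlined, using the well-known ERC-20 Transfer topic):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
)

// matchesTopics mirrors the positional matching used by wantedLog:
// slot i of the filter constrains topic i of the log; an empty slot is a wildcard.
func matchesTopics(filter [][]string, logTopics []common.Hash) bool {
    for i, topic := range logTopics {
        if i >= len(filter) || len(filter[i]) == 0 {
            continue // wildcard slot
        }
        found := false
        for _, want := range filter[i] {
            if want == topic.String() {
                found = true
                break
            }
        }
        if !found {
            return false
        }
    }
    return true
}

func main() {
    // keccak256("Transfer(address,address,uint256)")
    transfer := common.HexToHash("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef")
    filter := [][]string{{transfer.String()}, {}, {}, {}} // match Transfer with any indexed args
    logTopics := []common.Hash{transfer, common.HexToHash("0x01"), common.HexToHash("0x02")}
    fmt.Println("matches:", matchesTopics(filter, logTopics)) // true
}
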
func toFilterArg(q ethereum.FilterQuery) (interface{}, error) { func toFilterArg(q ethereum.FilterQuery) (interface{}, error) {

View File

@ -22,14 +22,14 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/lib/pq" "github.com/lib/pq"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"gorm.io/driver/postgres"
"gorm.io/gorm"
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared" "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
"github.com/vulcanize/ipld-eth-server/pkg/shared"
) )
// Retriever interface for substituting mocks in tests // Retriever interface for substituting mocks in tests
@ -41,74 +41,13 @@ type Retriever interface {
// CIDRetriever satisfies the CIDRetriever interface for ethereum // CIDRetriever satisfies the CIDRetriever interface for ethereum
type CIDRetriever struct { type CIDRetriever struct {
db *sqlx.DB db *postgres.DB
gormDB *gorm.DB
}
type IPLDModelRecord struct {
models.IPLDModel
}
// TableName overrides the table name used by IPLD
func (IPLDModelRecord) TableName() string {
return "public.blocks"
}
type HeaderCIDRecord struct {
CID string `gorm:"column:cid"`
BlockHash string `gorm:"primaryKey"`
BlockNumber string
ParentHash string
Timestamp uint64
StateRoot string
TotalDifficulty string `gorm:"column:td"`
TxRoot string
RctRoot string `gorm:"column:receipt_root"`
UncleRoot string
Bloom []byte
MhKey string
// gorm doesn't check if foreign key exists in database.
// It is required to eager load relations using preload.
TransactionCIDs []TransactionCIDRecord `gorm:"foreignKey:HeaderID;references:BlockHash"`
IPLD IPLDModelRecord `gorm:"foreignKey:MhKey;references:Key"`
}
// TableName overrides the table name used by HeaderCIDRecord
func (HeaderCIDRecord) TableName() string {
return "eth.header_cids"
}
type TransactionCIDRecord struct {
CID string `gorm:"column:cid"`
TxHash string `gorm:"primaryKey"`
HeaderID string `gorm:"column:header_id"`
Index int64
Src string
Dst string
MhKey string
IPLD IPLDModelRecord `gorm:"foreignKey:MhKey;references:Key"`
}
// TableName overrides the table name used by TransactionCIDRecord
func (TransactionCIDRecord) TableName() string {
return "eth.transaction_cids"
} }
// NewCIDRetriever returns a pointer to a new CIDRetriever which supports the CIDRetriever interface // NewCIDRetriever returns a pointer to a new CIDRetriever which supports the CIDRetriever interface
func NewCIDRetriever(db *sqlx.DB) *CIDRetriever { func NewCIDRetriever(db *postgres.DB) *CIDRetriever {
gormDB, err := gorm.Open(postgres.New(postgres.Config{
Conn: db,
}), &gorm.Config{})
if err != nil {
log.Error(err)
return nil
}
return &CIDRetriever{ return &CIDRetriever{
db: db, db: db,
gormDB: gormDB,
} }
} }
@ -147,10 +86,9 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
}() }()
// Retrieve cached header CIDs at this block height // Retrieve cached header CIDs at this block height
var headers []models.HeaderModel headers, err := ecr.RetrieveHeaderCIDs(tx, blockNumber)
headers, err = ecr.RetrieveHeaderCIDs(tx, blockNumber)
if err != nil { if err != nil {
log.Error("header cid retrieval error", err) log.Error("header cid retrieval error")
return nil, true, err return nil, true, err
} }
cws := make([]CIDWrapper, len(headers)) cws := make([]CIDWrapper, len(headers))
@ -163,8 +101,7 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
empty = false empty = false
if filter.HeaderFilter.Uncles { if filter.HeaderFilter.Uncles {
// Retrieve uncle cids for this header id // Retrieve uncle cids for this header id
var uncleCIDs []models.UncleModel uncleCIDs, err := ecr.RetrieveUncleCIDsByHeaderID(tx, header.ID)
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, header.BlockHash)
if err != nil { if err != nil {
log.Error("uncle cid retrieval error") log.Error("uncle cid retrieval error")
return nil, true, err return nil, true, err
@ -174,7 +111,7 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
} }
// Retrieve cached trx CIDs // Retrieve cached trx CIDs
if !filter.TxFilter.Off { if !filter.TxFilter.Off {
cw.Transactions, err = ecr.RetrieveTxCIDs(tx, filter.TxFilter, header.BlockHash) cw.Transactions, err = ecr.RetrieveTxCIDs(tx, filter.TxFilter, header.ID)
if err != nil { if err != nil {
log.Error("transaction cid retrieval error") log.Error("transaction cid retrieval error")
return nil, true, err return nil, true, err
@ -183,13 +120,13 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
empty = false empty = false
} }
} }
trxHashes := make([]string, len(cw.Transactions)) trxIds := make([]int64, len(cw.Transactions))
for j, t := range cw.Transactions { for j, tx := range cw.Transactions {
trxHashes[j] = t.TxHash trxIds[j] = tx.ID
} }
// Retrieve cached receipt CIDs // Retrieve cached receipt CIDs
if !filter.ReceiptFilter.Off { if !filter.ReceiptFilter.Off {
cw.Receipts, err = ecr.RetrieveRctCIDsByHeaderID(tx, filter.ReceiptFilter, header.BlockHash, trxHashes) cw.Receipts, err = ecr.RetrieveRctCIDsByHeaderID(tx, filter.ReceiptFilter, header.ID, trxIds)
if err != nil { if err != nil {
log.Error("receipt cid retrieval error") log.Error("receipt cid retrieval error")
return nil, true, err return nil, true, err
@ -200,7 +137,7 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
} }
// Retrieve cached state CIDs // Retrieve cached state CIDs
if !filter.StateFilter.Off { if !filter.StateFilter.Off {
cw.StateNodes, err = ecr.RetrieveStateCIDs(tx, filter.StateFilter, header.BlockHash) cw.StateNodes, err = ecr.RetrieveStateCIDs(tx, filter.StateFilter, header.ID)
if err != nil { if err != nil {
log.Error("state cid retrieval error") log.Error("state cid retrieval error")
return nil, true, err return nil, true, err
@ -211,7 +148,7 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
} }
// Retrieve cached storage CIDs // Retrieve cached storage CIDs
if !filter.StorageFilter.Off { if !filter.StorageFilter.Off {
cw.StorageNodes, err = ecr.RetrieveStorageCIDs(tx, filter.StorageFilter, header.BlockHash) cw.StorageNodes, err = ecr.RetrieveStorageCIDs(tx, filter.StorageFilter, header.ID)
if err != nil { if err != nil {
log.Error("storage cid retrieval error") log.Error("storage cid retrieval error")
return nil, true, err return nil, true, err
@ -227,36 +164,35 @@ func (ecr *CIDRetriever) Retrieve(filter SubscriptionSettings, blockNumber int64
} }
// RetrieveHeaderCIDs retrieves and returns all of the header cids at the provided block height // RetrieveHeaderCIDs retrieves and returns all of the header cids at the provided block height
func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]models.HeaderModel, error) { func (ecr *CIDRetriever) RetrieveHeaderCIDs(tx *sqlx.Tx, blockNumber int64) ([]eth.HeaderModel, error) {
log.Debug("retrieving header cids for block ", blockNumber) log.Debug("retrieving header cids for block ", blockNumber)
headers := make([]models.HeaderModel, 0) headers := make([]eth.HeaderModel, 0)
pgStr := `SELECT CAST(block_number as Text), block_hash,parent_hash,cid,mh_key,CAST(td as Text),node_id, pgStr := `SELECT * FROM eth.header_cids
CAST(reward as Text), state_root, uncle_root, tx_root, receipt_root, bloom, timestamp, times_validated,
coinbase FROM eth.header_cids
WHERE block_number = $1` WHERE block_number = $1`
return headers, tx.Select(&headers, pgStr, blockNumber) return headers, tx.Select(&headers, pgStr, blockNumber)
} }
// RetrieveUncleCIDsByHeaderID retrieves and returns all of the uncle cids for the provided header // RetrieveUncleCIDsByHeaderID retrieves and returns all of the uncle cids for the provided header
func (ecr *CIDRetriever) RetrieveUncleCIDsByHeaderID(tx *sqlx.Tx, headerID string) ([]models.UncleModel, error) { func (ecr *CIDRetriever) RetrieveUncleCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ([]eth.UncleModel, error) {
log.Debug("retrieving uncle cids for block id ", headerID) log.Debug("retrieving uncle cids for block id ", headerID)
headers := make([]models.UncleModel, 0) headers := make([]eth.UncleModel, 0)
pgStr := `SELECT header_id,block_hash,parent_hash,cid,mh_key, CAST(reward as text) FROM eth.uncle_cids pgStr := `SELECT * FROM eth.uncle_cids
WHERE header_id = $1` WHERE header_id = $1`
return headers, tx.Select(&headers, pgStr, headerID) return headers, tx.Select(&headers, pgStr, headerID)
} }
// RetrieveTxCIDs retrieves and returns all of the trx cids at the provided block height that conform to the provided filter parameters // RetrieveTxCIDs retrieves and returns all of the trx cids at the provided block height that conform to the provided filter parameters
// also returns the ids for the returned transaction cids // also returns the ids for the returned transaction cids
func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID string) ([]models.TxModel, error) { func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID int64) ([]eth.TxModel, error) {
log.Debug("retrieving transaction cids for header id ", headerID) log.Debug("retrieving transaction cids for header id ", headerID)
args := make([]interface{}, 0, 3) args := make([]interface{}, 0, 3)
results := make([]models.TxModel, 0) results := make([]eth.TxModel, 0)
id := 1 id := 1
pgStr := fmt.Sprintf(`SELECT transaction_cids.tx_hash, transaction_cids.header_id,transaction_cids.cid, transaction_cids.mh_key, pgStr := fmt.Sprintf(`SELECT transaction_cids.id, transaction_cids.header_id,
transaction_cids.dst, transaction_cids.src, transaction_cids.index, transaction_cids.tx_data transaction_cids.tx_hash, transaction_cids.cid, transaction_cids.mh_key,
FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash) transaction_cids.dst, transaction_cids.src, transaction_cids.index, transaction_cids.tx_data
WHERE header_cids.block_hash = $%d`, id) FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
WHERE header_cids.id = $%d`, id)
args = append(args, headerID) args = append(args, headerID)
id++ id++
if len(txFilter.Dst) > 0 { if len(txFilter.Dst) > 0 {
@ -272,181 +208,97 @@ func (ecr *CIDRetriever) RetrieveTxCIDs(tx *sqlx.Tx, txFilter TxFilter, headerID
return results, tx.Select(&results, pgStr, args...) return results, tx.Select(&results, pgStr, args...)
} }
func topicFilterCondition(id *int, topics [][]string, args []interface{}, pgStr string, first bool) (string, []interface{}) { // RetrieveRctCIDsByHeaderID retrieves and returns all of the rct cids at the provided header ID that conform to the provided
for i, topicSet := range topics { // filter parameters and correspond to the provided tx ids
if len(topicSet) == 0 { func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter ReceiptFilter, headerID int64, trxIds []int64) ([]eth.ReceiptModel, error) {
continue log.Debug("retrieving receipt cids for header id ", headerID)
} args := make([]interface{}, 0, 4)
pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.mh_key,
if !first { receipt_cids.contract, receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s,
pgStr += " AND" receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts
} else { FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
first = false WHERE receipt_cids.tx_id = transaction_cids.id
} AND transaction_cids.header_id = header_cids.id
pgStr += fmt.Sprintf(` eth.log_cids.topic%d = ANY ($%d)`, i, *id) AND header_cids.id = $1`
args = append(args, pq.Array(topicSet)) id := 2
*id++ args = append(args, headerID)
}
return pgStr, args
}
func logFilterCondition(id *int, pgStr string, args []interface{}, rctFilter ReceiptFilter) (string, []interface{}) {
if len(rctFilter.LogAddresses) > 0 {
pgStr += fmt.Sprintf(` AND eth.log_cids.address = ANY ($%d)`, *id)
args = append(args, pq.Array(rctFilter.LogAddresses))
*id++
}
// Filter on topics if there are any
if hasTopics(rctFilter.Topics) {
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, false)
}
return pgStr, args
}
func receiptFilterConditions(id *int, pgStr string, args []interface{}, rctFilter ReceiptFilter, txHashes []string) (string, []interface{}) {
rctCond := " AND (receipt_cids.tx_id = ANY ( "
logQuery := "SELECT rct_id FROM eth.log_cids WHERE"
if len(rctFilter.LogAddresses) > 0 { if len(rctFilter.LogAddresses) > 0 {
// Filter on log contract addresses if there are any // Filter on log contract addresses if there are any
pgStr += fmt.Sprintf(`%s %s eth.log_cids.address = ANY ($%d)`, rctCond, logQuery, *id) pgStr += fmt.Sprintf(` AND ((receipt_cids.log_contracts && $%d::VARCHAR(66)[]`, id)
args = append(args, pq.Array(rctFilter.LogAddresses)) args = append(args, pq.Array(rctFilter.LogAddresses))
*id++ id++
// Filter on topics if there are any // Filter on topics if there are any
if hasTopics(rctFilter.Topics) { if hasTopics(rctFilter.Topics) {
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, false) pgStr += " AND ("
first := true
for i, topicSet := range rctFilter.Topics {
if i < 4 && len(topicSet) > 0 {
if first {
pgStr += fmt.Sprintf(`receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
first = false
} else {
pgStr += fmt.Sprintf(` AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
}
args = append(args, pq.Array(topicSet))
id++
}
}
pgStr += ")"
} }
pgStr += ")" pgStr += ")"
// Filter on txIDs if there are any and we are matching txs
// Filter on txHashes if there are any, and we are matching txs if rctFilter.MatchTxs && len(trxIds) > 0 {
if rctFilter.MatchTxs && len(txHashes) > 0 { pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d)`, *id) args = append(args, pq.Array(trxIds))
args = append(args, pq.Array(txHashes))
} }
pgStr += ")" pgStr += ")"
} else { // If there are no contract addresses to filter on } else { // If there are no contract addresses to filter on
// Filter on topics if there are any // Filter on topics if there are any
if hasTopics(rctFilter.Topics) { if hasTopics(rctFilter.Topics) {
pgStr += rctCond + logQuery pgStr += " AND (("
pgStr, args = topicFilterCondition(id, rctFilter.Topics, args, pgStr, true) first := true
pgStr += ")" for i, topicSet := range rctFilter.Topics {
// Filter on txHashes if there are any, and we are matching txs if i < 4 && len(topicSet) > 0 {
if rctFilter.MatchTxs && len(txHashes) > 0 { if first {
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d)`, *id) pgStr += fmt.Sprintf(`receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
args = append(args, pq.Array(txHashes)) first = false
} else {
pgStr += fmt.Sprintf(` AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
}
args = append(args, pq.Array(topicSet))
id++
}
} }
pgStr += ")" pgStr += ")"
} else if rctFilter.MatchTxs && len(txHashes) > 0 { // Filter on txIDs if there are any and we are matching txs
if rctFilter.MatchTxs && len(trxIds) > 0 {
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
args = append(args, pq.Array(trxIds))
}
pgStr += ")"
} else if rctFilter.MatchTxs && len(trxIds) > 0 {
// If there are no contract addresses or topics to filter on, // If there are no contract addresses or topics to filter on,
// Filter on txHashes if there are any, and we are matching txs // Filter on txIDs if there are any and we are matching txs
pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d)`, *id) pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
args = append(args, pq.Array(txHashes)) args = append(args, pq.Array(trxIds))
} }
} }
return pgStr, args
}
// RetrieveRctCIDsByHeaderID retrieves and returns all of the rct cids at the provided header ID that conform to the provided
// filter parameters and correspond to the provided tx ids
func (ecr *CIDRetriever) RetrieveRctCIDsByHeaderID(tx *sqlx.Tx, rctFilter ReceiptFilter, headerID string, trxHashes []string) ([]models.ReceiptModel, error) {
log.Debug("retrieving receipt cids for header id ", headerID)
args := make([]interface{}, 0, 4)
pgStr := `SELECT receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key,
receipt_cids.contract, receipt_cids.contract_hash
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
AND transaction_cids.header_id = header_cids.block_hash
AND header_cids.block_hash = $1`
id := 2
args = append(args, headerID)
pgStr, args = receiptFilterConditions(&id, pgStr, args, rctFilter, trxHashes)
pgStr += ` ORDER BY transaction_cids.index` pgStr += ` ORDER BY transaction_cids.index`
receiptCIDs := make([]models.ReceiptModel, 0) receiptCids := make([]eth.ReceiptModel, 0)
return receiptCIDs, tx.Select(&receiptCIDs, pgStr, args...) return receiptCids, tx.Select(&receiptCids, pgStr, args...)
}
// RetrieveFilteredGQLLogs retrieves and returns all the log cIDs provided blockHash that conform to the provided
// filter parameters.
func (ecr *CIDRetriever) RetrieveFilteredGQLLogs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockHash *common.Hash) ([]LogResult, error) {
log.Debug("retrieving log cids for receipt ids")
args := make([]interface{}, 0, 4)
id := 1
pgStr := `SELECT eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
eth.log_cids.log_data, eth.transaction_cids.tx_hash, data, eth.receipt_cids.leaf_cid as cid, eth.receipt_cids.post_status
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids, public.blocks
WHERE eth.log_cids.rct_id = receipt_cids.tx_id
AND receipt_cids.tx_id = transaction_cids.tx_hash
AND transaction_cids.header_id = header_cids.block_hash
AND log_cids.leaf_mh_key = blocks.key AND header_cids.block_hash = $1`
args = append(args, blockHash.String())
id++
pgStr, args = logFilterCondition(&id, pgStr, args, rctFilter)
pgStr += ` ORDER BY log_cids.index`
logCIDs := make([]LogResult, 0)
err := tx.Select(&logCIDs, pgStr, args...)
if err != nil {
return nil, err
}
return logCIDs, nil
}
// RetrieveFilteredLog retrieves and returns all the log cIDs provided blockHeight or blockHash that conform to the provided
// filter parameters.
func (ecr *CIDRetriever) RetrieveFilteredLog(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash) ([]LogResult, error) {
log.Debug("retrieving log cids for receipt ids")
args := make([]interface{}, 0, 4)
pgStr := `SELECT eth.log_cids.leaf_cid, eth.log_cids.index, eth.log_cids.rct_id,
eth.log_cids.address, eth.log_cids.topic0, eth.log_cids.topic1, eth.log_cids.topic2, eth.log_cids.topic3,
eth.log_cids.log_data, eth.transaction_cids.tx_hash, eth.transaction_cids.index as txn_index,
header_cids.block_hash, CAST(header_cids.block_number as Text)
FROM eth.log_cids, eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE eth.log_cids.rct_id = receipt_cids.tx_id
AND receipt_cids.tx_id = transaction_cids.tx_hash
AND transaction_cids.header_id = header_cids.block_hash`
id := 1
if blockNumber > 0 {
pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
args = append(args, blockNumber)
id++
}
if blockHash != nil {
pgStr += fmt.Sprintf(` AND header_cids.block_hash = $%d`, id)
args = append(args, blockHash.String())
id++
}
pgStr, args = logFilterCondition(&id, pgStr, args, rctFilter)
pgStr += ` ORDER BY log_cids.index`
logCIDs := make([]LogResult, 0)
err := tx.Select(&logCIDs, pgStr, args...)
if err != nil {
return nil, err
}
return logCIDs, nil
} }
// RetrieveRctCIDs retrieves and returns all of the rct cids at the provided block height or block hash that conform to the provided // RetrieveRctCIDs retrieves and returns all of the rct cids at the provided block height or block hash that conform to the provided
// filter parameters and correspond to the provided tx ids // filter parameters and correspond to the provided tx ids
func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, txHashes []string) ([]models.ReceiptModel, error) { func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, blockNumber int64, blockHash *common.Hash, trxIds []int64) ([]eth.ReceiptModel, error) {
log.Debug("retrieving receipt cids for block ", blockNumber) log.Debug("retrieving receipt cids for block ", blockNumber)
args := make([]interface{}, 0, 5) args := make([]interface{}, 0, 5)
pgStr := `SELECT receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key, receipt_cids.tx_id pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.mh_key,
receipt_cids.contract, receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s,
receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts
FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.tx_hash WHERE receipt_cids.tx_id = transaction_cids.id
AND transaction_cids.header_id = header_cids.block_hash` AND transaction_cids.header_id = header_cids.id`
id := 1 id := 1
if blockNumber > 0 { if blockNumber > 0 {
pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id) pgStr += fmt.Sprintf(` AND header_cids.block_number = $%d`, id)
@ -458,12 +310,70 @@ func (ecr *CIDRetriever) RetrieveRctCIDs(tx *sqlx.Tx, rctFilter ReceiptFilter, b
args = append(args, blockHash.String()) args = append(args, blockHash.String())
id++ id++
} }
if len(rctFilter.LogAddresses) > 0 {
pgStr, args = receiptFilterConditions(&id, pgStr, args, rctFilter, txHashes) // Filter on log contract addresses if there are any
pgStr += fmt.Sprintf(` AND ((receipt_cids.log_contracts && $%d::VARCHAR(66)[]`, id)
args = append(args, pq.Array(rctFilter.LogAddresses))
id++
// Filter on topics if there are any
if hasTopics(rctFilter.Topics) {
pgStr += " AND ("
first := true
for i, topicSet := range rctFilter.Topics {
if i < 4 && len(topicSet) > 0 {
if first {
pgStr += fmt.Sprintf(`receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
first = false
} else {
pgStr += fmt.Sprintf(` AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
}
args = append(args, pq.Array(topicSet))
id++
}
}
pgStr += ")"
}
pgStr += ")"
// Filter on txIDs if there are any and we are matching txs
if rctFilter.MatchTxs && len(trxIds) > 0 {
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
args = append(args, pq.Array(trxIds))
}
pgStr += ")"
} else { // If there are no contract addresses to filter on
// Filter on topics if there are any
if hasTopics(rctFilter.Topics) {
pgStr += " AND (("
first := true
for i, topicSet := range rctFilter.Topics {
if i < 4 && len(topicSet) > 0 {
if first {
pgStr += fmt.Sprintf(`receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
first = false
} else {
pgStr += fmt.Sprintf(` AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]`, i, id)
}
args = append(args, pq.Array(topicSet))
id++
}
}
pgStr += ")"
// Filter on txIDs if there are any and we are matching txs
if rctFilter.MatchTxs && len(trxIds) > 0 {
pgStr += fmt.Sprintf(` OR receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
args = append(args, pq.Array(trxIds))
}
pgStr += ")"
} else if rctFilter.MatchTxs && len(trxIds) > 0 {
// If there are no contract addresses or topics to filter on,
// Filter on txIDs if there are any and we are matching txs
pgStr += fmt.Sprintf(` AND receipt_cids.tx_id = ANY($%d::INTEGER[])`, id)
args = append(args, pq.Array(trxIds))
}
}
pgStr += ` ORDER BY transaction_cids.index` pgStr += ` ORDER BY transaction_cids.index`
receiptCIDs := make([]models.ReceiptModel, 0) receiptCids := make([]eth.ReceiptModel, 0)
return receiptCIDs, tx.Select(&receiptCIDs, pgStr, args...) return receiptCids, tx.Select(&receiptCids, pgStr, args...)
} }
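
Both query builders grow the SQL string clause by clause, appending `&& $n::VARCHAR(66)[]` and `= ANY($n)` conditions with pq.Array values while tracking the next placeholder index by hand. A reduced sketch of that pattern; the base query and column names are simplified stand-ins for the real receipt query:

package main

import (
    "fmt"

    "github.com/lib/pq"
)

// buildTopicFilter appends one "&& $n::VARCHAR(66)[]" clause per non-empty topic slot,
// returning the extended query, the args slice, and the next placeholder index.
func buildTopicFilter(pgStr string, args []interface{}, id int, topics [][]string) (string, []interface{}, int) {
    for i, topicSet := range topics {
        if i >= 4 || len(topicSet) == 0 {
            continue
        }
        pgStr += fmt.Sprintf(" AND receipt_cids.topic%ds && $%d::VARCHAR(66)[]", i, id)
        args = append(args, pq.Array(topicSet))
        id++
    }
    return pgStr, args, id
}

func main() {
    pgStr := `SELECT receipt_cids.cid FROM eth.receipt_cids WHERE header_id = $1`
    args := []interface{}{int64(42)}
    topics := [][]string{
        {"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"},
        {},
        {},
        {},
    }
    pgStr, args, next := buildTopicFilter(pgStr, args, 2, topics)
    fmt.Println(pgStr)
    fmt.Println("args:", len(args), "next placeholder:", next)
}
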
func hasTopics(topics [][]string) bool { func hasTopics(topics [][]string) bool {
@ -476,13 +386,13 @@ func hasTopics(topics [][]string) bool {
} }
// RetrieveStateCIDs retrieves and returns all of the state node cids at the provided header ID that conform to the provided filter parameters // RetrieveStateCIDs retrieves and returns all of the state node cids at the provided header ID that conform to the provided filter parameters
func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, headerID string) ([]models.StateNodeModel, error) { func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter, headerID int64) ([]eth.StateNodeModel, error) {
log.Debug("retrieving state cids for header id ", headerID) log.Debug("retrieving state cids for header id ", headerID)
args := make([]interface{}, 0, 2) args := make([]interface{}, 0, 2)
pgStr := `SELECT state_cids.header_id, pgStr := `SELECT state_cids.id, state_cids.header_id,
state_cids.state_leaf_key, state_cids.node_type, state_cids.cid, state_cids.mh_key, state_cids.state_path state_cids.state_leaf_key, state_cids.node_type, state_cids.cid, state_cids.mh_key, state_cids.state_path
FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) FROM eth.state_cids INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
WHERE header_cids.block_hash = $1` WHERE header_cids.id = $1`
args = append(args, headerID) args = append(args, headerID)
addrLen := len(stateFilter.Addresses) addrLen := len(stateFilter.Addresses)
if addrLen > 0 { if addrLen > 0 {
@ -496,20 +406,20 @@ func (ecr *CIDRetriever) RetrieveStateCIDs(tx *sqlx.Tx, stateFilter StateFilter,
if !stateFilter.IntermediateNodes { if !stateFilter.IntermediateNodes {
pgStr += ` AND state_cids.node_type = 2` pgStr += ` AND state_cids.node_type = 2`
} }
stateNodeCIDs := make([]models.StateNodeModel, 0) stateNodeCIDs := make([]eth.StateNodeModel, 0)
return stateNodeCIDs, tx.Select(&stateNodeCIDs, pgStr, args...) return stateNodeCIDs, tx.Select(&stateNodeCIDs, pgStr, args...)
} }
// RetrieveStorageCIDs retrieves and returns all of the storage node cids at the provided header id that conform to the provided filter parameters // RetrieveStorageCIDs retrieves and returns all of the storage node cids at the provided header id that conform to the provided filter parameters
func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, headerID string) ([]models.StorageNodeWithStateKeyModel, error) { func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageFilter, headerID int64) ([]eth.StorageNodeWithStateKeyModel, error) {
log.Debug("retrieving storage cids for header id ", headerID) log.Debug("retrieving storage cids for header id ", headerID)
args := make([]interface{}, 0, 3) args := make([]interface{}, 0, 3)
pgStr := `SELECT storage_cids.header_id, storage_cids.storage_leaf_key, storage_cids.node_type, pgStr := `SELECT storage_cids.id, storage_cids.state_id, storage_cids.storage_leaf_key, storage_cids.node_type,
storage_cids.cid, storage_cids.mh_key, storage_cids.storage_path, storage_cids.state_path, state_cids.state_leaf_key storage_cids.cid, storage_cids.mh_key, storage_cids.storage_path, state_cids.state_leaf_key
FROM eth.storage_cids, eth.state_cids, eth.header_cids FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE storage_cids.header_id = state_cids.header_id AND storage_cids.state_path = state_cids.state_path WHERE storage_cids.state_id = state_cids.id
AND state_cids.header_id = header_cids.block_hash AND state_cids.header_id = header_cids.id
AND header_cids.block_hash = $1` AND header_cids.id = $1`
args = append(args, headerID) args = append(args, headerID)
id := 2 id := 2
addrLen := len(storageFilter.Addresses) addrLen := len(storageFilter.Addresses)
@ -529,18 +439,18 @@ func (ecr *CIDRetriever) RetrieveStorageCIDs(tx *sqlx.Tx, storageFilter StorageF
if !storageFilter.IntermediateNodes { if !storageFilter.IntermediateNodes {
pgStr += ` AND storage_cids.node_type = 2` pgStr += ` AND storage_cids.node_type = 2`
} }
storageNodeCIDs := make([]models.StorageNodeWithStateKeyModel, 0) storageNodeCIDs := make([]eth.StorageNodeWithStateKeyModel, 0)
return storageNodeCIDs, tx.Select(&storageNodeCIDs, pgStr, args...) return storageNodeCIDs, tx.Select(&storageNodeCIDs, pgStr, args...)
} }
// RetrieveBlockByHash returns all of the CIDs needed to compose an entire block, for a given block hash // RetrieveBlockByHash returns all of the CIDs needed to compose an entire block, for a given block hash
func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (models.HeaderModel, []models.UncleModel, []models.TxModel, []models.ReceiptModel, error) { func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (eth.HeaderModel, []eth.UncleModel, []eth.TxModel, []eth.ReceiptModel, error) {
log.Debug("retrieving block cids for block hash ", blockHash.String()) log.Debug("retrieving block cids for block hash ", blockHash.String())
// Begin new db tx // Begin new db tx
tx, err := ecr.db.Beginx() tx, err := ecr.db.Beginx()
if err != nil { if err != nil {
return models.HeaderModel{}, nil, nil, nil, err return eth.HeaderModel{}, nil, nil, nil, err
} }
defer func() { defer func() {
if p := recover(); p != nil { if p := recover(); p != nil {
@ -553,30 +463,26 @@ func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (models.Head
} }
}() }()
var headerCID models.HeaderModel headerCID, err := ecr.RetrieveHeaderCIDByHash(tx, blockHash)
headerCID, err = ecr.RetrieveHeaderCIDByHash(tx, blockHash)
if err != nil { if err != nil {
log.Error("header cid retrieval error") log.Error("header cid retrieval error")
return models.HeaderModel{}, nil, nil, nil, err return eth.HeaderModel{}, nil, nil, nil, err
} }
var uncleCIDs []models.UncleModel uncleCIDs, err := ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID.ID)
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID.BlockHash)
if err != nil { if err != nil {
log.Error("uncle cid retrieval error") log.Error("uncle cid retrieval error")
return models.HeaderModel{}, nil, nil, nil, err return eth.HeaderModel{}, nil, nil, nil, err
} }
var txCIDs []models.TxModel txCIDs, err := ecr.RetrieveTxCIDsByHeaderID(tx, headerCID.ID)
txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID.BlockHash)
if err != nil { if err != nil {
log.Error("tx cid retrieval error") log.Error("tx cid retrieval error")
return models.HeaderModel{}, nil, nil, nil, err return eth.HeaderModel{}, nil, nil, nil, err
} }
txHashes := make([]string, len(txCIDs)) txIDs := make([]int64, len(txCIDs))
for i, txCID := range txCIDs { for i, txCID := range txCIDs {
txHashes[i] = txCID.TxHash txIDs[i] = txCID.ID
} }
var rctCIDs []models.ReceiptModel rctCIDs, err := ecr.RetrieveReceiptCIDsByTxIDs(tx, txIDs)
rctCIDs, err = ecr.RetrieveReceiptCIDsByTxIDs(tx, txHashes)
if err != nil { if err != nil {
log.Error("rct cid retrieval error") log.Error("rct cid retrieval error")
} }
@ -584,13 +490,13 @@ func (ecr *CIDRetriever) RetrieveBlockByHash(blockHash common.Hash) (models.Head
} }
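The deferred recover/rollback/commit bodies in RetrieveBlockByHash and RetrieveBlockByNumber are cut off by the hunk boundaries above. The usual shape of that sqlx discipline is sketched below as a hypothetical helper (withTx does not exist in this repository); it is a generic sketch, not a reconstruction of the elided lines.

package eth

import "github.com/jmoiron/sqlx"

// withTx sketches the begin / recover / rollback / commit pattern the retriever
// methods wrap around ecr.db.Beginx().
func withTx(db *sqlx.DB, fn func(*sqlx.Tx) error) (err error) {
	tx, err := db.Beginx()
	if err != nil {
		return err
	}
	defer func() {
		if p := recover(); p != nil {
			tx.Rollback() // roll back before re-panicking
			panic(p)
		} else if err != nil {
			tx.Rollback() // any retrieval error aborts the tx
		} else {
			err = tx.Commit()
		}
	}()
	return fn(tx)
}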
// RetrieveBlockByNumber returns all of the CIDs needed to compose an entire block, for a given block number // RetrieveBlockByNumber returns all of the CIDs needed to compose an entire block, for a given block number
func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (models.HeaderModel, []models.UncleModel, []models.TxModel, []models.ReceiptModel, error) { func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (eth.HeaderModel, []eth.UncleModel, []eth.TxModel, []eth.ReceiptModel, error) {
log.Debug("retrieving block cids for block number ", blockNumber) log.Debug("retrieving block cids for block number ", blockNumber)
// Begin new db tx // Begin new db tx
tx, err := ecr.db.Beginx() tx, err := ecr.db.Beginx()
if err != nil { if err != nil {
return models.HeaderModel{}, nil, nil, nil, err return eth.HeaderModel{}, nil, nil, nil, err
} }
defer func() { defer func() {
if p := recover(); p != nil { if p := recover(); p != nil {
@ -603,33 +509,29 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (models.Header
} }
}() }()
var headerCID []models.HeaderModel headerCID, err := ecr.RetrieveHeaderCIDs(tx, blockNumber)
headerCID, err = ecr.RetrieveHeaderCIDs(tx, blockNumber)
if err != nil { if err != nil {
log.Error("header cid retrieval error") log.Error("header cid retrieval error")
return models.HeaderModel{}, nil, nil, nil, err return eth.HeaderModel{}, nil, nil, nil, err
} }
if len(headerCID) < 1 { if len(headerCID) < 1 {
return models.HeaderModel{}, nil, nil, nil, fmt.Errorf("header cid retrieval error, no header CIDs found at block %d", blockNumber) return eth.HeaderModel{}, nil, nil, nil, fmt.Errorf("header cid retrieval error, no header CIDs found at block %d", blockNumber)
} }
var uncleCIDs []models.UncleModel uncleCIDs, err := ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID[0].ID)
uncleCIDs, err = ecr.RetrieveUncleCIDsByHeaderID(tx, headerCID[0].BlockHash)
if err != nil { if err != nil {
log.Error("uncle cid retrieval error") log.Error("uncle cid retrieval error")
return models.HeaderModel{}, nil, nil, nil, err return eth.HeaderModel{}, nil, nil, nil, err
} }
var txCIDs []models.TxModel txCIDs, err := ecr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].ID)
txCIDs, err = ecr.RetrieveTxCIDsByHeaderID(tx, headerCID[0].BlockHash)
if err != nil { if err != nil {
log.Error("tx cid retrieval error") log.Error("tx cid retrieval error")
return models.HeaderModel{}, nil, nil, nil, err return eth.HeaderModel{}, nil, nil, nil, err
} }
txHashes := make([]string, len(txCIDs)) txIDs := make([]int64, len(txCIDs))
for i, txCID := range txCIDs { for i, txCID := range txCIDs {
txHashes[i] = txCID.TxHash txIDs[i] = txCID.ID
} }
var rctCIDs []models.ReceiptModel rctCIDs, err := ecr.RetrieveReceiptCIDsByTxIDs(tx, txIDs)
rctCIDs, err = ecr.RetrieveReceiptCIDsByTxIDs(tx, txHashes)
if err != nil { if err != nil {
log.Error("rct cid retrieval error") log.Error("rct cid retrieval error")
} }
@ -637,89 +539,34 @@ func (ecr *CIDRetriever) RetrieveBlockByNumber(blockNumber int64) (models.Header
} }
// RetrieveHeaderCIDByHash returns the header for the given block hash // RetrieveHeaderCIDByHash returns the header for the given block hash
func (ecr *CIDRetriever) RetrieveHeaderCIDByHash(tx *sqlx.Tx, blockHash common.Hash) (models.HeaderModel, error) { func (ecr *CIDRetriever) RetrieveHeaderCIDByHash(tx *sqlx.Tx, blockHash common.Hash) (eth.HeaderModel, error) {
log.Debug("retrieving header cids for block hash ", blockHash.String()) log.Debug("retrieving header cids for block hash ", blockHash.String())
pgStr := `SELECT block_hash, CAST(block_number as Text), parent_hash, cid, mh_key, CAST(td as Text), pgStr := `SELECT * FROM eth.header_cids
state_root, uncle_root, tx_root, receipt_root, bloom, timestamp FROM eth.header_cids
WHERE block_hash = $1` WHERE block_hash = $1`
var headerCID models.HeaderModel var headerCID eth.HeaderModel
return headerCID, tx.Get(&headerCID, pgStr, blockHash.String()) return headerCID, tx.Get(&headerCID, pgStr, blockHash.String())
} }
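On the master side, RetrieveHeaderCIDByHash moves from SELECT * to an explicit column list and casts the numeric columns to text (CAST(block_number as Text), CAST(td as Text)). A plausible reading, sketched under the assumption that models.HeaderModel stores those values as Go strings, is that the cast lets sqlx scan Postgres NUMERIC columns directly; headerRow below is an illustrative stand-in, not the real model type.

package eth

import "github.com/jmoiron/sqlx"

// headerRow is a hypothetical, trimmed stand-in for models.HeaderModel.
type headerRow struct {
	BlockHash   string `db:"block_hash"`
	BlockNumber string `db:"block_number"` // NUMERIC cast to text in SQL
	TD          string `db:"td"`           // NUMERIC cast to text in SQL
	CID         string `db:"cid"`
}

func headerByHash(tx *sqlx.Tx, hash string) (headerRow, error) {
	var h headerRow
	pgStr := `SELECT block_hash, CAST(block_number as Text) AS block_number,
		CAST(td as Text) AS td, cid
		FROM eth.header_cids WHERE block_hash = $1`
	return h, tx.Get(&h, pgStr, hash)
}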
// RetrieveTxCIDsByHeaderID retrieves all tx CIDs for the given header id // RetrieveTxCIDsByHeaderID retrieves all tx CIDs for the given header id
func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID string) ([]models.TxModel, error) { func (ecr *CIDRetriever) RetrieveTxCIDsByHeaderID(tx *sqlx.Tx, headerID int64) ([]eth.TxModel, error) {
log.Debug("retrieving tx cids for block id ", headerID) log.Debug("retrieving tx cids for block id ", headerID)
pgStr := `SELECT * FROM eth.transaction_cids pgStr := `SELECT * FROM eth.transaction_cids
WHERE header_id = $1 WHERE header_id = $1
ORDER BY index` ORDER BY index`
var txCIDs []models.TxModel var txCIDs []eth.TxModel
return txCIDs, tx.Select(&txCIDs, pgStr, headerID) return txCIDs, tx.Select(&txCIDs, pgStr, headerID)
} }
// RetrieveReceiptCIDsByTxIDs retrieves receipt CIDs by their associated tx IDs // RetrieveReceiptCIDsByTxIDs retrieves receipt CIDs by their associated tx IDs
func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txHashes []string) ([]models.ReceiptModel, error) { func (ecr *CIDRetriever) RetrieveReceiptCIDsByTxIDs(tx *sqlx.Tx, txIDs []int64) ([]eth.ReceiptModel, error) {
log.Debugf("retrieving receipt cids for tx hashes %v", txHashes) log.Debugf("retrieving receipt cids for tx ids %v", txIDs)
pgStr := `SELECT receipt_cids.tx_id, receipt_cids.leaf_cid, receipt_cids.leaf_mh_key, pgStr := `SELECT receipt_cids.id, receipt_cids.tx_id, receipt_cids.cid, receipt_cids.mh_key,
receipt_cids.contract, receipt_cids.contract_hash receipt_cids.contract, receipt_cids.contract_hash, receipt_cids.topic0s, receipt_cids.topic1s,
receipt_cids.topic2s, receipt_cids.topic3s, receipt_cids.log_contracts
FROM eth.receipt_cids, eth.transaction_cids FROM eth.receipt_cids, eth.transaction_cids
WHERE tx_id = ANY($1) WHERE tx_id = ANY($1::INTEGER[])
AND receipt_cids.tx_id = transaction_cids.tx_hash AND receipt_cids.tx_id = transaction_cids.id
ORDER BY transaction_cids.index` ORDER BY transaction_cids.index`
var rctCIDs []models.ReceiptModel var rctCIDs []eth.ReceiptModel
return rctCIDs, tx.Select(&rctCIDs, pgStr, pq.Array(txHashes)) return rctCIDs, tx.Select(&rctCIDs, pgStr, pq.Array(txIDs))
}
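Both sides of RetrieveReceiptCIDsByTxIDs bind a Go slice through pq.Array so Postgres can match it with ANY($1); only the element type changes (tx hashes as strings on master, integer ids on v0.3.1-alpha). A stripped-down sketch of that binding, reusing column names from the master-side query (receiptLeafCIDs itself is hypothetical):

package eth

import (
	"github.com/jmoiron/sqlx"
	"github.com/lib/pq"
)

// receiptLeafCIDs illustrates the ANY($1) + pq.Array binding used above.
func receiptLeafCIDs(tx *sqlx.Tx, txHashes []string) ([]string, error) {
	cids := make([]string, 0)
	pgStr := `SELECT receipt_cids.leaf_cid
		FROM eth.receipt_cids
		WHERE receipt_cids.tx_id = ANY($1)`
	return cids, tx.Select(&cids, pgStr, pq.Array(txHashes))
}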
// RetrieveHeaderAndTxCIDsByBlockNumber retrieves header CIDs and their associated tx CIDs by block number
func (ecr *CIDRetriever) RetrieveHeaderAndTxCIDsByBlockNumber(blockNumber int64) ([]HeaderCIDRecord, error) {
log.Debug("retrieving header cids and tx cids for block number ", blockNumber)
var headerCIDs []HeaderCIDRecord
// https://github.com/go-gorm/gorm/issues/4083#issuecomment-778883283
// Will use join for TransactionCIDs once preload for 1:N is supported.
err := ecr.gormDB.Preload("TransactionCIDs", func(tx *gorm.DB) *gorm.DB {
return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id")
}).Joins("IPLD").Find(&headerCIDs, "block_number = ?", blockNumber).Error
if err != nil {
log.Error("header cid retrieval error")
return nil, err
}
return headerCIDs, nil
}

// RetrieveHeaderAndTxCIDsByBlockHash retrieves the header CID and its associated tx CIDs by block hash
func (ecr *CIDRetriever) RetrieveHeaderAndTxCIDsByBlockHash(blockHash common.Hash) (HeaderCIDRecord, error) {
log.Debug("retrieving header cid and tx cids for block hash ", blockHash.String())
var headerCID HeaderCIDRecord
// https://github.com/go-gorm/gorm/issues/4083#issuecomment-778883283
// Will use join for TransactionCIDs once preload for 1:N is supported.
err := ecr.gormDB.Preload("TransactionCIDs", func(tx *gorm.DB) *gorm.DB {
return tx.Select("cid", "tx_hash", "index", "src", "dst", "header_id")
}).Joins("IPLD").First(&headerCID, "block_hash = ?", blockHash.String()).Error
if err != nil {
log.Error("header cid retrieval error")
return headerCID, err
}
return headerCID, nil
}
// RetrieveTxCIDByHash returns the tx for the given tx hash
func (ecr *CIDRetriever) RetrieveTxCIDByHash(txHash string) (TransactionCIDRecord, error) {
log.Debug("retrieving tx cid for tx hash ", txHash)
var txCID TransactionCIDRecord
err := ecr.gormDB.Joins("IPLD").First(&txCID, "tx_hash = ?", txHash).Error
if err != nil {
log.Error("tx cid retrieval error")
return txCID, err
}
return txCID, nil
} }
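The three lookups above are new on master and go through a second, gorm-backed handle (ecr.gormDB) rather than sqlx: Preload with a scoped Select is the workaround from go-gorm/gorm#4083 for narrowing the columns fetched for the has-many TransactionCIDs association, while Joins("IPLD") pulls in the 1:1 IPLD row. How that handle is opened is outside this hunk; a minimal sketch, assuming gorm v2 with the Postgres driver (the DSN and logger config here are placeholders):

package eth

import (
	"gorm.io/driver/postgres"
	"gorm.io/gorm"
	"gorm.io/gorm/logger"
)

// openGormDB sketches how a gorm handle over the same Postgres database might be
// opened to back the Retrieve*ByBlock* and RetrieveTxCIDByHash methods above.
func openGormDB(dsn string) (*gorm.DB, error) {
	return gorm.Open(postgres.Open(dsn), &gorm.Config{
		Logger: logger.Default.LogMode(logger.Silent), // keep query logging quiet
	})
}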

View File

@ -19,19 +19,19 @@ package eth_test
import ( import (
"math/big" "math/big"
"github.com/ethereum/go-ethereum/trie"
"github.com/vulcanize/ipld-eth-indexer/pkg/shared"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/trie"
"github.com/jmoiron/sqlx"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth" eth2 "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers" "github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
"github.com/vulcanize/ipld-eth-server/pkg/eth"
"github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers"
) )
var ( var (
@ -211,64 +211,45 @@ var (
var _ = Describe("Retriever", func() { var _ = Describe("Retriever", func() {
var ( var (
db *sqlx.DB db *postgres.DB
diffIndexer interfaces.StateDiffIndexer repo *eth2.IPLDPublisher
retriever *eth.CIDRetriever retriever *eth.CIDRetriever
) )
BeforeEach(func() { BeforeEach(func() {
db = shared.SetupDB() var err error
diffIndexer = shared.SetupTestStateDiffIndexer(ctx, params.TestChainConfig, test_helpers.Genesis.Hash()) db, err = shared.SetupDB()
Expect(err).ToNot(HaveOccurred())
repo = eth2.NewIPLDPublisher(db)
retriever = eth.NewCIDRetriever(db) retriever = eth.NewCIDRetriever(db)
}) })
AfterEach(func() { AfterEach(func() {
shared.TearDownDB(db) eth.TearDownDB(db)
}) })
Describe("Retrieve", func() { Describe("Retrieve", func() {
BeforeEach(func() { BeforeEach(func() {
tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty()) err := repo.Publish(test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred())
for _, node := range test_helpers.MockStateNodes {
err = diffIndexer.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
Expect(err).ToNot(HaveOccurred())
}
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
}) })
It("Retrieves all CIDs for the given blocknumber when provided an open filter", func() { It("Retrieves all CIDs for the given blocknumber when provided an open filter", func() {
type rctCIDAndMHKeyResult struct {
LeafCID string `db:"leaf_cid"`
LeafMhKey string `db:"leaf_mh_key"`
}
expectedRctCIDsAndLeafNodes := make([]rctCIDAndMHKeyResult, 0)
pgStr := `SELECT receipt_cids.leaf_cid, receipt_cids.leaf_mh_key FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
AND transaction_cids.header_id = header_cids.block_hash
AND header_cids.block_number = $1
ORDER BY transaction_cids.index`
err := db.Select(&expectedRctCIDsAndLeafNodes, pgStr, test_helpers.BlockNumber.Uint64())
cids, empty, err := retriever.Retrieve(openFilter, 1) cids, empty, err := retriever.Retrieve(openFilter, 1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue()) Expect(empty).ToNot(BeTrue())
Expect(len(cids)).To(Equal(1)) Expect(len(cids)).To(Equal(1))
Expect(cids[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber)) Expect(cids[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
expectedHeaderCID := test_helpers.MockCIDWrapper.Header expectedHeaderCID := test_helpers.MockCIDWrapper.Header
expectedHeaderCID.BlockHash = cids[0].Header.BlockHash expectedHeaderCID.ID = cids[0].Header.ID
expectedHeaderCID.NodeID = cids[0].Header.NodeID expectedHeaderCID.NodeID = cids[0].Header.NodeID
Expect(cids[0].Header).To(Equal(expectedHeaderCID)) Expect(cids[0].Header).To(Equal(expectedHeaderCID))
Expect(len(cids[0].Transactions)).To(Equal(4)) Expect(len(cids[0].Transactions)).To(Equal(3))
Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[0].CID)).To(BeTrue()) Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[0].CID)).To(BeTrue())
Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[1].CID)).To(BeTrue()) Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[1].CID)).To(BeTrue())
Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[2].CID)).To(BeTrue()) Expect(eth.TxModelsContainsCID(cids[0].Transactions, test_helpers.MockCIDWrapper.Transactions[2].CID)).To(BeTrue())
Expect(len(cids[0].Receipts)).To(Equal(4)) Expect(len(cids[0].Receipts)).To(Equal(3))
Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, expectedRctCIDsAndLeafNodes[0].LeafCID)).To(BeTrue()) Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, test_helpers.MockCIDWrapper.Receipts[0].CID)).To(BeTrue())
Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, expectedRctCIDsAndLeafNodes[1].LeafCID)).To(BeTrue()) Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, test_helpers.MockCIDWrapper.Receipts[1].CID)).To(BeTrue())
Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, expectedRctCIDsAndLeafNodes[2].LeafCID)).To(BeTrue()) Expect(eth.ReceiptModelsContainsCID(cids[0].Receipts, test_helpers.MockCIDWrapper.Receipts[2].CID)).To(BeTrue())
Expect(len(cids[0].StateNodes)).To(Equal(2)) Expect(len(cids[0].StateNodes)).To(Equal(2))
for _, stateNode := range cids[0].StateNodes { for _, stateNode := range cids[0].StateNodes {
if stateNode.CID == test_helpers.State1CID.String() { if stateNode.CID == test_helpers.State1CID.String() {
Expect(stateNode.StateKey).To(Equal(common.BytesToHash(test_helpers.ContractLeafKey).Hex())) Expect(stateNode.StateKey).To(Equal(common.BytesToHash(test_helpers.ContractLeafKey).Hex()))
@ -283,37 +264,25 @@ var _ = Describe("Retriever", func() {
} }
Expect(len(cids[0].StorageNodes)).To(Equal(1)) Expect(len(cids[0].StorageNodes)).To(Equal(1))
expectedStorageNodeCIDs := test_helpers.MockCIDWrapper.StorageNodes expectedStorageNodeCIDs := test_helpers.MockCIDWrapper.StorageNodes
expectedStorageNodeCIDs[0].HeaderID = cids[0].StorageNodes[0].HeaderID expectedStorageNodeCIDs[0].ID = cids[0].StorageNodes[0].ID
expectedStorageNodeCIDs[0].StatePath = cids[0].StorageNodes[0].StatePath expectedStorageNodeCIDs[0].StateID = cids[0].StorageNodes[0].StateID
Expect(cids[0].StorageNodes).To(Equal(expectedStorageNodeCIDs)) Expect(cids[0].StorageNodes).To(Equal(expectedStorageNodeCIDs))
}) })
It("Applies filters from the provided config.Subscription", func() { It("Applies filters from the provided config.Subscription", func() {
type rctCIDAndMHKeyResult struct {
LeafCID string `db:"leaf_cid"`
LeafMhKey string `db:"leaf_mh_key"`
}
expectedRctCIDsAndLeafNodes := make([]rctCIDAndMHKeyResult, 0)
pgStr := `SELECT receipt_cids.leaf_cid, receipt_cids.leaf_mh_key FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
AND transaction_cids.header_id = header_cids.block_hash
AND header_cids.block_number = $1
ORDER BY transaction_cids.index`
err := db.Select(&expectedRctCIDsAndLeafNodes, pgStr, test_helpers.BlockNumber.Uint64())
cids1, empty, err := retriever.Retrieve(rctAddressFilter, 1) cids1, empty, err := retriever.Retrieve(rctAddressFilter, 1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue()) Expect(empty).ToNot(BeTrue())
Expect(len(cids1)).To(Equal(1)) Expect(len(cids1)).To(Equal(1))
Expect(cids1[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber)) Expect(cids1[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
Expect(cids1[0].Header).To(Equal(models.HeaderModel{})) Expect(cids1[0].Header).To(Equal(eth2.HeaderModel{}))
Expect(len(cids1[0].Transactions)).To(Equal(0)) Expect(len(cids1[0].Transactions)).To(Equal(0))
Expect(len(cids1[0].StateNodes)).To(Equal(0)) Expect(len(cids1[0].StateNodes)).To(Equal(0))
Expect(len(cids1[0].StorageNodes)).To(Equal(0)) Expect(len(cids1[0].StorageNodes)).To(Equal(0))
Expect(len(cids1[0].Receipts)).To(Equal(1)) Expect(len(cids1[0].Receipts)).To(Equal(1))
expectedReceiptCID := test_helpers.MockCIDWrapper.Receipts[0] expectedReceiptCID := test_helpers.MockCIDWrapper.Receipts[0]
expectedReceiptCID.ID = cids1[0].Receipts[0].ID
expectedReceiptCID.TxID = cids1[0].Receipts[0].TxID expectedReceiptCID.TxID = cids1[0].Receipts[0].TxID
expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[0].LeafCID
expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[0].LeafMhKey
Expect(cids1[0].Receipts[0]).To(Equal(expectedReceiptCID)) Expect(cids1[0].Receipts[0]).To(Equal(expectedReceiptCID))
cids2, empty, err := retriever.Retrieve(rctTopicsFilter, 1) cids2, empty, err := retriever.Retrieve(rctTopicsFilter, 1)
@ -321,15 +290,14 @@ var _ = Describe("Retriever", func() {
Expect(empty).ToNot(BeTrue()) Expect(empty).ToNot(BeTrue())
Expect(len(cids2)).To(Equal(1)) Expect(len(cids2)).To(Equal(1))
Expect(cids2[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber)) Expect(cids2[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
Expect(cids2[0].Header).To(Equal(models.HeaderModel{})) Expect(cids2[0].Header).To(Equal(eth2.HeaderModel{}))
Expect(len(cids2[0].Transactions)).To(Equal(0)) Expect(len(cids2[0].Transactions)).To(Equal(0))
Expect(len(cids2[0].StateNodes)).To(Equal(0)) Expect(len(cids2[0].StateNodes)).To(Equal(0))
Expect(len(cids2[0].StorageNodes)).To(Equal(0)) Expect(len(cids2[0].StorageNodes)).To(Equal(0))
Expect(len(cids2[0].Receipts)).To(Equal(1)) Expect(len(cids2[0].Receipts)).To(Equal(1))
expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[0] expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[0]
expectedReceiptCID.ID = cids2[0].Receipts[0].ID
expectedReceiptCID.TxID = cids2[0].Receipts[0].TxID expectedReceiptCID.TxID = cids2[0].Receipts[0].TxID
expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[0].LeafCID
expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[0].LeafMhKey
Expect(cids2[0].Receipts[0]).To(Equal(expectedReceiptCID)) Expect(cids2[0].Receipts[0]).To(Equal(expectedReceiptCID))
cids3, empty, err := retriever.Retrieve(rctTopicsAndAddressFilter, 1) cids3, empty, err := retriever.Retrieve(rctTopicsAndAddressFilter, 1)
@ -337,15 +305,14 @@ var _ = Describe("Retriever", func() {
Expect(empty).ToNot(BeTrue()) Expect(empty).ToNot(BeTrue())
Expect(len(cids3)).To(Equal(1)) Expect(len(cids3)).To(Equal(1))
Expect(cids3[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber)) Expect(cids3[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
Expect(cids3[0].Header).To(Equal(models.HeaderModel{})) Expect(cids3[0].Header).To(Equal(eth2.HeaderModel{}))
Expect(len(cids3[0].Transactions)).To(Equal(0)) Expect(len(cids3[0].Transactions)).To(Equal(0))
Expect(len(cids3[0].StateNodes)).To(Equal(0)) Expect(len(cids3[0].StateNodes)).To(Equal(0))
Expect(len(cids3[0].StorageNodes)).To(Equal(0)) Expect(len(cids3[0].StorageNodes)).To(Equal(0))
Expect(len(cids3[0].Receipts)).To(Equal(1)) Expect(len(cids3[0].Receipts)).To(Equal(1))
expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[0] expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[0]
expectedReceiptCID.ID = cids3[0].Receipts[0].ID
expectedReceiptCID.TxID = cids3[0].Receipts[0].TxID expectedReceiptCID.TxID = cids3[0].Receipts[0].TxID
expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[0].LeafCID
expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[0].LeafMhKey
Expect(cids3[0].Receipts[0]).To(Equal(expectedReceiptCID)) Expect(cids3[0].Receipts[0]).To(Equal(expectedReceiptCID))
cids4, empty, err := retriever.Retrieve(rctAddressesAndTopicFilter, 1) cids4, empty, err := retriever.Retrieve(rctAddressesAndTopicFilter, 1)
@ -353,15 +320,14 @@ var _ = Describe("Retriever", func() {
Expect(empty).ToNot(BeTrue()) Expect(empty).ToNot(BeTrue())
Expect(len(cids4)).To(Equal(1)) Expect(len(cids4)).To(Equal(1))
Expect(cids4[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber)) Expect(cids4[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
Expect(cids4[0].Header).To(Equal(models.HeaderModel{})) Expect(cids4[0].Header).To(Equal(eth2.HeaderModel{}))
Expect(len(cids4[0].Transactions)).To(Equal(0)) Expect(len(cids4[0].Transactions)).To(Equal(0))
Expect(len(cids4[0].StateNodes)).To(Equal(0)) Expect(len(cids4[0].StateNodes)).To(Equal(0))
Expect(len(cids4[0].StorageNodes)).To(Equal(0)) Expect(len(cids4[0].StorageNodes)).To(Equal(0))
Expect(len(cids4[0].Receipts)).To(Equal(1)) Expect(len(cids4[0].Receipts)).To(Equal(1))
expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[1] expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[1]
expectedReceiptCID.ID = cids4[0].Receipts[0].ID
expectedReceiptCID.TxID = cids4[0].Receipts[0].TxID expectedReceiptCID.TxID = cids4[0].Receipts[0].TxID
expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[1].LeafCID
expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[1].LeafMhKey
Expect(cids4[0].Receipts[0]).To(Equal(expectedReceiptCID)) Expect(cids4[0].Receipts[0]).To(Equal(expectedReceiptCID))
cids5, empty, err := retriever.Retrieve(rctsForAllCollectedTrxs, 1) cids5, empty, err := retriever.Retrieve(rctsForAllCollectedTrxs, 1)
@ -369,36 +335,35 @@ var _ = Describe("Retriever", func() {
Expect(empty).ToNot(BeTrue()) Expect(empty).ToNot(BeTrue())
Expect(len(cids5)).To(Equal(1)) Expect(len(cids5)).To(Equal(1))
Expect(cids5[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber)) Expect(cids5[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
Expect(cids5[0].Header).To(Equal(models.HeaderModel{})) Expect(cids5[0].Header).To(Equal(eth2.HeaderModel{}))
Expect(len(cids5[0].Transactions)).To(Equal(4)) Expect(len(cids5[0].Transactions)).To(Equal(3))
Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx1CID.String())).To(BeTrue()) Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx1CID.String())).To(BeTrue())
Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx2CID.String())).To(BeTrue()) Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx2CID.String())).To(BeTrue())
Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx3CID.String())).To(BeTrue()) Expect(eth.TxModelsContainsCID(cids5[0].Transactions, test_helpers.Trx3CID.String())).To(BeTrue())
Expect(len(cids5[0].StateNodes)).To(Equal(0)) Expect(len(cids5[0].StateNodes)).To(Equal(0))
Expect(len(cids5[0].StorageNodes)).To(Equal(0)) Expect(len(cids5[0].StorageNodes)).To(Equal(0))
Expect(len(cids5[0].Receipts)).To(Equal(4)) Expect(len(cids5[0].Receipts)).To(Equal(3))
Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, expectedRctCIDsAndLeafNodes[0].LeafCID)).To(BeTrue()) Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, test_helpers.Rct1CID.String())).To(BeTrue())
Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, expectedRctCIDsAndLeafNodes[1].LeafCID)).To(BeTrue()) Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, test_helpers.Rct2CID.String())).To(BeTrue())
Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, expectedRctCIDsAndLeafNodes[2].LeafCID)).To(BeTrue()) Expect(eth.ReceiptModelsContainsCID(cids5[0].Receipts, test_helpers.Rct3CID.String())).To(BeTrue())
cids6, empty, err := retriever.Retrieve(rctsForSelectCollectedTrxs, 1) cids6, empty, err := retriever.Retrieve(rctsForSelectCollectedTrxs, 1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(empty).ToNot(BeTrue()) Expect(empty).ToNot(BeTrue())
Expect(len(cids6)).To(Equal(1)) Expect(len(cids6)).To(Equal(1))
Expect(cids6[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber)) Expect(cids6[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
Expect(cids6[0].Header).To(Equal(models.HeaderModel{})) Expect(cids6[0].Header).To(Equal(eth2.HeaderModel{}))
Expect(len(cids6[0].Transactions)).To(Equal(1)) Expect(len(cids6[0].Transactions)).To(Equal(1))
expectedTxCID := test_helpers.MockCIDWrapper.Transactions[1] expectedTxCID := test_helpers.MockCIDWrapper.Transactions[1]
expectedTxCID.TxHash = cids6[0].Transactions[0].TxHash expectedTxCID.ID = cids6[0].Transactions[0].ID
expectedTxCID.HeaderID = cids6[0].Transactions[0].HeaderID expectedTxCID.HeaderID = cids6[0].Transactions[0].HeaderID
Expect(cids6[0].Transactions[0]).To(Equal(expectedTxCID)) Expect(cids6[0].Transactions[0]).To(Equal(expectedTxCID))
Expect(len(cids6[0].StateNodes)).To(Equal(0)) Expect(len(cids6[0].StateNodes)).To(Equal(0))
Expect(len(cids6[0].StorageNodes)).To(Equal(0)) Expect(len(cids6[0].StorageNodes)).To(Equal(0))
Expect(len(cids6[0].Receipts)).To(Equal(1)) Expect(len(cids6[0].Receipts)).To(Equal(1))
expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[1] expectedReceiptCID = test_helpers.MockCIDWrapper.Receipts[1]
expectedReceiptCID.ID = cids6[0].Receipts[0].ID
expectedReceiptCID.TxID = cids6[0].Receipts[0].TxID expectedReceiptCID.TxID = cids6[0].Receipts[0].TxID
expectedReceiptCID.LeafCID = expectedRctCIDsAndLeafNodes[1].LeafCID
expectedReceiptCID.LeafMhKey = expectedRctCIDsAndLeafNodes[1].LeafMhKey
Expect(cids6[0].Receipts[0]).To(Equal(expectedReceiptCID)) Expect(cids6[0].Receipts[0]).To(Equal(expectedReceiptCID))
cids7, empty, err := retriever.Retrieve(stateFilter, 1) cids7, empty, err := retriever.Retrieve(stateFilter, 1)
@ -406,12 +371,13 @@ var _ = Describe("Retriever", func() {
Expect(empty).ToNot(BeTrue()) Expect(empty).ToNot(BeTrue())
Expect(len(cids7)).To(Equal(1)) Expect(len(cids7)).To(Equal(1))
Expect(cids7[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber)) Expect(cids7[0].BlockNumber).To(Equal(test_helpers.MockCIDWrapper.BlockNumber))
Expect(cids7[0].Header).To(Equal(models.HeaderModel{})) Expect(cids7[0].Header).To(Equal(eth2.HeaderModel{}))
Expect(len(cids7[0].Transactions)).To(Equal(0)) Expect(len(cids7[0].Transactions)).To(Equal(0))
Expect(len(cids7[0].Receipts)).To(Equal(0)) Expect(len(cids7[0].Receipts)).To(Equal(0))
Expect(len(cids7[0].StorageNodes)).To(Equal(0)) Expect(len(cids7[0].StorageNodes)).To(Equal(0))
Expect(len(cids7[0].StateNodes)).To(Equal(1)) Expect(len(cids7[0].StateNodes)).To(Equal(1))
Expect(cids7[0].StateNodes[0]).To(Equal(models.StateNodeModel{ Expect(cids7[0].StateNodes[0]).To(Equal(eth2.StateNodeModel{
ID: cids7[0].StateNodes[0].ID,
HeaderID: cids7[0].StateNodes[0].HeaderID, HeaderID: cids7[0].StateNodes[0].HeaderID,
NodeType: 2, NodeType: 2,
StateKey: common.BytesToHash(test_helpers.AccountLeafKey).Hex(), StateKey: common.BytesToHash(test_helpers.AccountLeafKey).Hex(),
@ -432,12 +398,8 @@ var _ = Describe("Retriever", func() {
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
}) })
It("Gets the number of the first block that has data in the database", func() { It("Gets the number of the first block that has data in the database", func() {
tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty()) err := repo.Publish(test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber() num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1))) Expect(num).To(Equal(int64(1)))
@ -446,12 +408,8 @@ var _ = Describe("Retriever", func() {
It("Gets the number of the first block that has data in the database", func() { It("Gets the number of the first block that has data in the database", func() {
payload := test_helpers.MockConvertedPayload payload := test_helpers.MockConvertedPayload
payload.Block = newMockBlock(1010101) payload.Block = newMockBlock(1010101)
tx, err := diffIndexer.PushBlock(payload.Block, payload.Receipts, payload.Block.Difficulty()) err := repo.Publish(payload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber() num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101))) Expect(num).To(Equal(int64(1010101)))
@ -462,16 +420,10 @@ var _ = Describe("Retriever", func() {
payload1.Block = newMockBlock(1010101) payload1.Block = newMockBlock(1010101)
payload2 := payload1 payload2 := payload1
payload2.Block = newMockBlock(5) payload2.Block = newMockBlock(5)
tx, err := diffIndexer.PushBlock(payload1.Block, payload1.Receipts, payload1.Block.Difficulty()) err := repo.Publish(payload1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err) err = repo.Publish(payload2)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
tx, err = diffIndexer.PushBlock(payload2.Block, payload2.Receipts, payload2.Block.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveFirstBlockNumber() num, err := retriever.RetrieveFirstBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(5))) Expect(num).To(Equal(int64(5)))
@ -484,11 +436,8 @@ var _ = Describe("Retriever", func() {
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
}) })
It("Gets the number of the latest block that has data in the database", func() { It("Gets the number of the latest block that has data in the database", func() {
tx, err := diffIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty()) err := repo.Publish(test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber() num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1))) Expect(num).To(Equal(int64(1)))
@ -497,12 +446,8 @@ var _ = Describe("Retriever", func() {
It("Gets the number of the latest block that has data in the database", func() { It("Gets the number of the latest block that has data in the database", func() {
payload := test_helpers.MockConvertedPayload payload := test_helpers.MockConvertedPayload
payload.Block = newMockBlock(1010101) payload.Block = newMockBlock(1010101)
tx, err := diffIndexer.PushBlock(payload.Block, payload.Receipts, payload.Block.Difficulty()) err := repo.Publish(payload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber() num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101))) Expect(num).To(Equal(int64(1010101)))
@ -513,16 +458,10 @@ var _ = Describe("Retriever", func() {
payload1.Block = newMockBlock(1010101) payload1.Block = newMockBlock(1010101)
payload2 := payload1 payload2 := payload1
payload2.Block = newMockBlock(5) payload2.Block = newMockBlock(5)
tx, err := diffIndexer.PushBlock(payload1.Block, payload1.Receipts, payload1.Block.Difficulty()) err := repo.Publish(payload1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err) err = repo.Publish(payload2)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
tx, err = diffIndexer.PushBlock(payload2.Block, payload2.Receipts, payload2.Block.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
num, err := retriever.RetrieveLastBlockNumber() num, err := retriever.RetrieveLastBlockNumber()
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(num).To(Equal(int64(1010101))) Expect(num).To(Equal(int64(1010101)))

View File

@ -21,7 +21,6 @@ import (
"context" "context"
"io/ioutil" "io/ioutil"
"math/big" "math/big"
"time"
"github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -30,17 +29,18 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff" "github.com/ethereum/go-ethereum/statediff"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/jmoiron/sqlx"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth" eth2 "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers" "github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared" "github.com/vulcanize/ipld-eth-indexer/pkg/shared"
ethServerShared "github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
"github.com/vulcanize/ipld-eth-server/pkg/eth"
"github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers"
) )
var ( var (
@ -60,12 +60,11 @@ func init() {
} }
var _ = Describe("eth state reading tests", func() { var _ = Describe("eth state reading tests", func() {
const chainLength = 5
var ( var (
blocks []*types.Block blocks []*types.Block
receipts []types.Receipts receipts []types.Receipts
chain *core.BlockChain chain *core.BlockChain
db *sqlx.DB db *postgres.DB
api *eth.PublicEthAPI api *eth.PublicEthAPI
backend *eth.Backend backend *eth.Backend
chainConfig = params.TestChainConfig chainConfig = params.TestChainConfig
@ -75,27 +74,19 @@ var _ = Describe("eth state reading tests", func() {
It("test init", func() { It("test init", func() {
// db and type initializations // db and type initializations
var err error var err error
db = shared.SetupDB() db, err = shared.SetupDB()
transformer := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash()) Expect(err).ToNot(HaveOccurred())
transformer := eth2.NewStateDiffTransformer(chainConfig, db)
backend, err = eth.NewEthBackend(db, &eth.Config{ backend, err = eth.NewEthBackend(db, &eth.Config{
ChainConfig: chainConfig, ChainConfig: chainConfig,
VMConfig: vm.Config{}, VmConfig: vm.Config{},
RPCGasCap: big.NewInt(10000000000), // Max gas capacity for a rpc call. RPCGasCap: big.NewInt(10000000000),
GroupCacheConfig: &ethServerShared.GroupCacheConfig{
StateDB: ethServerShared.GroupConfig{
Name: "eth_state_test",
CacheSizeInMB: 8,
CacheExpiryInMins: 60,
LogStatsIntervalInSecs: 0,
},
},
}) })
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
api, _ = eth.NewPublicEthAPI(backend, nil, false, false, false) api = eth.NewPublicEthAPI(backend, nil, false)
// make the test blockchain (and state) // make the test blockchain (and state)
blocks, receipts, chain = test_helpers.MakeChain(chainLength, test_helpers.Genesis, test_helpers.TestChainGen) blocks, receipts, chain = test_helpers.MakeChain(5, test_helpers.Genesis, test_helpers.TestChainGen)
params := statediff.Params{ params := statediff.Params{
IntermediateStateNodes: true, IntermediateStateNodes: true,
IntermediateStorageNodes: true, IntermediateStorageNodes: true,
@ -144,45 +135,35 @@ var _ = Describe("eth state reading tests", func() {
} }
diff, err := builder.BuildStateDiffObject(args, params) diff, err := builder.BuildStateDiffObject(args, params)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
tx, err := transformer.PushBlock(block, rcts, mockTD) diffRlp, err := rlp.EncodeToBytes(diff)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
blockRlp, err := rlp.EncodeToBytes(block)
for _, node := range diff.Nodes { Expect(err).ToNot(HaveOccurred())
err = transformer.PushStateNode(tx, node, block.Hash().String()) receiptsRlp, err := rlp.EncodeToBytes(rcts)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
payload := statediff.Payload{
StateObjectRlp: diffRlp,
BlockRlp: blockRlp,
ReceiptsRlp: receiptsRlp,
TotalDifficulty: mockTD,
} }
err = tx.Submit(err) _, err = transformer.Transform(0, payload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
} }
// Insert some non-canonical data into the database so that we test our ability to discern canonicity // Insert some non-canonical data into the database so that we test our ability to discern canonicity
indexAndPublisher := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash()) indexAndPublisher := eth2.NewIPLDPublisher(db)
api = eth.NewPublicEthAPI(backend, nil, false)
tx, err := indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty()) err = indexAndPublisher.Publish(test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
// The non-canonical header has a child // The non-canonical header has a child
tx, err = indexAndPublisher.PushBlock(test_helpers.MockChild, test_helpers.MockReceipts, test_helpers.MockChild.Difficulty()) err = indexAndPublisher.Publish(test_helpers.MockConvertedPayloadForChild)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
err = publishCode(db, test_helpers.ContractCodeHash, test_helpers.ContractCode)
hash := sdtypes.CodeAndCodeHash{
Hash: test_helpers.CodeHash,
Code: test_helpers.ContractCode,
}
err = indexAndPublisher.PushCodeAndCodeHash(tx, hash)
Expect(err).ToNot(HaveOccurred())
// wait for tx batch process to complete.
time.Sleep(10000 * time.Millisecond)
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
}) })
defer It("test teardown", func() { defer It("test teardown", func() {
shared.TearDownDB(db) eth.TearDownDB(db)
chain.Stop() chain.Stop()
}) })
@ -253,15 +234,12 @@ var _ = Describe("eth state reading tests", func() {
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(1)) bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(1))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1)) Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
_, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(1)) Expect(err).To(HaveOccurred())
Expect(err).ToNot(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
Expect(bal).To(Equal((*hexutil.Big)(common.Big0))) _, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(1))
Expect(err).To(HaveOccurred())
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(1)) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(1)) bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(1))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock1)) Expect(bal).To(Equal(expectedBankBalanceBlock1))
@ -269,15 +247,12 @@ var _ = Describe("eth state reading tests", func() {
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(2)) bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(2))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1)) Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(2)) bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(2))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock2)) Expect(bal).To(Equal(expectedAcct2BalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(2)) bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(2))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance)) Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(2)) bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(2))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2)) Expect(bal).To(Equal(expectedBankBalanceBlock2))
@ -285,15 +260,12 @@ var _ = Describe("eth state reading tests", func() {
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(3)) bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1)) Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(3)) bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock3)) Expect(bal).To(Equal(expectedAcct2BalanceBlock3))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(3)) bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance)) Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(3)) bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2)) Expect(bal).To(Equal(expectedBankBalanceBlock2))
@ -301,15 +273,12 @@ var _ = Describe("eth state reading tests", func() {
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(4)) bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(4))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1)) Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(4)) bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(4))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock4)) Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(4)) bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(4))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance)) Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(4)) bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(4))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2)) Expect(bal).To(Equal(expectedBankBalanceBlock2))
@ -317,15 +286,12 @@ var _ = Describe("eth state reading tests", func() {
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(5)) bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock5)) Expect(bal).To(Equal(expectedAcct1BalanceBlock5))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(5)) bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock4)) Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(5)) bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance)) Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(5)) bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2)) Expect(bal).To(Equal(expectedBankBalanceBlock2))
@ -338,15 +304,12 @@ var _ = Describe("eth state reading tests", func() {
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1)) Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
_, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true)) Expect(err).To(HaveOccurred())
Expect(err).ToNot(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
_, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true)) _, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0))) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[1].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock1)) Expect(bal).To(Equal(expectedBankBalanceBlock1))
@ -354,15 +317,12 @@ var _ = Describe("eth state reading tests", func() {
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1)) Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock2)) Expect(bal).To(Equal(expectedAcct2BalanceBlock2))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance)) Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[2].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2)) Expect(bal).To(Equal(expectedBankBalanceBlock2))
@ -370,15 +330,12 @@ var _ = Describe("eth state reading tests", func() {
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1)) Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock3)) Expect(bal).To(Equal(expectedAcct2BalanceBlock3))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance)) Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2)) Expect(bal).To(Equal(expectedBankBalanceBlock2))
@ -386,15 +343,12 @@ var _ = Describe("eth state reading tests", func() {
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock1)) Expect(bal).To(Equal(expectedAcct1BalanceBlock1))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock4)) Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance)) Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[4].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2)) Expect(bal).To(Equal(expectedBankBalanceBlock2))
@ -402,45 +356,37 @@ var _ = Describe("eth state reading tests", func() {
bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct1BalanceBlock5)) Expect(bal).To(Equal(expectedAcct1BalanceBlock5))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedAcct2BalanceBlock4)) Expect(bal).To(Equal(expectedAcct2BalanceBlock4))
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedContractBalance)) Expect(bal).To(Equal(expectedContractBalance))
bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true)) bal, err = api.GetBalance(ctx, test_helpers.TestBankAddress, rpc.BlockNumberOrHashWithHash(blocks[5].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal(expectedBankBalanceBlock2)) Expect(bal).To(Equal(expectedBankBalanceBlock2))
}) })
It("Returns `0` for an account it cannot find the balance for an account at the provided block number", func() { It("Throws an error for an account it cannot find the balance for an account at the provided block number", func() {
bal, err := api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(0)) _, err := api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithNumber(0))
Expect(err).ToNot(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0))) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
_, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(0))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithNumber(0)) Expect(err).To(HaveOccurred())
Expect(err).ToNot(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
Expect(bal).To(Equal((*hexutil.Big)(common.Big0))) _, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(0))
Expect(err).To(HaveOccurred())
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(0)) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
}) })
It("Returns `0` for an error for an account it cannot find the balance for an account at the provided block hash", func() { It("Throws an error for an account it cannot find the balance for an account at the provided block hash", func() {
bal, err := api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true)) _, err := api.GetBalance(ctx, test_helpers.Account1Addr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0))) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
_, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
bal, err = api.GetBalance(ctx, test_helpers.Account2Addr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true)) Expect(err).To(HaveOccurred())
Expect(err).ToNot(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
Expect(bal).To(Equal((*hexutil.Big)(common.Big0))) _, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true))
Expect(err).To(HaveOccurred())
bal, err = api.GetBalance(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithHash(blocks[0].Hash(), true)) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
Expect(err).ToNot(HaveOccurred())
Expect(bal).To(Equal((*hexutil.Big)(common.Big0)))
}) })
}) })
@ -463,63 +409,51 @@ var _ = Describe("eth state reading tests", func() {
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode))) Expect(code).To(Equal((hexutil.Bytes)(test_helpers.ContractCode)))
}) })
It("Returns `nil` for an account it cannot find the code for", func() { It("Throws an error for an account it cannot find the code for", func() {
code, err := api.GetCode(ctx, randomAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true)) _, err := api.GetCode(ctx, randomAddr, rpc.BlockNumberOrHashWithHash(blocks[3].Hash(), true))
Expect(err).ToNot(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(code).To(BeEmpty())
}) })
It("Returns `nil` for a contract that doesn't exist at this height", func() { It("Throws an error for a contract that doesn't exist at this hieght", func() {
code, err := api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(0)) _, err := api.GetCode(ctx, test_helpers.ContractAddr, rpc.BlockNumberOrHashWithNumber(0))
Expect(err).ToNot(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(code).To(BeEmpty())
}) })
}) })
Describe("eth_getStorageAt", func() { Describe("eth_getStorageAt", func() {
It("Returns empty slice if it tries to access a contract which does not exist", func() { It("Throws an error if it tries to access a contract which does not exist", func() {
storage, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(0)) _, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(0))
Expect(err).NotTo(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue))) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
storage, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(1)) _, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(1))
Expect(err).NotTo(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue))) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
}) })
It("Returns empty slice if it tries to access a contract slot which does not exist", func() { It("Throws an error if it tries to access a contract slot which does not exist", func() {
storage, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, randomHash.Hex(), rpc.BlockNumberOrHashWithNumber(2)) _, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, randomHash.Hex(), rpc.BlockNumberOrHashWithNumber(2))
Expect(err).NotTo(HaveOccurred()) Expect(err).To(HaveOccurred())
Expect(storage).To(Equal(hexutil.Bytes(eth.EmptyNodeValue))) Expect(err.Error()).To(ContainSubstring("sql: no rows in result set"))
}) })
It("Retrieves the storage value at the provided contract address and storage leaf key at the block with the provided hash or number", func() { It("Retrieves the storage value at the provided contract address and storage leaf key at the block with the provided hash or number", func() {
// After deployment // After deployment
val, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(2)) val, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(2))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
expectedRes := hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")) expectedRes := hexutil.Bytes(common.Hex2Bytes("01"))
Expect(val).To(Equal(expectedRes)) Expect(val).To(Equal(expectedRes))
val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(3)) val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(3))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000003")) expectedRes = hexutil.Bytes(common.Hex2Bytes("03"))
Expect(val).To(Equal(expectedRes)) Expect(val).To(Equal(expectedRes))
val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(4)) val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(4))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
expectedRes = hexutil.Bytes(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000009")) expectedRes = hexutil.Bytes(common.Hex2Bytes("09"))
Expect(val).To(Equal(expectedRes)) Expect(val).To(Equal(expectedRes))
val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(5)) val, err = api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.ContractSlotKeyHash.Hex(), rpc.BlockNumberOrHashWithNumber(5))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(val).To(Equal(hexutil.Bytes(eth.EmptyNodeValue))) Expect(val).To(Equal(hexutil.Bytes{}))
})
It("Throws an error for a non-existing block hash", func() {
_, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithHash(randomHash, true))
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError("header for hash not found"))
})
It("Throws an error for a non-existing block number", func() {
_, err := api.GetStorageAt(ctx, test_helpers.ContractAddr, test_helpers.IndexOne, rpc.BlockNumberOrHashWithNumber(chainLength+1))
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError("header not found"))
}) })
}) })
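A side note on the expected values above: on the master side, GetStorageAt now returns a full 32-byte storage word rather than the bare trie value (hence "0000…0001" instead of "01"). The sketch below only illustrates that padding assumption using go-ethereum's common and hexutil packages; padStorageValue is a hypothetical helper, not part of this repository.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/hexutil"
)

// padStorageValue left-pads a raw storage value to a full 32-byte word,
// mirroring the padded values the updated tests expect.
func padStorageValue(raw []byte) hexutil.Bytes {
    return hexutil.Bytes(common.LeftPadBytes(raw, 32))
}

func main() {
    fmt.Println(hexutil.Encode(padStorageValue(common.Hex2Bytes("01"))))
    // Output: 0x0000000000000000000000000000000000000000000000000000000000000001
}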

View File

@ -25,7 +25,7 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
func TestETHSuite(t *testing.T) { func TestETHWatcher(t *testing.T) {
RegisterFailHandler(Fail) RegisterFailHandler(Fail)
RunSpecs(t, "eth ipld server eth suite test") RunSpecs(t, "eth ipld server eth suite test")
} }

View File

@ -19,20 +19,22 @@ package eth
import ( import (
"bytes" "bytes"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash" "github.com/multiformats/go-multihash"
"github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs/ipld"
) )
// Filterer interface for substituting mocks in tests // Filterer interface for substituting mocks in tests
type Filterer interface { type Filterer interface {
Filter(filter SubscriptionSettings, payload ConvertedPayload) (*IPLDs, error) Filter(filter SubscriptionSettings, payload eth.ConvertedPayload) (*IPLDs, error)
} }
// ResponseFilterer satisfies the ResponseFilterer interface for ethereum // ResponseFilterer satisfies the ResponseFilterer interface for ethereum
@ -44,7 +46,7 @@ func NewResponseFilterer() *ResponseFilterer {
} }
// Filter is used to filter through eth data to extract and package requested data into a Payload // Filter is used to filter through eth data to extract and package requested data into a Payload
func (s *ResponseFilterer) Filter(filter SubscriptionSettings, payload ConvertedPayload) (*IPLDs, error) { func (s *ResponseFilterer) Filter(filter SubscriptionSettings, payload eth.ConvertedPayload) (*IPLDs, error) {
if checkRange(filter.Start.Int64(), filter.End.Int64(), payload.Block.Number().Int64()) { if checkRange(filter.Start.Int64(), filter.End.Int64(), payload.Block.Number().Int64()) {
response := new(IPLDs) response := new(IPLDs)
response.TotalDifficulty = payload.TotalDifficulty response.TotalDifficulty = payload.TotalDifficulty
@ -71,7 +73,7 @@ func (s *ResponseFilterer) Filter(filter SubscriptionSettings, payload Converted
return nil, nil return nil, nil
} }
func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IPLDs, payload ConvertedPayload) error { func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IPLDs, payload eth.ConvertedPayload) error {
if !headerFilter.Off { if !headerFilter.Off {
headerRLP, err := rlp.EncodeToBytes(payload.Block.Header()) headerRLP, err := rlp.EncodeToBytes(payload.Block.Header())
if err != nil { if err != nil {
@ -81,12 +83,12 @@ func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IP
if err != nil { if err != nil {
return err return err
} }
response.Header = models.IPLDModel{ response.Header = ipfs.BlockModel{
Data: headerRLP, Data: headerRLP,
Key: cid.String(), CID: cid.String(),
} }
if headerFilter.Uncles { if headerFilter.Uncles {
response.Uncles = make([]models.IPLDModel, len(payload.Block.Body().Uncles)) response.Uncles = make([]ipfs.BlockModel, len(payload.Block.Body().Uncles))
for i, uncle := range payload.Block.Body().Uncles { for i, uncle := range payload.Block.Body().Uncles {
uncleRlp, err := rlp.EncodeToBytes(uncle) uncleRlp, err := rlp.EncodeToBytes(uncle)
if err != nil { if err != nil {
@ -96,9 +98,9 @@ func (s *ResponseFilterer) filterHeaders(headerFilter HeaderFilter, response *IP
if err != nil { if err != nil {
return err return err
} }
response.Uncles[i] = models.IPLDModel{ response.Uncles[i] = ipfs.BlockModel{
Data: uncleRlp, Data: uncleRlp,
Key: cid.String(), CID: cid.String(),
} }
} }
} }
@ -113,12 +115,12 @@ func checkRange(start, end, actual int64) bool {
return false return false
} }
func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLDs, payload ConvertedPayload) ([]common.Hash, error) { func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLDs, payload eth.ConvertedPayload) ([]common.Hash, error) {
var trxHashes []common.Hash var trxHashes []common.Hash
if !trxFilter.Off { if !trxFilter.Off {
trxLen := len(payload.Block.Body().Transactions) trxLen := len(payload.Block.Body().Transactions)
trxHashes = make([]common.Hash, 0, trxLen) trxHashes = make([]common.Hash, 0, trxLen)
response.Transactions = make([]models.IPLDModel, 0, trxLen) response.Transactions = make([]ipfs.BlockModel, 0, trxLen)
for i, trx := range payload.Block.Body().Transactions { for i, trx := range payload.Block.Body().Transactions {
// TODO: check if we want the corresponding receipt and, if we do, include this transaction // TODO: check if we want the corresponding receipt and, if we do, include this transaction
if checkTransactionAddrs(trxFilter.Src, trxFilter.Dst, payload.TxMetaData[i].Src, payload.TxMetaData[i].Dst) { if checkTransactionAddrs(trxFilter.Src, trxFilter.Dst, payload.TxMetaData[i].Src, payload.TxMetaData[i].Dst) {
@ -131,9 +133,9 @@ func (s *ResponseFilterer) filterTransactions(trxFilter TxFilter, response *IPLD
if err != nil { if err != nil {
return nil, err return nil, err
} }
response.Transactions = append(response.Transactions, models.IPLDModel{ response.Transactions = append(response.Transactions, ipfs.BlockModel{
Data: data, Data: data,
Key: cid.String(), CID: cid.String(),
}) })
trxHashes = append(trxHashes, trx.Hash()) trxHashes = append(trxHashes, trx.Hash())
} }
@ -161,30 +163,25 @@ func checkTransactionAddrs(wantedSrc, wantedDst []string, actualSrc, actualDst s
return false return false
} }
func (s *ResponseFilterer) filerReceipts(receiptFilter ReceiptFilter, response *IPLDs, payload ConvertedPayload, trxHashes []common.Hash) error { func (s *ResponseFilterer) filerReceipts(receiptFilter ReceiptFilter, response *IPLDs, payload eth.ConvertedPayload, trxHashes []common.Hash) error {
if !receiptFilter.Off { if !receiptFilter.Off {
response.Receipts = make([]models.IPLDModel, 0, len(payload.Receipts)) response.Receipts = make([]ipfs.BlockModel, 0, len(payload.Receipts))
rctLeafCID, rctIPLDData, err := GetRctLeafNodeData(payload.Receipts) for i, receipt := range payload.Receipts {
if err != nil {
return err
}
for idx, receipt := range payload.Receipts {
// topics is always length 4 // topics is always length 4
topics := make([][]string, 4) topics := [][]string{payload.ReceiptMetaData[i].Topic0s, payload.ReceiptMetaData[i].Topic1s, payload.ReceiptMetaData[i].Topic2s, payload.ReceiptMetaData[i].Topic3s}
contracts := make([]string, len(receipt.Logs)) if checkReceipts(receipt, receiptFilter.Topics, topics, receiptFilter.LogAddresses, payload.ReceiptMetaData[i].LogContracts, trxHashes) {
for _, l := range receipt.Logs { receiptBuffer := new(bytes.Buffer)
contracts = append(contracts, l.Address.String()) if err := receipt.EncodeRLP(receiptBuffer); err != nil {
for idx, t := range l.Topics { return err
topics[idx] = append(topics[idx], t.String())
} }
} data := receiptBuffer.Bytes()
cid, err := ipld.RawdataToCid(ipld.MEthTxReceipt, data, multihash.KECCAK_256)
// TODO: Verify this filter logic. if err != nil {
if checkReceipts(receipt, receiptFilter.Topics, topics, receiptFilter.LogAddresses, contracts, trxHashes) { return err
response.Receipts = append(response.Receipts, models.IPLDModel{ }
Data: rctIPLDData[idx], response.Receipts = append(response.Receipts, ipfs.BlockModel{
Key: rctLeafCID[idx].String(), Data: data,
CID: cid.String(),
}) })
} }
} }
@ -256,7 +253,7 @@ func slicesShareString(slice1, slice2 []string) int {
} }
// filterStateAndStorage filters state and storage nodes into the response according to the provided filters // filterStateAndStorage filters state and storage nodes into the response according to the provided filters
func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storageFilter StorageFilter, response *IPLDs, payload ConvertedPayload) error { func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storageFilter StorageFilter, response *IPLDs, payload eth.ConvertedPayload) error {
response.StateNodes = make([]StateNode, 0, len(payload.StateNodes)) response.StateNodes = make([]StateNode, 0, len(payload.StateNodes))
response.StorageNodes = make([]StorageNode, 0) response.StorageNodes = make([]StorageNode, 0)
stateAddressFilters := make([]common.Hash, len(stateFilter.Addresses)) stateAddressFilters := make([]common.Hash, len(stateFilter.Addresses))
@ -273,37 +270,37 @@ func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storag
} }
for _, stateNode := range payload.StateNodes { for _, stateNode := range payload.StateNodes {
if !stateFilter.Off && checkNodeKeys(stateAddressFilters, stateNode.LeafKey) { if !stateFilter.Off && checkNodeKeys(stateAddressFilters, stateNode.LeafKey) {
if stateNode.NodeType == sdtypes.Leaf || stateFilter.IntermediateNodes { if stateNode.Type == sdtypes.Leaf || stateFilter.IntermediateNodes {
cid, err := ipld.RawdataToCid(ipld.MEthStateTrie, stateNode.NodeValue, multihash.KECCAK_256) cid, err := ipld.RawdataToCid(ipld.MEthStateTrie, stateNode.Value, multihash.KECCAK_256)
if err != nil { if err != nil {
return err return err
} }
response.StateNodes = append(response.StateNodes, StateNode{ response.StateNodes = append(response.StateNodes, StateNode{
StateLeafKey: common.BytesToHash(stateNode.LeafKey), StateLeafKey: stateNode.LeafKey,
Path: stateNode.Path, Path: stateNode.Path,
IPLD: models.IPLDModel{ IPLD: ipfs.BlockModel{
Data: stateNode.NodeValue, Data: stateNode.Value,
Key: cid.String(), CID: cid.String(),
}, },
Type: stateNode.NodeType, Type: stateNode.Type,
}) })
} }
} }
if !storageFilter.Off && checkNodeKeys(storageAddressFilters, stateNode.LeafKey) { if !storageFilter.Off && checkNodeKeys(storageAddressFilters, stateNode.LeafKey) {
for _, storageNode := range payload.StorageNodes[common.Bytes2Hex(stateNode.Path)] { for _, storageNode := range payload.StorageNodes[common.Bytes2Hex(stateNode.Path)] {
if checkNodeKeys(storageKeyFilters, storageNode.LeafKey) { if checkNodeKeys(storageKeyFilters, storageNode.LeafKey) {
cid, err := ipld.RawdataToCid(ipld.MEthStorageTrie, storageNode.NodeValue, multihash.KECCAK_256) cid, err := ipld.RawdataToCid(ipld.MEthStorageTrie, storageNode.Value, multihash.KECCAK_256)
if err != nil { if err != nil {
return err return err
} }
response.StorageNodes = append(response.StorageNodes, StorageNode{ response.StorageNodes = append(response.StorageNodes, StorageNode{
StateLeafKey: common.BytesToHash(stateNode.LeafKey), StateLeafKey: stateNode.LeafKey,
StorageLeafKey: common.BytesToHash(storageNode.LeafKey), StorageLeafKey: storageNode.LeafKey,
IPLD: models.IPLDModel{ IPLD: ipfs.BlockModel{
Data: storageNode.NodeValue, Data: storageNode.Value,
Key: cid.String(), CID: cid.String(),
}, },
Type: storageNode.NodeType, Type: storageNode.Type,
Path: storageNode.Path, Path: storageNode.Path,
}) })
} }
@ -313,52 +310,15 @@ func (s *ResponseFilterer) filterStateAndStorage(stateFilter StateFilter, storag
return nil return nil
} }
func checkNodeKeys(wantedKeys []common.Hash, actualKey []byte) bool { func checkNodeKeys(wantedKeys []common.Hash, actualKey common.Hash) bool {
// If we aren't filtering for any specific keys, all nodes are a go // If we aren't filtering for any specific keys, all nodes are a go
if len(wantedKeys) == 0 { if len(wantedKeys) == 0 {
return true return true
} }
for _, key := range wantedKeys { for _, key := range wantedKeys {
if bytes.Equal(key.Bytes(), actualKey) { if bytes.Equal(key.Bytes(), actualKey.Bytes()) {
return true return true
} }
} }
return false return false
} }
// GetRctLeafNodeData converts the receipts into a receipt trie and returns the receipt leaf node IPLD data and
// corresponding CIDs
func GetRctLeafNodeData(rcts types.Receipts) ([]cid.Cid, [][]byte, error) {
receiptTrie := ipld.NewRctTrie()
for idx, rct := range rcts {
ethRct, err := ipld.NewReceipt(rct)
if err != nil {
return nil, nil, err
}
if err = receiptTrie.Add(idx, ethRct.RawData()); err != nil {
return nil, nil, err
}
}
rctLeafNodes, keys, err := receiptTrie.GetLeafNodes()
if err != nil {
return nil, nil, err
}
ethRctleafNodeCids := make([]cid.Cid, len(rctLeafNodes))
ethRctleafNodeData := make([][]byte, len(rctLeafNodes))
for i, rln := range rctLeafNodes {
var idx uint
r := bytes.NewReader(keys[i].TrieKey)
err = rlp.Decode(r, &idx)
if err != nil {
return nil, nil, err
}
ethRctleafNodeCids[idx] = rln.Cid()
ethRctleafNodeData[idx] = rln.RawData()
}
return ethRctleafNodeCids, ethRctleafNodeData, nil
}
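For readers skimming the diff, here is a compile-only sketch of how GetRctLeafNodeData is meant to be consumed. It assumes the v3 module path shown in this file's imports, and printRctLeafCIDs is a hypothetical helper, not code from the repository.

package example

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/types"
    "github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
)

// printRctLeafCIDs lists the receipt-trie leaf CIDs for a block's receipts.
// The returned slices are index-aligned with the receipts slice.
func printRctLeafCIDs(rcts types.Receipts) error {
    cids, data, err := eth.GetRctLeafNodeData(rcts)
    if err != nil {
        return err
    }
    for i, c := range cids {
        fmt.Printf("receipt %d -> leaf CID %s (%d bytes of leaf-node RLP)\n", i, c.String(), len(data[i]))
    }
    return nil
}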

View File

@ -19,15 +19,16 @@ package eth_test
import ( import (
"bytes" "bytes"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
sdtypes "github.com/ethereum/go-ethereum/statediff/types" sdtypes "github.com/ethereum/go-ethereum/statediff/types"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth" "github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers"
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared" "github.com/vulcanize/ipld-eth-server/pkg/eth"
"github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers"
"github.com/vulcanize/ipld-eth-server/pkg/shared"
) )
var ( var (
@ -46,29 +47,29 @@ var _ = Describe("Filterer", func() {
Expect(iplds).ToNot(BeNil()) Expect(iplds).ToNot(BeNil())
Expect(iplds.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64())) Expect(iplds.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds.Header).To(Equal(test_helpers.MockIPLDs.Header)) Expect(iplds.Header).To(Equal(test_helpers.MockIPLDs.Header))
var expectedEmptyUncles []models.IPLDModel var expectedEmptyUncles []ipfs.BlockModel
Expect(iplds.Uncles).To(Equal(expectedEmptyUncles)) Expect(iplds.Uncles).To(Equal(expectedEmptyUncles))
Expect(len(iplds.Transactions)).To(Equal(4)) Expect(len(iplds.Transactions)).To(Equal(3))
Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.Tx1)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.MockTransactions.GetRlp(0))).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.Tx2)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.Tx3)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds.Transactions, test_helpers.MockTransactions.GetRlp(2))).To(BeTrue())
Expect(len(iplds.Receipts)).To(Equal(4)) Expect(len(iplds.Receipts)).To(Equal(3))
Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct1IPLD)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.MockReceipts.GetRlp(0))).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct2IPLD)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.MockReceipts.GetRlp(1))).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.Rct3IPLD)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds.Receipts, test_helpers.MockReceipts.GetRlp(2))).To(BeTrue())
Expect(len(iplds.StateNodes)).To(Equal(2)) Expect(len(iplds.StateNodes)).To(Equal(2))
for _, stateNode := range iplds.StateNodes { for _, stateNode := range iplds.StateNodes {
Expect(stateNode.Type).To(Equal(sdtypes.Leaf)) Expect(stateNode.Type).To(Equal(sdtypes.Leaf))
if bytes.Equal(stateNode.StateLeafKey.Bytes(), test_helpers.AccountLeafKey) { if bytes.Equal(stateNode.StateLeafKey.Bytes(), test_helpers.AccountLeafKey) {
Expect(stateNode.IPLD).To(Equal(models.IPLDModel{ Expect(stateNode.IPLD).To(Equal(ipfs.BlockModel{
Data: test_helpers.State2IPLD.RawData(), Data: test_helpers.State2IPLD.RawData(),
Key: test_helpers.State2IPLD.Cid().String(), CID: test_helpers.State2IPLD.Cid().String(),
})) }))
} }
if bytes.Equal(stateNode.StateLeafKey.Bytes(), test_helpers.ContractLeafKey) { if bytes.Equal(stateNode.StateLeafKey.Bytes(), test_helpers.ContractLeafKey) {
Expect(stateNode.IPLD).To(Equal(models.IPLDModel{ Expect(stateNode.IPLD).To(Equal(ipfs.BlockModel{
Data: test_helpers.State1IPLD.RawData(), Data: test_helpers.State1IPLD.RawData(),
Key: test_helpers.State1IPLD.Cid().String(), CID: test_helpers.State1IPLD.Cid().String(),
})) }))
} }
} }
@ -80,116 +81,116 @@ var _ = Describe("Filterer", func() {
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(iplds1).ToNot(BeNil()) Expect(iplds1).ToNot(BeNil())
Expect(iplds1.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64())) Expect(iplds1.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds1.Header).To(Equal(models.IPLDModel{})) Expect(iplds1.Header).To(Equal(ipfs.BlockModel{}))
Expect(len(iplds1.Uncles)).To(Equal(0)) Expect(len(iplds1.Uncles)).To(Equal(0))
Expect(len(iplds1.Transactions)).To(Equal(0)) Expect(len(iplds1.Transactions)).To(Equal(0))
Expect(len(iplds1.StorageNodes)).To(Equal(0)) Expect(len(iplds1.StorageNodes)).To(Equal(0))
Expect(len(iplds1.StateNodes)).To(Equal(0)) Expect(len(iplds1.StateNodes)).To(Equal(0))
Expect(len(iplds1.Receipts)).To(Equal(1)) Expect(len(iplds1.Receipts)).To(Equal(1))
Expect(iplds1.Receipts[0]).To(Equal(models.IPLDModel{ Expect(iplds1.Receipts[0]).To(Equal(ipfs.BlockModel{
Data: test_helpers.Rct1IPLD, Data: test_helpers.Rct1IPLD.RawData(),
Key: test_helpers.Rct1CID.String(), CID: test_helpers.Rct1IPLD.Cid().String(),
})) }))
iplds2, err := filterer.Filter(rctTopicsFilter, test_helpers.MockConvertedPayload) iplds2, err := filterer.Filter(rctTopicsFilter, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(iplds2).ToNot(BeNil()) Expect(iplds2).ToNot(BeNil())
Expect(iplds2.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64())) Expect(iplds2.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds2.Header).To(Equal(models.IPLDModel{})) Expect(iplds2.Header).To(Equal(ipfs.BlockModel{}))
Expect(len(iplds2.Uncles)).To(Equal(0)) Expect(len(iplds2.Uncles)).To(Equal(0))
Expect(len(iplds2.Transactions)).To(Equal(0)) Expect(len(iplds2.Transactions)).To(Equal(0))
Expect(len(iplds2.StorageNodes)).To(Equal(0)) Expect(len(iplds2.StorageNodes)).To(Equal(0))
Expect(len(iplds2.StateNodes)).To(Equal(0)) Expect(len(iplds2.StateNodes)).To(Equal(0))
Expect(len(iplds2.Receipts)).To(Equal(1)) Expect(len(iplds2.Receipts)).To(Equal(1))
Expect(iplds2.Receipts[0]).To(Equal(models.IPLDModel{ Expect(iplds2.Receipts[0]).To(Equal(ipfs.BlockModel{
Data: test_helpers.Rct1IPLD, Data: test_helpers.Rct1IPLD.RawData(),
Key: test_helpers.Rct1CID.String(), CID: test_helpers.Rct1IPLD.Cid().String(),
})) }))
iplds3, err := filterer.Filter(rctTopicsAndAddressFilter, test_helpers.MockConvertedPayload) iplds3, err := filterer.Filter(rctTopicsAndAddressFilter, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(iplds3).ToNot(BeNil()) Expect(iplds3).ToNot(BeNil())
Expect(iplds3.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64())) Expect(iplds3.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds3.Header).To(Equal(models.IPLDModel{})) Expect(iplds3.Header).To(Equal(ipfs.BlockModel{}))
Expect(len(iplds3.Uncles)).To(Equal(0)) Expect(len(iplds3.Uncles)).To(Equal(0))
Expect(len(iplds3.Transactions)).To(Equal(0)) Expect(len(iplds3.Transactions)).To(Equal(0))
Expect(len(iplds3.StorageNodes)).To(Equal(0)) Expect(len(iplds3.StorageNodes)).To(Equal(0))
Expect(len(iplds3.StateNodes)).To(Equal(0)) Expect(len(iplds3.StateNodes)).To(Equal(0))
Expect(len(iplds3.Receipts)).To(Equal(1)) Expect(len(iplds3.Receipts)).To(Equal(1))
Expect(iplds3.Receipts[0]).To(Equal(models.IPLDModel{ Expect(iplds3.Receipts[0]).To(Equal(ipfs.BlockModel{
Data: test_helpers.Rct1IPLD, Data: test_helpers.Rct1IPLD.RawData(),
Key: test_helpers.Rct1CID.String(), CID: test_helpers.Rct1IPLD.Cid().String(),
})) }))
iplds4, err := filterer.Filter(rctAddressesAndTopicFilter, test_helpers.MockConvertedPayload) iplds4, err := filterer.Filter(rctAddressesAndTopicFilter, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(iplds4).ToNot(BeNil()) Expect(iplds4).ToNot(BeNil())
Expect(iplds4.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64())) Expect(iplds4.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds4.Header).To(Equal(models.IPLDModel{})) Expect(iplds4.Header).To(Equal(ipfs.BlockModel{}))
Expect(len(iplds4.Uncles)).To(Equal(0)) Expect(len(iplds4.Uncles)).To(Equal(0))
Expect(len(iplds4.Transactions)).To(Equal(0)) Expect(len(iplds4.Transactions)).To(Equal(0))
Expect(len(iplds4.StorageNodes)).To(Equal(0)) Expect(len(iplds4.StorageNodes)).To(Equal(0))
Expect(len(iplds4.StateNodes)).To(Equal(0)) Expect(len(iplds4.StateNodes)).To(Equal(0))
Expect(len(iplds4.Receipts)).To(Equal(1)) Expect(len(iplds4.Receipts)).To(Equal(1))
Expect(iplds4.Receipts[0]).To(Equal(models.IPLDModel{ Expect(iplds4.Receipts[0]).To(Equal(ipfs.BlockModel{
Data: test_helpers.Rct2IPLD, Data: test_helpers.Rct2IPLD.RawData(),
Key: test_helpers.Rct2CID.String(), CID: test_helpers.Rct2IPLD.Cid().String(),
})) }))
iplds5, err := filterer.Filter(rctsForAllCollectedTrxs, test_helpers.MockConvertedPayload) iplds5, err := filterer.Filter(rctsForAllCollectedTrxs, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(iplds5).ToNot(BeNil()) Expect(iplds5).ToNot(BeNil())
Expect(iplds5.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64())) Expect(iplds5.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds5.Header).To(Equal(models.IPLDModel{})) Expect(iplds5.Header).To(Equal(ipfs.BlockModel{}))
Expect(len(iplds5.Uncles)).To(Equal(0)) Expect(len(iplds5.Uncles)).To(Equal(0))
Expect(len(iplds5.Transactions)).To(Equal(4)) Expect(len(iplds5.Transactions)).To(Equal(3))
Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx1)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.MockTransactions.GetRlp(0))).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx2)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx3)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.MockTransactions.GetRlp(2))).To(BeTrue())
Expect(len(iplds5.StorageNodes)).To(Equal(0)) Expect(len(iplds5.StorageNodes)).To(Equal(0))
Expect(len(iplds5.StateNodes)).To(Equal(0)) Expect(len(iplds5.StateNodes)).To(Equal(0))
Expect(len(iplds5.Receipts)).To(Equal(4)) Expect(len(iplds5.Receipts)).To(Equal(3))
Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct1IPLD)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.MockReceipts.GetRlp(0))).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct2IPLD)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.MockReceipts.GetRlp(1))).To(BeTrue())
Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.Rct3IPLD)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds5.Receipts, test_helpers.MockReceipts.GetRlp(2))).To(BeTrue())
iplds6, err := filterer.Filter(rctsForSelectCollectedTrxs, test_helpers.MockConvertedPayload) iplds6, err := filterer.Filter(rctsForSelectCollectedTrxs, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(iplds6).ToNot(BeNil()) Expect(iplds6).ToNot(BeNil())
Expect(iplds6.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64())) Expect(iplds6.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds6.Header).To(Equal(models.IPLDModel{})) Expect(iplds6.Header).To(Equal(ipfs.BlockModel{}))
Expect(len(iplds6.Uncles)).To(Equal(0)) Expect(len(iplds6.Uncles)).To(Equal(0))
Expect(len(iplds6.Transactions)).To(Equal(1)) Expect(len(iplds6.Transactions)).To(Equal(1))
Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.Tx2)).To(BeTrue()) Expect(shared.IPLDsContainBytes(iplds5.Transactions, test_helpers.MockTransactions.GetRlp(1))).To(BeTrue())
Expect(len(iplds6.StorageNodes)).To(Equal(0)) Expect(len(iplds6.StorageNodes)).To(Equal(0))
Expect(len(iplds6.StateNodes)).To(Equal(0)) Expect(len(iplds6.StateNodes)).To(Equal(0))
Expect(len(iplds6.Receipts)).To(Equal(1)) Expect(len(iplds6.Receipts)).To(Equal(1))
Expect(iplds4.Receipts[0]).To(Equal(models.IPLDModel{ Expect(iplds4.Receipts[0]).To(Equal(ipfs.BlockModel{
Data: test_helpers.Rct2IPLD, Data: test_helpers.Rct2IPLD.RawData(),
Key: test_helpers.Rct2CID.String(), CID: test_helpers.Rct2IPLD.Cid().String(),
})) }))
iplds7, err := filterer.Filter(stateFilter, test_helpers.MockConvertedPayload) iplds7, err := filterer.Filter(stateFilter, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(iplds7).ToNot(BeNil()) Expect(iplds7).ToNot(BeNil())
Expect(iplds7.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64())) Expect(iplds7.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds7.Header).To(Equal(models.IPLDModel{})) Expect(iplds7.Header).To(Equal(ipfs.BlockModel{}))
Expect(len(iplds7.Uncles)).To(Equal(0)) Expect(len(iplds7.Uncles)).To(Equal(0))
Expect(len(iplds7.Transactions)).To(Equal(0)) Expect(len(iplds7.Transactions)).To(Equal(0))
Expect(len(iplds7.StorageNodes)).To(Equal(0)) Expect(len(iplds7.StorageNodes)).To(Equal(0))
Expect(len(iplds7.Receipts)).To(Equal(0)) Expect(len(iplds7.Receipts)).To(Equal(0))
Expect(len(iplds7.StateNodes)).To(Equal(1)) Expect(len(iplds7.StateNodes)).To(Equal(1))
Expect(iplds7.StateNodes[0].StateLeafKey.Bytes()).To(Equal(test_helpers.AccountLeafKey)) Expect(iplds7.StateNodes[0].StateLeafKey.Bytes()).To(Equal(test_helpers.AccountLeafKey))
Expect(iplds7.StateNodes[0].IPLD).To(Equal(models.IPLDModel{ Expect(iplds7.StateNodes[0].IPLD).To(Equal(ipfs.BlockModel{
Data: test_helpers.State2IPLD.RawData(), Data: test_helpers.State2IPLD.RawData(),
Key: test_helpers.State2IPLD.Cid().String(), CID: test_helpers.State2IPLD.Cid().String(),
})) }))
iplds8, err := filterer.Filter(rctTopicsAndAddressFilterFail, test_helpers.MockConvertedPayload) iplds8, err := filterer.Filter(rctTopicsAndAddressFilterFail, test_helpers.MockConvertedPayload)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(iplds8).ToNot(BeNil()) Expect(iplds8).ToNot(BeNil())
Expect(iplds8.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64())) Expect(iplds8.BlockNumber.Int64()).To(Equal(test_helpers.MockIPLDs.BlockNumber.Int64()))
Expect(iplds8.Header).To(Equal(models.IPLDModel{})) Expect(iplds8.Header).To(Equal(ipfs.BlockModel{}))
Expect(len(iplds8.Uncles)).To(Equal(0)) Expect(len(iplds8.Uncles)).To(Equal(0))
Expect(len(iplds8.Transactions)).To(Equal(0)) Expect(len(iplds8.Transactions)).To(Equal(0))
Expect(len(iplds8.StorageNodes)).To(Equal(0)) Expect(len(iplds8.StorageNodes)).To(Equal(0))

View File

@ -17,7 +17,11 @@
package eth package eth
import ( import (
"fmt"
sdtypes "github.com/ethereum/go-ethereum/statediff/types" sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ethereum/go-ethereum/params"
) )
func ResolveToNodeType(nodeType int) sdtypes.NodeType { func ResolveToNodeType(nodeType int) sdtypes.NodeType {
@ -34,3 +38,19 @@ func ResolveToNodeType(nodeType int) sdtypes.NodeType {
return sdtypes.Unknown return sdtypes.Unknown
} }
} }
// ChainConfig returns the appropriate ethereum chain config for the provided chain id
func ChainConfig(chainID uint64) (*params.ChainConfig, error) {
switch chainID {
case 1:
return params.MainnetChainConfig, nil
case 3:
return params.RopstenChainConfig, nil
case 4:
return params.RinkebyChainConfig, nil
case 5:
return params.GoerliChainConfig, nil
default:
return nil, fmt.Errorf("chain config for chainid %d not available", chainID)
}
}
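A minimal usage sketch for ChainConfig follows; the import path assumes the pre-v3 module path used on this side of the diff, and the chain id and error handling are illustrative only.

package main

import (
    "fmt"
    "log"

    "github.com/vulcanize/ipld-eth-server/pkg/eth"
)

func main() {
    // Resolve the mainnet chain config (chain id 1); unsupported ids return an error.
    cfg, err := eth.ChainConfig(1)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(cfg.ChainID) // 1
}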

View File

@ -22,10 +22,14 @@ import (
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
"github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
"github.com/vulcanize/ipld-eth-server/pkg/shared"
) )
// Fetcher interface for substituting mocks in tests // Fetcher interface for substituting mocks in tests
@ -36,11 +40,11 @@ type Fetcher interface {
// IPLDFetcher satisfies the IPLDFetcher interface for ethereum // IPLDFetcher satisfies the IPLDFetcher interface for ethereum
// It interfaces directly with PG-IPFS // It interfaces directly with PG-IPFS
type IPLDFetcher struct { type IPLDFetcher struct {
db *sqlx.DB db *postgres.DB
} }
// NewIPLDFetcher creates a pointer to a new IPLDFetcher // NewIPLDFetcher creates a pointer to a new IPLDFetcher
func NewIPLDFetcher(db *sqlx.DB) *IPLDFetcher { func NewIPLDFetcher(db *postgres.DB) *IPLDFetcher {
return &IPLDFetcher{ return &IPLDFetcher{
db: db, db: db,
} }
@ -99,73 +103,72 @@ func (f *IPLDFetcher) Fetch(cids CIDWrapper) (*IPLDs, error) {
return iplds, err return iplds, err
} }
// FetchHeader fetches header // FetchHeaders fetches headers
func (f *IPLDFetcher) FetchHeader(tx *sqlx.Tx, c models.HeaderModel) (models.IPLDModel, error) { func (f *IPLDFetcher) FetchHeader(tx *sqlx.Tx, c eth.HeaderModel) (ipfs.BlockModel, error) {
log.Debug("fetching header ipld") log.Debug("fetching header ipld")
headerBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey) headerBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil { if err != nil {
return models.IPLDModel{}, err return ipfs.BlockModel{}, err
} }
return models.IPLDModel{ return ipfs.BlockModel{
Data: headerBytes, Data: headerBytes,
Key: c.CID, CID: c.CID,
}, nil }, nil
} }
// FetchUncles fetches uncles // FetchUncles fetches uncles
func (f *IPLDFetcher) FetchUncles(tx *sqlx.Tx, cids []models.UncleModel) ([]models.IPLDModel, error) { func (f *IPLDFetcher) FetchUncles(tx *sqlx.Tx, cids []eth.UncleModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching uncle iplds") log.Debug("fetching uncle iplds")
uncleIPLDs := make([]models.IPLDModel, len(cids)) uncleIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids { for i, c := range cids {
uncleBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey) uncleBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
uncleIPLDs[i] = models.IPLDModel{ uncleIPLDs[i] = ipfs.BlockModel{
Data: uncleBytes, Data: uncleBytes,
Key: c.CID, CID: c.CID,
} }
} }
return uncleIPLDs, nil return uncleIPLDs, nil
} }
// FetchTrxs fetches transactions // FetchTrxs fetches transactions
func (f *IPLDFetcher) FetchTrxs(tx *sqlx.Tx, cids []models.TxModel) ([]models.IPLDModel, error) { func (f *IPLDFetcher) FetchTrxs(tx *sqlx.Tx, cids []eth.TxModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching transaction iplds") log.Debug("fetching transaction iplds")
trxIPLDs := make([]models.IPLDModel, len(cids)) trxIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids { for i, c := range cids {
txBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey) txBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
trxIPLDs[i] = models.IPLDModel{ trxIPLDs[i] = ipfs.BlockModel{
Data: txBytes, Data: txBytes,
Key: c.CID, CID: c.CID,
} }
} }
return trxIPLDs, nil return trxIPLDs, nil
} }
// FetchRcts fetches receipts // FetchRcts fetches receipts
func (f *IPLDFetcher) FetchRcts(tx *sqlx.Tx, cids []models.ReceiptModel) ([]models.IPLDModel, error) { func (f *IPLDFetcher) FetchRcts(tx *sqlx.Tx, cids []eth.ReceiptModel) ([]ipfs.BlockModel, error) {
log.Debug("fetching receipt iplds") log.Debug("fetching receipt iplds")
rctIPLDs := make([]models.IPLDModel, len(cids)) rctIPLDs := make([]ipfs.BlockModel, len(cids))
for i, c := range cids { for i, c := range cids {
rctBytes, err := shared.FetchIPLDByMhKey(tx, c.LeafMhKey) rctBytes, err := shared.FetchIPLDByMhKey(tx, c.MhKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
//nodeVal, err := DecodeLeafNode(rctBytes) rctIPLDs[i] = ipfs.BlockModel{
rctIPLDs[i] = models.IPLDModel{
Data: rctBytes, Data: rctBytes,
Key: c.LeafCID, CID: c.CID,
} }
} }
return rctIPLDs, nil return rctIPLDs, nil
} }
// FetchState fetches state nodes // FetchState fetches state nodes
func (f *IPLDFetcher) FetchState(tx *sqlx.Tx, cids []models.StateNodeModel) ([]StateNode, error) { func (f *IPLDFetcher) FetchState(tx *sqlx.Tx, cids []eth.StateNodeModel) ([]StateNode, error) {
log.Debug("fetching state iplds") log.Debug("fetching state iplds")
stateNodes := make([]StateNode, 0, len(cids)) stateNodes := make([]StateNode, 0, len(cids))
for _, stateNode := range cids { for _, stateNode := range cids {
@ -177,9 +180,9 @@ func (f *IPLDFetcher) FetchState(tx *sqlx.Tx, cids []models.StateNodeModel) ([]S
return nil, err return nil, err
} }
stateNodes = append(stateNodes, StateNode{ stateNodes = append(stateNodes, StateNode{
IPLD: models.IPLDModel{ IPLD: ipfs.BlockModel{
Data: stateBytes, Data: stateBytes,
Key: stateNode.CID, CID: stateNode.CID,
}, },
StateLeafKey: common.HexToHash(stateNode.StateKey), StateLeafKey: common.HexToHash(stateNode.StateKey),
Type: ResolveToNodeType(stateNode.NodeType), Type: ResolveToNodeType(stateNode.NodeType),
@ -190,7 +193,7 @@ func (f *IPLDFetcher) FetchState(tx *sqlx.Tx, cids []models.StateNodeModel) ([]S
} }
// FetchStorage fetches storage nodes // FetchStorage fetches storage nodes
func (f *IPLDFetcher) FetchStorage(tx *sqlx.Tx, cids []models.StorageNodeWithStateKeyModel) ([]StorageNode, error) { func (f *IPLDFetcher) FetchStorage(tx *sqlx.Tx, cids []eth.StorageNodeWithStateKeyModel) ([]StorageNode, error) {
log.Debug("fetching storage iplds") log.Debug("fetching storage iplds")
storageNodes := make([]StorageNode, 0, len(cids)) storageNodes := make([]StorageNode, 0, len(cids))
for _, storageNode := range cids { for _, storageNode := range cids {
@ -202,9 +205,9 @@ func (f *IPLDFetcher) FetchStorage(tx *sqlx.Tx, cids []models.StorageNodeWithSta
return nil, err return nil, err
} }
storageNodes = append(storageNodes, StorageNode{ storageNodes = append(storageNodes, StorageNode{
IPLD: models.IPLDModel{ IPLD: ipfs.BlockModel{
Data: storageBytes, Data: storageBytes,
Key: storageNode.CID, CID: storageNode.CID,
}, },
StateLeafKey: common.HexToHash(storageNode.StateKey), StateLeafKey: common.HexToHash(storageNode.StateKey),
StorageLeafKey: common.HexToHash(storageNode.StorageKey), StorageLeafKey: common.HexToHash(storageNode.StorageKey),

View File

@ -17,45 +17,35 @@
package eth_test package eth_test
import ( import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/jmoiron/sqlx"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/ipld-eth-indexer/pkg/shared"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth" eth2 "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers" "github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
"github.com/vulcanize/ipld-eth-server/pkg/eth"
"github.com/vulcanize/ipld-eth-server/pkg/eth/test_helpers"
) )
var _ = Describe("IPLDFetcher", func() { var _ = Describe("IPLDFetcher", func() {
var ( var (
db *sqlx.DB db *postgres.DB
pubAndIndexer interfaces.StateDiffIndexer pubAndIndexer *eth2.IPLDPublisher
fetcher *eth.IPLDFetcher fetcher *eth.IPLDFetcher
) )
Describe("Fetch", func() { Describe("Fetch", func() {
BeforeEach(func() { BeforeEach(func() {
var ( var err error
err error db, err = shared.SetupDB()
tx interfaces.Batch Expect(err).ToNot(HaveOccurred())
) pubAndIndexer = eth2.NewIPLDPublisher(db)
db = shared.SetupDB() err = pubAndIndexer.Publish(test_helpers.MockConvertedPayload)
pubAndIndexer = shared.SetupTestStateDiffIndexer(ctx, params.TestChainConfig, test_helpers.Genesis.Hash())
tx, err = pubAndIndexer.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
for _, node := range test_helpers.MockStateNodes {
err = pubAndIndexer.PushStateNode(tx, node, test_helpers.MockBlock.Hash().String())
Expect(err).ToNot(HaveOccurred())
}
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
fetcher = eth.NewIPLDFetcher(db) fetcher = eth.NewIPLDFetcher(db)
}) })
AfterEach(func() { AfterEach(func() {
shared.TearDownDB(db) eth.TearDownDB(db)
}) })
It("Fetches and returns IPLDs for the CIDs provided in the CIDWrapper", func() { It("Fetches and returns IPLDs for the CIDs provided in the CIDWrapper", func() {

View File

@ -19,31 +19,25 @@ package eth
import ( import (
"fmt" "fmt"
"github.com/ethereum/go-ethereum/statediff/trie_helpers"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/jmoiron/sqlx"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/lib/pq" "github.com/lib/pq"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
) )
const ( const (
// node type removed value.
// https://github.com/vulcanize/go-ethereum/blob/271f4d01e7e2767ffd8e0cd469bf545be96f2a84/statediff/indexer/helpers.go#L34
removedNode = 3
RetrieveHeadersByHashesPgStr = `SELECT cid, data RetrieveHeadersByHashesPgStr = `SELECT cid, data
FROM eth.header_cids FROM eth.header_cids
INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key) INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
WHERE block_hash = ANY($1::VARCHAR(66)[])` WHERE block_hash = ANY($1::VARCHAR(66)[])`
RetrieveHeadersByBlockNumberPgStr = `SELECT cid, data RetrieveHeadersByBlockNumberPgStr = `SELECT cid, data
FROM eth.header_cids FROM eth.header_cids
INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key) INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
WHERE block_number = $1` WHERE block_number = $1`
RetrieveHeaderByHashPgStr = `SELECT cid, data RetrieveHeaderByHashPgStr = `SELECT cid, data
FROM eth.header_cids FROM eth.header_cids
INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key) INNER JOIN public.blocks ON (header_cids.mh_key = blocks.key)
WHERE block_hash = $1` WHERE block_hash = $1`
RetrieveUnclesByHashesPgStr = `SELECT cid, data RetrieveUnclesByHashesPgStr = `SELECT cid, data
@ -52,12 +46,12 @@ const (
WHERE block_hash = ANY($1::VARCHAR(66)[])` WHERE block_hash = ANY($1::VARCHAR(66)[])`
RetrieveUnclesByBlockHashPgStr = `SELECT uncle_cids.cid, data RetrieveUnclesByBlockHashPgStr = `SELECT uncle_cids.cid, data
FROM eth.uncle_cids FROM eth.uncle_cids
INNER JOIN eth.header_cids ON (uncle_cids.header_id = header_cids.block_hash) INNER JOIN eth.header_cids ON (uncle_cids.header_id = header_cids.id)
INNER JOIN public.blocks ON (uncle_cids.mh_key = blocks.key) INNER JOIN public.blocks ON (uncle_cids.mh_key = blocks.key)
WHERE block_hash = $1` WHERE block_hash = $1`
RetrieveUnclesByBlockNumberPgStr = `SELECT uncle_cids.cid, data RetrieveUnclesByBlockNumberPgStr = `SELECT uncle_cids.cid, data
FROM eth.uncle_cids FROM eth.uncle_cids
INNER JOIN eth.header_cids ON (uncle_cids.header_id = header_cids.block_hash) INNER JOIN eth.header_cids ON (uncle_cids.header_id = header_cids.id)
INNER JOIN public.blocks ON (uncle_cids.mh_key = blocks.key) INNER JOIN public.blocks ON (uncle_cids.mh_key = blocks.key)
WHERE block_number = $1` WHERE block_number = $1`
RetrieveUncleByHashPgStr = `SELECT cid, data RetrieveUncleByHashPgStr = `SELECT cid, data
@ -70,13 +64,13 @@ const (
WHERE tx_hash = ANY($1::VARCHAR(66)[])` WHERE tx_hash = ANY($1::VARCHAR(66)[])`
RetrieveTransactionsByBlockHashPgStr = `SELECT transaction_cids.cid, data RetrieveTransactionsByBlockHashPgStr = `SELECT transaction_cids.cid, data
FROM eth.transaction_cids FROM eth.transaction_cids
INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash) INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key) INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key)
WHERE block_hash = $1 WHERE block_hash = $1
ORDER BY eth.transaction_cids.index ASC` ORDER BY eth.transaction_cids.index ASC`
RetrieveTransactionsByBlockNumberPgStr = `SELECT transaction_cids.cid, data RetrieveTransactionsByBlockNumberPgStr = `SELECT transaction_cids.cid, data
FROM eth.transaction_cids FROM eth.transaction_cids
INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash) INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key) INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key)
WHERE block_number = $1 WHERE block_number = $1
ORDER BY eth.transaction_cids.index ASC` ORDER BY eth.transaction_cids.index ASC`
@ -84,99 +78,97 @@ const (
FROM eth.transaction_cids FROM eth.transaction_cids
INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key) INNER JOIN public.blocks ON (transaction_cids.mh_key = blocks.key)
WHERE tx_hash = $1` WHERE tx_hash = $1`
RetrieveReceiptsByTxHashesPgStr = `SELECT receipt_cids.leaf_cid, data RetrieveReceiptsByTxHashesPgStr = `SELECT receipt_cids.cid, data
FROM eth.receipt_cids FROM eth.receipt_cids
INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash) INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.id)
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key) INNER JOIN public.blocks ON (receipt_cids.mh_key = blocks.key)
WHERE tx_hash = ANY($1::VARCHAR(66)[])` WHERE tx_hash = ANY($1::VARCHAR(66)[])`
RetrieveReceiptsByBlockHashPgStr = `SELECT receipt_cids.leaf_cid, data, eth.transaction_cids.tx_hash RetrieveReceiptsByBlockHashPgStr = `SELECT receipt_cids.cid, data
FROM eth.receipt_cids FROM eth.receipt_cids
INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash) INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.id)
INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash) INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key) INNER JOIN public.blocks ON (receipt_cids.mh_key = blocks.key)
WHERE block_hash = $1 WHERE block_hash = $1
ORDER BY eth.transaction_cids.index ASC` ORDER BY eth.transaction_cids.index ASC`
RetrieveReceiptsByBlockNumberPgStr = `SELECT receipt_cids.leaf_cid, data RetrieveReceiptsByBlockNumberPgStr = `SELECT receipt_cids.cid, data
FROM eth.receipt_cids FROM eth.receipt_cids
INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash) INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.id)
INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash) INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.id)
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key) INNER JOIN public.blocks ON (receipt_cids.mh_key = blocks.key)
WHERE block_number = $1 WHERE block_number = $1
ORDER BY eth.transaction_cids.index ASC` ORDER BY eth.transaction_cids.index ASC`
RetrieveReceiptByTxHashPgStr = `SELECT receipt_cids.leaf_cid, data RetrieveReceiptByTxHashPgStr = `SELECT receipt_cids.cid, data
FROM eth.receipt_cids FROM eth.receipt_cids
INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.tx_hash) INNER JOIN eth.transaction_cids ON (receipt_cids.tx_id = transaction_cids.id)
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = blocks.key) INNER JOIN public.blocks ON (receipt_cids.mh_key = blocks.key)
WHERE tx_hash = $1` WHERE tx_hash = $1`
RetrieveAccountByLeafKeyAndBlockHashPgStr = `SELECT state_cids.cid, data, state_cids.node_type RetrieveAccountByLeafKeyAndBlockHashPgStr = `SELECT state_cids.cid,
data,
was_state_removed(state_path, block_number, $2) AS removed
FROM eth.state_cids FROM eth.state_cids
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
INNER JOIN public.blocks ON (state_cids.mh_key = blocks.key) INNER JOIN public.blocks ON (state_cids.mh_key = blocks.key)
WHERE state_leaf_key = $1 WHERE state_leaf_key = $1
AND block_number <= (SELECT block_number AND block_number <= (SELECT block_number
FROM eth.header_cids FROM eth.header_cids
WHERE block_hash = $2) WHERE block_hash = $2)
AND header_cids.block_hash = (SELECT canonical_header_hash(block_number)) AND header_cids.id = (SELECT canonical_header_id(block_number))
ORDER BY block_number DESC ORDER BY block_number DESC
LIMIT 1` LIMIT 1`
RetrieveAccountByLeafKeyAndBlockNumberPgStr = `SELECT state_cids.cid, data, state_cids.node_type RetrieveAccountByLeafKeyAndBlockNumberPgStr = `SELECT state_cids.cid,
data,
was_state_removed(state_path, block_number, (SELECT block_hash
FROM eth.header_cids
WHERE block_number = $2
LIMIT 1)) AS removed
FROM eth.state_cids FROM eth.state_cids
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash) INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
INNER JOIN public.blocks ON (state_cids.mh_key = blocks.key) INNER JOIN public.blocks ON (state_cids.mh_key = blocks.key)
WHERE state_leaf_key = $1 WHERE state_leaf_key = $1
AND block_number <= $2 AND block_number <= $2
ORDER BY block_number DESC ORDER BY block_number DESC
LIMIT 1` LIMIT 1`
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr = `SELECT storage_cids.cid,
data,
was_storage_removed(storage_path, block_number, (SELECT block_hash
FROM eth.header_cids
WHERE block_number = $3
LIMIT 1)) AS removed
FROM eth.storage_cids FROM eth.storage_cids
INNER JOIN eth.state_cids ON ( INNER JOIN eth.state_cids ON (storage_cids.state_id = state_cids.id)
storage_cids.header_id = state_cids.header_id INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
AND storage_cids.state_path = state_cids.state_path
)
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
INNER JOIN public.blocks ON (storage_cids.mh_key = blocks.key) INNER JOIN public.blocks ON (storage_cids.mh_key = blocks.key)
WHERE state_leaf_key = $1 WHERE state_leaf_key = $1
AND storage_leaf_key = $2 AND storage_leaf_key = $2
AND block_number <= $3 AND block_number <= $3
ORDER BY block_number DESC ORDER BY block_number DESC
LIMIT 1` LIMIT 1`
RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `SELECT storage_cids.cid, data, storage_cids.node_type, was_state_leaf_removed($1, $3) AS state_leaf_removed RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr = `SELECT storage_cids.cid,
data,
was_storage_removed(storage_path, block_number, $3) AS removed
FROM eth.storage_cids FROM eth.storage_cids
INNER JOIN eth.state_cids ON ( INNER JOIN eth.state_cids ON (storage_cids.state_id = state_cids.id)
storage_cids.header_id = state_cids.header_id INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.id)
AND storage_cids.state_path = state_cids.state_path
)
INNER JOIN eth.header_cids ON (state_cids.header_id = header_cids.block_hash)
INNER JOIN public.blocks ON (storage_cids.mh_key = blocks.key) INNER JOIN public.blocks ON (storage_cids.mh_key = blocks.key)
WHERE state_leaf_key = $1 WHERE state_leaf_key = $1
AND storage_leaf_key = $2 AND storage_leaf_key = $2
AND block_number <= (SELECT block_number AND block_number <= (SELECT block_number
FROM eth.header_cids FROM eth.header_cids
WHERE block_hash = $3) WHERE block_hash = $3)
AND header_cids.block_hash = (SELECT canonical_header_hash(block_number)) AND header_cids.id = (SELECT canonical_header_id(block_number))
ORDER BY block_number DESC ORDER BY block_number DESC
LIMIT 1` LIMIT 1`
) )
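All of the queries above share the same shape: positional parameters for the leaf key, block hash or block number, joins through eth.header_cids and public.blocks to reach the raw IPLD bytes, and ORDER BY block_number DESC LIMIT 1 to pick the latest node at or before the target block. The sketch below shows how one of them might be executed directly; it is illustrative only, assumes it lives in the same package as the constants (so sqlx is already imported), targets the removed-flag form of the account query, and the helper name is invented.

func exampleAccountAt(db *sqlx.DB, stateLeafKey, blockHash string) ([]byte, error) {
	// $1 = hex-encoded keccak(address) state leaf key, $2 = hex-encoded block hash
	var row struct {
		CID     string `db:"cid"`
		Data    []byte `db:"data"`
		Removed bool   `db:"removed"`
	}
	if err := db.Get(&row, RetrieveAccountByLeafKeyAndBlockHashPgStr, stateLeafKey, blockHash); err != nil {
		return nil, err
	}
	if row.Removed {
		return nil, nil // the account was deleted at or before this block
	}
	return row.Data, nil // raw RLP of the state trie leaf node
}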
var EmptyNodeValue = make([]byte, common.HashLength)
type rctIpldResult struct {
LeafCID string `db:"leaf_cid"`
Data []byte `db:"data"`
TxHash string `db:"tx_hash"`
}
type ipldResult struct { type ipldResult struct {
CID string `db:"cid"` CID string `db:"cid"`
Data []byte `db:"data"` Data []byte `db:"data"`
TxHash string `db:"tx_hash"`
} }
type IPLDRetriever struct { type IPLDRetriever struct {
db *sqlx.DB db *postgres.DB
} }
func NewIPLDRetriever(db *sqlx.DB) *IPLDRetriever { func NewIPLDRetriever(db *postgres.DB) *IPLDRetriever {
return &IPLDRetriever{ return &IPLDRetriever{
db: db, db: db,
} }
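A minimal wiring sketch for the constructor above, using the *sqlx.DB variant; the connection string reuses the vdbm/password/8077 credentials from this repo's CI setup, while the database name is an assumption.

db, err := sqlx.Connect("postgres", "postgres://vdbm:password@127.0.0.1:8077/vulcanize_testing?sslmode=disable")
if err != nil {
	log.Fatal(err)
}
retriever := NewIPLDRetriever(db)
if _, _, err := retriever.RetrieveTransactionByTxHash(common.HexToHash("0x01")); err != nil {
	log.Println("lookup failed:", err)
}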
@ -333,26 +325,9 @@ func (r *IPLDRetriever) RetrieveTransactionByTxHash(hash common.Hash) (string, [
return txResult.CID, txResult.Data, r.db.Get(txResult, RetrieveTransactionByHashPgStr, hash.Hex()) return txResult.CID, txResult.Data, r.db.Get(txResult, RetrieveTransactionByHashPgStr, hash.Hex())
} }
// DecodeLeafNode decodes the leaf node data
func DecodeLeafNode(node []byte) ([]byte, error) {
var nodeElements []interface{}
if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
return nil, err
}
ty, err := trie_helpers.CheckKeyType(nodeElements)
if err != nil {
return nil, err
}
if ty != sdtypes.Leaf {
return nil, fmt.Errorf("expected leaf node but found %s", ty)
}
return nodeElements[1].([]byte), nil
}
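A state, storage or receipt trie leaf RLP-decodes to a two-element list [compactPartialPath, value], and DecodeLeafNode above returns only the value after checking the node type. A hypothetical use on a receipt leaf (leafRLP is assumed to come from one of the receipt queries; UnmarshalBinary assumes a go-ethereum version with typed receipts):

receiptRLP, err := DecodeLeafNode(leafRLP)
if err != nil {
	log.Fatal(err) // not a leaf node, or undecodable RLP
}
var rct types.Receipt
if err := rct.UnmarshalBinary(receiptRLP); err != nil {
	log.Fatal(err)
}
fmt.Println(rct.Status, rct.CumulativeGasUsed)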
// RetrieveReceiptsByTxHashes returns the cids and rlp bytes for the receipts corresponding to the provided tx hashes // RetrieveReceiptsByTxHashes returns the cids and rlp bytes for the receipts corresponding to the provided tx hashes
func (r *IPLDRetriever) RetrieveReceiptsByTxHashes(hashes []common.Hash) ([]string, [][]byte, error) { func (r *IPLDRetriever) RetrieveReceiptsByTxHashes(hashes []common.Hash) ([]string, [][]byte, error) {
rctResults := make([]rctIpldResult, 0) rctResults := make([]ipldResult, 0)
hashStrs := make([]string, len(hashes)) hashStrs := make([]string, len(hashes))
for i, hash := range hashes { for i, hash := range hashes {
hashStrs[i] = hash.Hex() hashStrs[i] = hash.Hex()
@ -363,80 +338,52 @@ func (r *IPLDRetriever) RetrieveReceiptsByTxHashes(hashes []common.Hash) ([]stri
cids := make([]string, len(rctResults)) cids := make([]string, len(rctResults))
rcts := make([][]byte, len(rctResults)) rcts := make([][]byte, len(rctResults))
for i, res := range rctResults { for i, res := range rctResults {
cids[i] = res.LeafCID cids[i] = res.CID
nodeVal, err := DecodeLeafNode(res.Data) rcts[i] = res.Data
if err != nil {
return nil, nil, err
}
rcts[i] = nodeVal
} }
return cids, rcts, nil return cids, rcts, nil
} }
// RetrieveReceiptsByBlockHash returns the cids and rlp bytes for the receipts corresponding to the provided block hash. // RetrieveReceiptsByBlockHash returns the cids and rlp bytes for the receipts corresponding to the provided block hash
// cid returned corresponds to the leaf node data which contains the receipt. func (r *IPLDRetriever) RetrieveReceiptsByBlockHash(hash common.Hash) ([]string, [][]byte, error) {
func (r *IPLDRetriever) RetrieveReceiptsByBlockHash(hash common.Hash) ([]string, [][]byte, []common.Hash, error) { rctResults := make([]ipldResult, 0)
rctResults := make([]rctIpldResult, 0)
if err := r.db.Select(&rctResults, RetrieveReceiptsByBlockHashPgStr, hash.Hex()); err != nil { if err := r.db.Select(&rctResults, RetrieveReceiptsByBlockHashPgStr, hash.Hex()); err != nil {
return nil, nil, nil, err return nil, nil, err
} }
cids := make([]string, len(rctResults)) cids := make([]string, len(rctResults))
rcts := make([][]byte, len(rctResults)) rcts := make([][]byte, len(rctResults))
txs := make([]common.Hash, len(rctResults))
for i, res := range rctResults { for i, res := range rctResults {
cids[i] = res.LeafCID cids[i] = res.CID
nodeVal, err := DecodeLeafNode(res.Data) rcts[i] = res.Data
if err != nil {
return nil, nil, nil, err
}
rcts[i] = nodeVal
txs[i] = common.HexToHash(res.TxHash)
} }
return cids, rcts, nil
return cids, rcts, txs, nil
} }
// RetrieveReceiptsByBlockNumber returns the cids and rlp bytes for the receipts corresponding to the provided block number. // RetrieveReceiptsByBlockNumber returns the cids and rlp bytes for the receipts corresponding to the provided block number
// cid returned corresponds to the leaf node data which contains the receipt.
func (r *IPLDRetriever) RetrieveReceiptsByBlockNumber(number uint64) ([]string, [][]byte, error) { func (r *IPLDRetriever) RetrieveReceiptsByBlockNumber(number uint64) ([]string, [][]byte, error) {
rctResults := make([]rctIpldResult, 0) rctResults := make([]ipldResult, 0)
if err := r.db.Select(&rctResults, RetrieveReceiptsByBlockNumberPgStr, number); err != nil { if err := r.db.Select(&rctResults, RetrieveReceiptsByBlockNumberPgStr, number); err != nil {
return nil, nil, err return nil, nil, err
} }
cids := make([]string, len(rctResults)) cids := make([]string, len(rctResults))
rcts := make([][]byte, len(rctResults)) rcts := make([][]byte, len(rctResults))
for i, res := range rctResults { for i, res := range rctResults {
cids[i] = res.LeafCID cids[i] = res.CID
nodeVal, err := DecodeLeafNode(res.Data) rcts[i] = res.Data
if err != nil {
return nil, nil, err
}
rcts[i] = nodeVal
} }
return cids, rcts, nil return cids, rcts, nil
} }
// RetrieveReceiptByHash returns the cid and rlp bytes for the receipt corresponding to the provided tx hash. // RetrieveReceiptByHash returns the cid and rlp bytes for the receipt corresponding to the provided tx hash
// cid returned corresponds to the leaf node data which contains the receipt.
func (r *IPLDRetriever) RetrieveReceiptByHash(hash common.Hash) (string, []byte, error) { func (r *IPLDRetriever) RetrieveReceiptByHash(hash common.Hash) (string, []byte, error) {
rctResult := new(rctIpldResult) rctResult := new(ipldResult)
if err := r.db.Select(&rctResult, RetrieveReceiptByTxHashPgStr, hash.Hex()); err != nil { return rctResult.CID, rctResult.Data, r.db.Get(rctResult, RetrieveReceiptByTxHashPgStr, hash.Hex())
return "", nil, err
}
nodeVal, err := DecodeLeafNode(rctResult.Data)
if err != nil {
return "", nil, err
}
return rctResult.LeafCID, nodeVal, nil
} }
type nodeInfo struct { type nodeInfo struct {
CID string `db:"cid"` CID string `db:"cid"`
Data []byte `db:"data"` Data []byte `db:"data"`
NodeType int `db:"node_type"` Removed bool `db:"removed"`
StateLeafRemoved bool `db:"state_leaf_removed"`
} }
// RetrieveAccountByAddressAndBlockHash returns the cid and rlp bytes for the account corresponding to the provided address and block hash // RetrieveAccountByAddressAndBlockHash returns the cid and rlp bytes for the account corresponding to the provided address and block hash
@ -447,11 +394,9 @@ func (r *IPLDRetriever) RetrieveAccountByAddressAndBlockHash(address common.Addr
if err := r.db.Get(accountResult, RetrieveAccountByLeafKeyAndBlockHashPgStr, leafKey.Hex(), hash.Hex()); err != nil { if err := r.db.Get(accountResult, RetrieveAccountByLeafKeyAndBlockHashPgStr, leafKey.Hex(), hash.Hex()); err != nil {
return "", nil, err return "", nil, err
} }
if accountResult.Removed {
if accountResult.NodeType == removedNode { return "", []byte{}, nil
return "", EmptyNodeValue, nil
} }
var i []interface{} var i []interface{}
if err := rlp.DecodeBytes(accountResult.Data, &i); err != nil { if err := rlp.DecodeBytes(accountResult.Data, &i); err != nil {
return "", nil, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error()) return "", nil, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
@ -470,11 +415,9 @@ func (r *IPLDRetriever) RetrieveAccountByAddressAndBlockNumber(address common.Ad
if err := r.db.Get(accountResult, RetrieveAccountByLeafKeyAndBlockNumberPgStr, leafKey.Hex(), number); err != nil { if err := r.db.Get(accountResult, RetrieveAccountByLeafKeyAndBlockNumberPgStr, leafKey.Hex(), number); err != nil {
return "", nil, err return "", nil, err
} }
if accountResult.Removed {
if accountResult.NodeType == removedNode { return "", []byte{}, nil
return "", EmptyNodeValue, nil
} }
var i []interface{} var i []interface{}
if err := rlp.DecodeBytes(accountResult.Data, &i); err != nil { if err := rlp.DecodeBytes(accountResult.Data, &i); err != nil {
return "", nil, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error()) return "", nil, fmt.Errorf("error decoding state leaf node rlp: %s", err.Error())
@ -485,26 +428,25 @@ func (r *IPLDRetriever) RetrieveAccountByAddressAndBlockNumber(address common.Ad
return accountResult.CID, i[1].([]byte), nil return accountResult.CID, i[1].([]byte), nil
} }
// RetrieveStorageAtByAddressAndStorageSlotAndBlockHash returns the cid and rlp bytes for the storage value corresponding to the provided address, storage slot, and block hash // RetrieveStorageAtByAddressAndStorageKeyAndBlockHash returns the cid and rlp bytes for the storage value corresponding to the provided address, storage key, and block hash
func (r *IPLDRetriever) RetrieveStorageAtByAddressAndStorageSlotAndBlockHash(address common.Address, key, hash common.Hash) (string, []byte, []byte, error) { func (r *IPLDRetriever) RetrieveStorageAtByAddressAndStorageKeyAndBlockHash(address common.Address, storageLeafKey, hash common.Hash) (string, []byte, error) {
storageResult := new(nodeInfo) storageResult := new(nodeInfo)
stateLeafKey := crypto.Keccak256Hash(address.Bytes()) stateLeafKey := crypto.Keccak256Hash(address.Bytes())
storageHash := crypto.Keccak256Hash(key.Bytes()) if err := r.db.Get(storageResult, RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr, stateLeafKey.Hex(), storageLeafKey.Hex(), hash.Hex()); err != nil {
if err := r.db.Get(storageResult, RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockHashPgStr, stateLeafKey.Hex(), storageHash.Hex(), hash.Hex()); err != nil { return "", nil, err
return "", nil, nil, err
} }
if storageResult.StateLeafRemoved || storageResult.NodeType == removedNode { if storageResult.Removed {
return "", EmptyNodeValue, EmptyNodeValue, nil return "", []byte{}, nil
} }
var i []interface{} var i []interface{}
if err := rlp.DecodeBytes(storageResult.Data, &i); err != nil { if err := rlp.DecodeBytes(storageResult.Data, &i); err != nil {
err = fmt.Errorf("error decoding storage leaf node rlp: %s", err.Error()) err = fmt.Errorf("error decoding storage leaf node rlp: %s", err.Error())
return "", nil, nil, err return "", nil, err
} }
if len(i) != 2 { if len(i) != 2 {
return "", nil, nil, fmt.Errorf("eth IPLDRetriever expected storage leaf node rlp to decode into two elements") return "", nil, fmt.Errorf("eth IPLDRetriever expected storage leaf node rlp to decode into two elements")
} }
return storageResult.CID, storageResult.Data, i[1].([]byte), nil return storageResult.CID, i[1].([]byte), nil
} }
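Both lookup keys used above are keccak hashes: the state trie is keyed by keccak(address) and a contract's storage trie by keccak(slot). A short derivation sketch with arbitrary example values:

addr := common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
slot := common.HexToHash("0x0") // storage slot 0
stateLeafKey := crypto.Keccak256Hash(addr.Bytes())
storageLeafKey := crypto.Keccak256Hash(slot.Bytes())
fmt.Println(stateLeafKey.Hex(), storageLeafKey.Hex()) // the values bound to $1 and $2 above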
// RetrieveStorageAtByAddressAndStorageKeyAndBlockNumber returns the cid and rlp bytes for the storage value corresponding to the provided address, storage key, and block number // RetrieveStorageAtByAddressAndStorageKeyAndBlockNumber returns the cid and rlp bytes for the storage value corresponding to the provided address, storage key, and block number
@ -515,9 +457,8 @@ func (r *IPLDRetriever) RetrieveStorageAtByAddressAndStorageKeyAndBlockNumber(ad
if err := r.db.Get(storageResult, RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr, stateLeafKey.Hex(), storageLeafKey.Hex(), number); err != nil { if err := r.db.Get(storageResult, RetrieveStorageLeafByAddressHashAndLeafKeyAndBlockNumberPgStr, stateLeafKey.Hex(), storageLeafKey.Hex(), number); err != nil {
return "", nil, err return "", nil, err
} }
if storageResult.Removed {
if storageResult.StateLeafRemoved || storageResult.NodeType == removedNode { return "", []byte{}, nil
return "", EmptyNodeValue, nil
} }
var i []interface{} var i []interface{}
if err := rlp.DecodeBytes(storageResult.Data, &i); err != nil { if err := rlp.DecodeBytes(storageResult.Data, &i); err != nil {

View File

@ -17,11 +17,36 @@
package eth package eth
import ( import (
"github.com/ethereum/go-ethereum/statediff/indexer/models" . "github.com/onsi/gomega"
"github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
) )
// TearDownDB is used to tear down the watcher dbs after tests
func TearDownDB(db *postgres.DB) {
tx, err := db.Beginx()
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.header_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.transaction_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.receipt_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.state_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.storage_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM blocks`)
Expect(err).NotTo(HaveOccurred())
err = tx.Commit()
Expect(err).NotTo(HaveOccurred())
}
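A minimal sketch, assuming a Ginkgo/Gomega suite in the same package with a shared *postgres.DB named db, of where TearDownDB is typically hooked in so every spec starts from empty tables:

var _ = AfterEach(func() {
	TearDownDB(db) // clears header, tx, receipt, state and storage tables between specs
})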
// TxModelsContainsCID used to check if a list of TxModels contains a specific cid string // TxModelsContainsCID used to check if a list of TxModels contains a specific cid string
func TxModelsContainsCID(txs []models.TxModel, cid string) bool { func TxModelsContainsCID(txs []eth.TxModel, cid string) bool {
for _, tx := range txs { for _, tx := range txs {
if tx.CID == cid { if tx.CID == cid {
return true return true
@ -30,10 +55,10 @@ func TxModelsContainsCID(txs []models.TxModel, cid string) bool {
return false return false
} }
// ReceiptModelsContainsCID used to check if a list of ReceiptModel contains a specific cid string // ListContainsBytes used to check if a list of byte arrays contains a particular byte array
func ReceiptModelsContainsCID(rcts []models.ReceiptModel, cid string) bool { func ReceiptModelsContainsCID(rcts []eth.ReceiptModel, cid string) bool {
for _, rct := range rcts { for _, rct := range rcts {
if rct.LeafCID == cid { if rct.CID == cid {
return true return true
} }
} }

View File

@ -62,7 +62,6 @@ data function sig: 73d4a13a
// the returned hash chain is ordered head->parent. // the returned hash chain is ordered head->parent.
func MakeChain(n int, parent *types.Block, chainGen func(int, *core.BlockGen)) ([]*types.Block, []types.Receipts, *core.BlockChain) { func MakeChain(n int, parent *types.Block, chainGen func(int, *core.BlockGen)) ([]*types.Block, []types.Receipts, *core.BlockChain) {
config := params.TestChainConfig config := params.TestChainConfig
config.LondonBlock = big.NewInt(100)
blocks, receipts := core.GenerateChain(config, parent, ethash.NewFaker(), Testdb, n, chainGen) blocks, receipts := core.GenerateChain(config, parent, ethash.NewFaker(), Testdb, n, chainGen)
chain, _ := core.NewBlockChain(Testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil) chain, _ := core.NewBlockChain(Testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
return append([]*types.Block{parent}, blocks...), receipts, chain return append([]*types.Block{parent}, blocks...), receipts, chain

View File

@ -17,28 +17,32 @@
package test_helpers package test_helpers
import ( import (
"bytes"
"crypto/ecdsa" "crypto/ecdsa"
"crypto/elliptic" "crypto/elliptic"
"crypto/rand" "crypto/rand"
"math/big" "math/big"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ethereum/go-ethereum/trie"
"github.com/vulcanize/ipld-eth-indexer/pkg/shared"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld" "github.com/ethereum/go-ethereum/statediff/testhelpers"
"github.com/ethereum/go-ethereum/statediff/indexer/models" "github.com/ipfs/go-block-format"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
testhelpers "github.com/ethereum/go-ethereum/statediff/test_helpers"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/ethereum/go-ethereum/trie"
blocks "github.com/ipfs/go-block-format"
"github.com/multiformats/go-multihash" "github.com/multiformats/go-multihash"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth" "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs/ipld"
eth2 "github.com/vulcanize/ipld-eth-server/pkg/eth"
) )
// Test variables // Test variables
@ -54,7 +58,7 @@ var (
Difficulty: big.NewInt(5000000), Difficulty: big.NewInt(5000000),
Extra: []byte{}, Extra: []byte{},
} }
MockTransactions, MockReceipts, SenderAddr = createLegacyTransactionsAndReceipts() MockTransactions, MockReceipts, SenderAddr = createTransactionsAndReceipts()
MockUncles = []*types.Header{ MockUncles = []*types.Header{
{ {
Time: 1, Time: 1,
@ -76,7 +80,7 @@ var (
}, },
} }
ReceiptsRlp, _ = rlp.EncodeToBytes(MockReceipts) ReceiptsRlp, _ = rlp.EncodeToBytes(MockReceipts)
MockBlock = createNewBlock(&MockHeader, MockTransactions, MockUncles, MockReceipts, new(trie.Trie)) MockBlock = types.NewBlock(&MockHeader, MockTransactions, MockUncles, MockReceipts, new(trie.Trie))
MockHeaderRlp, _ = rlp.EncodeToBytes(MockBlock.Header()) MockHeaderRlp, _ = rlp.EncodeToBytes(MockBlock.Header())
MockChildHeader = types.Header{ MockChildHeader = types.Header{
Time: 0, Time: 0,
@ -92,8 +96,6 @@ var (
MockChildRlp, _ = rlp.EncodeToBytes(MockChild.Header()) MockChildRlp, _ = rlp.EncodeToBytes(MockChild.Header())
Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592") Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
AnotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593") AnotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
AnotherAddress1 = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476594")
AnotherAddress2 = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476596")
ContractAddress = crypto.CreateAddress(SenderAddr, MockTransactions[2].Nonce()) ContractAddress = crypto.CreateAddress(SenderAddr, MockTransactions[2].Nonce())
ContractHash = crypto.Keccak256Hash(ContractAddress.Bytes()).String() ContractHash = crypto.Keccak256Hash(ContractAddress.Bytes()).String()
MockContractByteCode = []byte{0, 1, 2, 3, 4, 5} MockContractByteCode = []byte{0, 1, 2, 3, 4, 5}
@ -101,97 +103,37 @@ var (
mockTopic12 = common.HexToHash("0x06") mockTopic12 = common.HexToHash("0x06")
mockTopic21 = common.HexToHash("0x05") mockTopic21 = common.HexToHash("0x05")
mockTopic22 = common.HexToHash("0x07") mockTopic22 = common.HexToHash("0x07")
mockTopic31 = common.HexToHash("0x08")
mockTopic41 = common.HexToHash("0x09")
mockTopic42 = common.HexToHash("0x0a")
mockTopic43 = common.HexToHash("0x0b")
mockTopic51 = common.HexToHash("0x0c")
mockTopic61 = common.HexToHash("0x0d")
MockLog1 = &types.Log{ MockLog1 = &types.Log{
Address: Address, Address: Address,
Topics: []common.Hash{mockTopic11, mockTopic12}, Topics: []common.Hash{mockTopic11, mockTopic12},
Data: []byte{}, Data: []byte{},
BlockNumber: BlockNumber.Uint64(),
TxIndex: 0,
Index: 0,
} }
MockLog2 = &types.Log{ MockLog2 = &types.Log{
Address: AnotherAddress, Address: AnotherAddress,
Topics: []common.Hash{mockTopic21, mockTopic22}, Topics: []common.Hash{mockTopic21, mockTopic22},
Data: []byte{}, Data: []byte{},
BlockNumber: BlockNumber.Uint64(),
TxIndex: 1,
Index: 1,
} }
MockLog3 = &types.Log{ HeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, MockHeaderRlp, multihash.KECCAK_256)
Address: AnotherAddress1, HeaderMhKey = shared.MultihashKeyFromCID(HeaderCID)
Topics: []common.Hash{mockTopic31}, Trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(0), multihash.KECCAK_256)
Data: []byte{}, Trx1MhKey = shared.MultihashKeyFromCID(Trx1CID)
BlockNumber: BlockNumber.Uint64(), Trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(1), multihash.KECCAK_256)
TxIndex: 2, Trx2MhKey = shared.MultihashKeyFromCID(Trx2CID)
Index: 2, Trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, MockTransactions.GetRlp(2), multihash.KECCAK_256)
} Trx3MhKey = shared.MultihashKeyFromCID(Trx3CID)
Rct1CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(0), multihash.KECCAK_256)
MockLog4 = &types.Log{ Rct1MhKey = shared.MultihashKeyFromCID(Rct1CID)
Address: AnotherAddress1, Rct2CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(1), multihash.KECCAK_256)
Topics: []common.Hash{mockTopic41, mockTopic42, mockTopic43}, Rct2MhKey = shared.MultihashKeyFromCID(Rct2CID)
Data: []byte{}, Rct3CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, MockReceipts.GetRlp(2), multihash.KECCAK_256)
BlockNumber: BlockNumber.Uint64(), Rct3MhKey = shared.MultihashKeyFromCID(Rct3CID)
TxIndex: 2, State1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, ContractLeafNode, multihash.KECCAK_256)
Index: 3, State1MhKey = shared.MultihashKeyFromCID(State1CID)
} State2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, AccountLeafNode, multihash.KECCAK_256)
MockLog5 = &types.Log{ State2MhKey = shared.MultihashKeyFromCID(State2CID)
Address: AnotherAddress1, StorageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, StorageLeafNode, multihash.KECCAK_256)
Topics: []common.Hash{mockTopic51}, StorageMhKey = shared.MultihashKeyFromCID(StorageCID)
Data: []byte{}, MockTrxMeta = []eth.TxModel{
BlockNumber: BlockNumber.Uint64(),
TxIndex: 2,
Index: 4,
}
MockLog6 = &types.Log{
Address: AnotherAddress2,
Topics: []common.Hash{mockTopic61},
Data: []byte{},
BlockNumber: BlockNumber.Uint64(),
TxIndex: 3,
Index: 5,
}
Tx1 = GetTxnRlp(0, MockTransactions)
Tx2 = GetTxnRlp(1, MockTransactions)
Tx3 = GetTxnRlp(2, MockTransactions)
Tx4 = GetTxnRlp(3, MockTransactions)
rctCIDs, rctIPLDData, _ = eth.GetRctLeafNodeData(MockReceipts)
HeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, MockHeaderRlp, multihash.KECCAK_256)
HeaderMhKey = shared.MultihashKeyFromCID(HeaderCID)
Trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, Tx1, multihash.KECCAK_256)
Trx1MhKey = shared.MultihashKeyFromCID(Trx1CID)
Trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, Tx2, multihash.KECCAK_256)
Trx2MhKey = shared.MultihashKeyFromCID(Trx2CID)
Trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, Tx3, multihash.KECCAK_256)
Trx3MhKey = shared.MultihashKeyFromCID(Trx3CID)
Trx4CID, _ = ipld.RawdataToCid(ipld.MEthTx, Tx4, multihash.KECCAK_256)
Trx4MhKey = shared.MultihashKeyFromCID(Trx4CID)
Rct1CID = rctCIDs[0]
Rct1MhKey = shared.MultihashKeyFromCID(Rct1CID)
Rct2CID = rctCIDs[1]
Rct2MhKey = shared.MultihashKeyFromCID(Rct2CID)
Rct3CID = rctCIDs[2]
Rct3MhKey = shared.MultihashKeyFromCID(Rct3CID)
Rct4CID = rctCIDs[3]
Rct4MhKey = shared.MultihashKeyFromCID(Rct4CID)
State1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, ContractLeafNode, multihash.KECCAK_256)
State1MhKey = shared.MultihashKeyFromCID(State1CID)
State2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, AccountLeafNode, multihash.KECCAK_256)
State2MhKey = shared.MultihashKeyFromCID(State2CID)
StorageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, StorageLeafNode, multihash.KECCAK_256)
StorageMhKey = shared.MultihashKeyFromCID(StorageCID)
Rct1IPLD = rctIPLDData[0]
Rct2IPLD = rctIPLDData[1]
Rct3IPLD = rctIPLDData[2]
Rct4IPLD = rctIPLDData[3]
MockTrxMeta = []models.TxModel{
{ {
CID: "", // This is empty until we go to publish to ipfs CID: "", // This is empty until we go to publish to ipfs
MhKey: "", MhKey: "",
@ -219,17 +161,8 @@ var (
TxHash: MockTransactions[2].Hash().String(), TxHash: MockTransactions[2].Hash().String(),
Data: MockContractByteCode, Data: MockContractByteCode,
}, },
{
CID: "",
MhKey: "",
Src: SenderAddr.Hex(),
Dst: "",
Index: 3,
TxHash: MockTransactions[3].Hash().String(),
Data: []byte{},
},
} }
MockTrxMetaPostPublsh = []models.TxModel{ MockTrxMetaPostPublsh = []eth.TxModel{
{ {
CID: Trx1CID.String(), // This is empty until we go to publish to ipfs CID: Trx1CID.String(), // This is empty until we go to publish to ipfs
MhKey: Trx1MhKey, MhKey: Trx1MhKey,
@ -257,67 +190,83 @@ var (
TxHash: MockTransactions[2].Hash().String(), TxHash: MockTransactions[2].Hash().String(),
Data: MockContractByteCode, Data: MockContractByteCode,
}, },
{
CID: Trx4CID.String(),
MhKey: Trx4MhKey,
Src: SenderAddr.Hex(),
Dst: AnotherAddress1.String(),
Index: 3,
TxHash: MockTransactions[3].Hash().String(),
Data: []byte{},
},
} }
MockRctMeta = []models.ReceiptModel{ MockRctMeta = []eth.ReceiptModel{
{ {
LeafCID: "", CID: "",
LeafMhKey: "", MhKey: "",
Topic0s: []string{
mockTopic11.String(),
},
Topic1s: []string{
mockTopic12.String(),
},
Contract: "", Contract: "",
ContractHash: "", ContractHash: "",
LogContracts: []string{
Address.String(),
},
}, },
{ {
LeafCID: "", CID: "",
LeafMhKey: "", MhKey: "",
Topic0s: []string{
mockTopic21.String(),
},
Topic1s: []string{
mockTopic22.String(),
},
Contract: "", Contract: "",
ContractHash: "", ContractHash: "",
LogContracts: []string{
AnotherAddress.String(),
},
}, },
{ {
LeafCID: "", CID: "",
LeafMhKey: "", MhKey: "",
Contract: ContractAddress.String(), Contract: ContractAddress.String(),
ContractHash: ContractHash, ContractHash: ContractHash,
}, LogContracts: []string{},
{
LeafCID: "",
LeafMhKey: "",
Contract: "",
ContractHash: "",
}, },
} }
MockRctMetaPostPublish = []eth.ReceiptModel{
MockRctMetaPostPublish = []models.ReceiptModel{
{ {
LeafCID: Rct1CID.String(), CID: Rct1CID.String(),
LeafMhKey: Rct1MhKey, MhKey: Rct1MhKey,
Topic0s: []string{
mockTopic11.String(),
},
Topic1s: []string{
mockTopic12.String(),
},
Contract: "", Contract: "",
ContractHash: "", ContractHash: "",
LogContracts: []string{
Address.String(),
},
}, },
{ {
LeafCID: Rct2CID.String(), CID: Rct2CID.String(),
LeafMhKey: Rct2MhKey, MhKey: Rct2MhKey,
Topic0s: []string{
mockTopic21.String(),
},
Topic1s: []string{
mockTopic22.String(),
},
Contract: "", Contract: "",
ContractHash: "", ContractHash: "",
LogContracts: []string{
AnotherAddress.String(),
},
}, },
{ {
LeafCID: Rct3CID.String(), CID: Rct3CID.String(),
LeafMhKey: Rct3MhKey, MhKey: Rct3MhKey,
Contract: ContractAddress.String(), Contract: ContractAddress.String(),
ContractHash: ContractHash, ContractHash: ContractHash,
}, LogContracts: []string{},
{
LeafCID: Rct4CID.String(),
LeafMhKey: Rct4MhKey,
Contract: "",
ContractHash: "",
}, },
} }
@ -326,7 +275,7 @@ var (
StorageLeafKey = crypto.Keccak256Hash(storageLocation[:]).Bytes() StorageLeafKey = crypto.Keccak256Hash(storageLocation[:]).Bytes()
StorageValue = crypto.Keccak256([]byte{1, 2, 3, 4, 5}) StorageValue = crypto.Keccak256([]byte{1, 2, 3, 4, 5})
StoragePartialPath = common.Hex2Bytes("20290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") StoragePartialPath = common.Hex2Bytes("20290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
StorageLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{ StorageLeafNode, _ = rlp.EncodeToBytes([]interface{}{
StoragePartialPath, StoragePartialPath,
StorageValue, StorageValue,
}) })
@ -336,14 +285,14 @@ var (
ContractCodeHash = crypto.Keccak256Hash(MockContractByteCode) ContractCodeHash = crypto.Keccak256Hash(MockContractByteCode)
contractPath = common.Bytes2Hex([]byte{'\x06'}) contractPath = common.Bytes2Hex([]byte{'\x06'})
ContractLeafKey = testhelpers.AddressToLeafKey(ContractAddress) ContractLeafKey = testhelpers.AddressToLeafKey(ContractAddress)
ContractAccount, _ = rlp.EncodeToBytes(&types.StateAccount{ ContractAccount, _ = rlp.EncodeToBytes(state.Account{
Nonce: nonce1, Nonce: nonce1,
Balance: big.NewInt(0), Balance: big.NewInt(0),
CodeHash: ContractCodeHash.Bytes(), CodeHash: ContractCodeHash.Bytes(),
Root: common.HexToHash(ContractRoot), Root: common.HexToHash(ContractRoot),
}) })
ContractPartialPath = common.Hex2Bytes("3114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45") ContractPartialPath = common.Hex2Bytes("3114658a74d9cc9f7acf2c5cd696c3494d7c344d78bfec3add0d91ec4e8d1c45")
ContractLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{ ContractLeafNode, _ = rlp.EncodeToBytes([]interface{}{
ContractPartialPath, ContractPartialPath,
ContractAccount, ContractAccount,
}) })
@ -354,42 +303,33 @@ var (
AccountCodeHash = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") AccountCodeHash = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
AccountAddresss = common.HexToAddress("0x0D3ab14BBaD3D99F4203bd7a11aCB94882050E7e") AccountAddresss = common.HexToAddress("0x0D3ab14BBaD3D99F4203bd7a11aCB94882050E7e")
AccountLeafKey = testhelpers.Account2LeafKey AccountLeafKey = testhelpers.Account2LeafKey
Account, _ = rlp.EncodeToBytes(&types.StateAccount{ Account, _ = rlp.EncodeToBytes(state.Account{
Nonce: nonce0, Nonce: nonce0,
Balance: AccountBalance, Balance: AccountBalance,
CodeHash: AccountCodeHash.Bytes(), CodeHash: AccountCodeHash.Bytes(),
Root: common.HexToHash(AccountRoot), Root: common.HexToHash(AccountRoot),
}) })
AccountPartialPath = common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45") AccountPartialPath = common.Hex2Bytes("3957f3e2f04a0764c3a0491b175f69926da61efbcc8f61fa1455fd2d2b4cdd45")
AccountLeafNode, _ = rlp.EncodeToBytes(&[]interface{}{ AccountLeafNode, _ = rlp.EncodeToBytes([]interface{}{
AccountPartialPath, AccountPartialPath,
Account, Account,
}) })
MockStateNodes = []sdtypes.StateNode{ MockStateNodes = []eth.TrieNode{
{ {
LeafKey: ContractLeafKey, LeafKey: common.BytesToHash(ContractLeafKey),
Path: []byte{'\x06'}, Path: []byte{'\x06'},
NodeValue: ContractLeafNode, Value: ContractLeafNode,
NodeType: sdtypes.Leaf, Type: sdtypes.Leaf,
StorageNodes: []sdtypes.StorageNode{
{
Path: []byte{},
NodeType: sdtypes.Leaf,
LeafKey: StorageLeafKey,
NodeValue: StorageLeafNode,
},
},
}, },
{ {
LeafKey: AccountLeafKey, LeafKey: common.BytesToHash(AccountLeafKey),
Path: []byte{'\x0c'}, Path: []byte{'\x0c'},
NodeValue: AccountLeafNode, Value: AccountLeafNode,
NodeType: sdtypes.Leaf, Type: sdtypes.Leaf,
StorageNodes: []sdtypes.StorageNode{},
}, },
} }
MockStateMetaPostPublish = []models.StateNodeModel{ MockStateMetaPostPublish = []eth.StateNodeModel{
{ {
CID: State1CID.String(), CID: State1CID.String(),
MhKey: State1MhKey, MhKey: State1MhKey,
@ -405,13 +345,13 @@ var (
StateKey: common.BytesToHash(AccountLeafKey).Hex(), StateKey: common.BytesToHash(AccountLeafKey).Hex(),
}, },
} }
MockStorageNodes = map[string][]sdtypes.StorageNode{ MockStorageNodes = map[string][]eth.TrieNode{
contractPath: { contractPath: {
{ {
LeafKey: StorageLeafKey, LeafKey: common.BytesToHash(StorageLeafKey),
NodeValue: StorageLeafNode, Value: StorageLeafNode,
NodeType: sdtypes.Leaf, Type: sdtypes.Leaf,
Path: []byte{}, Path: []byte{},
}, },
}, },
} }
@ -435,17 +375,16 @@ var (
StateNodes: MockStateNodes, StateNodes: MockStateNodes,
} }
Reward = shared.CalcEthBlockReward(MockBlock.Header(), MockBlock.Uncles(), MockBlock.Transactions(), MockReceipts) MockCIDWrapper = &eth2.CIDWrapper{
MockCIDWrapper = &eth.CIDWrapper{
BlockNumber: new(big.Int).Set(BlockNumber), BlockNumber: new(big.Int).Set(BlockNumber),
Header: models.HeaderModel{ Header: eth.HeaderModel{
BlockNumber: "1", BlockNumber: "1",
BlockHash: MockBlock.Hash().String(), BlockHash: MockBlock.Hash().String(),
ParentHash: "0x0000000000000000000000000000000000000000000000000000000000000000", ParentHash: "0x0000000000000000000000000000000000000000000000000000000000000000",
CID: HeaderCID.String(), CID: HeaderCID.String(),
MhKey: HeaderMhKey, MhKey: HeaderMhKey,
TotalDifficulty: MockBlock.Difficulty().String(), TotalDifficulty: MockBlock.Difficulty().String(),
Reward: Reward.String(), Reward: "5312500000000000000",
StateRoot: MockBlock.Root().String(), StateRoot: MockBlock.Root().String(),
RctRoot: MockBlock.ReceiptHash().String(), RctRoot: MockBlock.ReceiptHash().String(),
TxRoot: MockBlock.TxHash().String(), TxRoot: MockBlock.TxHash().String(),
@ -453,13 +392,12 @@ var (
Bloom: MockBlock.Bloom().Bytes(), Bloom: MockBlock.Bloom().Bytes(),
Timestamp: MockBlock.Time(), Timestamp: MockBlock.Time(),
TimesValidated: 1, TimesValidated: 1,
Coinbase: "0x0000000000000000000000000000000000000000",
}, },
Transactions: MockTrxMetaPostPublsh, Transactions: MockTrxMetaPostPublsh,
Receipts: MockRctMetaPostPublish, Receipts: MockRctMetaPostPublish,
Uncles: []models.UncleModel{}, Uncles: []eth.UncleModel{},
StateNodes: MockStateMetaPostPublish, StateNodes: MockStateMetaPostPublish,
StorageNodes: []models.StorageNodeWithStateKeyModel{ StorageNodes: []eth.StorageNodeWithStateKeyModel{
{ {
Path: []byte{}, Path: []byte{},
CID: StorageCID.String(), CID: StorageCID.String(),
@ -472,169 +410,91 @@ var (
} }
HeaderIPLD, _ = blocks.NewBlockWithCid(MockHeaderRlp, HeaderCID) HeaderIPLD, _ = blocks.NewBlockWithCid(MockHeaderRlp, HeaderCID)
Trx1IPLD, _ = blocks.NewBlockWithCid(Tx1, Trx1CID) Trx1IPLD, _ = blocks.NewBlockWithCid(MockTransactions.GetRlp(0), Trx1CID)
Trx2IPLD, _ = blocks.NewBlockWithCid(Tx2, Trx2CID) Trx2IPLD, _ = blocks.NewBlockWithCid(MockTransactions.GetRlp(1), Trx2CID)
Trx3IPLD, _ = blocks.NewBlockWithCid(Tx3, Trx3CID) Trx3IPLD, _ = blocks.NewBlockWithCid(MockTransactions.GetRlp(2), Trx3CID)
Trx4IPLD, _ = blocks.NewBlockWithCid(Tx4, Trx4CID) Rct1IPLD, _ = blocks.NewBlockWithCid(MockReceipts.GetRlp(0), Rct1CID)
Rct2IPLD, _ = blocks.NewBlockWithCid(MockReceipts.GetRlp(1), Rct2CID)
Rct3IPLD, _ = blocks.NewBlockWithCid(MockReceipts.GetRlp(2), Rct3CID)
State1IPLD, _ = blocks.NewBlockWithCid(ContractLeafNode, State1CID) State1IPLD, _ = blocks.NewBlockWithCid(ContractLeafNode, State1CID)
State2IPLD, _ = blocks.NewBlockWithCid(AccountLeafNode, State2CID) State2IPLD, _ = blocks.NewBlockWithCid(AccountLeafNode, State2CID)
StorageIPLD, _ = blocks.NewBlockWithCid(StorageLeafNode, StorageCID) StorageIPLD, _ = blocks.NewBlockWithCid(StorageLeafNode, StorageCID)
MockIPLDs = eth.IPLDs{ MockIPLDs = eth2.IPLDs{
BlockNumber: new(big.Int).Set(BlockNumber), BlockNumber: new(big.Int).Set(BlockNumber),
Header: models.IPLDModel{ Header: ipfs.BlockModel{
Data: HeaderIPLD.RawData(), Data: HeaderIPLD.RawData(),
Key: HeaderIPLD.Cid().String(), CID: HeaderIPLD.Cid().String(),
}, },
Transactions: []models.IPLDModel{ Transactions: []ipfs.BlockModel{
{ {
Data: Trx1IPLD.RawData(), Data: Trx1IPLD.RawData(),
Key: Trx1IPLD.Cid().String(), CID: Trx1IPLD.Cid().String(),
}, },
{ {
Data: Trx2IPLD.RawData(), Data: Trx2IPLD.RawData(),
Key: Trx2IPLD.Cid().String(), CID: Trx2IPLD.Cid().String(),
}, },
{ {
Data: Trx3IPLD.RawData(), Data: Trx3IPLD.RawData(),
Key: Trx3IPLD.Cid().String(), CID: Trx3IPLD.Cid().String(),
},
{
Data: Trx4IPLD.RawData(),
Key: Trx4IPLD.Cid().String(),
}, },
}, },
Receipts: []models.IPLDModel{ Receipts: []ipfs.BlockModel{
{ {
Data: Rct1IPLD, Data: Rct1IPLD.RawData(),
Key: Rct1CID.String(), CID: Rct1IPLD.Cid().String(),
}, },
{ {
Data: Rct2IPLD, Data: Rct2IPLD.RawData(),
Key: Rct2CID.String(), CID: Rct2IPLD.Cid().String(),
}, },
{ {
Data: Rct3IPLD, Data: Rct3IPLD.RawData(),
Key: Rct3CID.String(), CID: Rct3IPLD.Cid().String(),
},
{
Data: Rct4IPLD,
Key: Rct4CID.String(),
}, },
}, },
StateNodes: []eth.StateNode{ StateNodes: []eth2.StateNode{
{ {
StateLeafKey: common.BytesToHash(ContractLeafKey), StateLeafKey: common.BytesToHash(ContractLeafKey),
Type: sdtypes.Leaf, Type: sdtypes.Leaf,
IPLD: models.IPLDModel{ IPLD: ipfs.BlockModel{
Data: State1IPLD.RawData(), Data: State1IPLD.RawData(),
Key: State1IPLD.Cid().String(), CID: State1IPLD.Cid().String(),
}, },
Path: []byte{'\x06'}, Path: []byte{'\x06'},
}, },
{ {
StateLeafKey: common.BytesToHash(AccountLeafKey), StateLeafKey: common.BytesToHash(AccountLeafKey),
Type: sdtypes.Leaf, Type: sdtypes.Leaf,
IPLD: models.IPLDModel{ IPLD: ipfs.BlockModel{
Data: State2IPLD.RawData(), Data: State2IPLD.RawData(),
Key: State2IPLD.Cid().String(), CID: State2IPLD.Cid().String(),
}, },
Path: []byte{'\x0c'}, Path: []byte{'\x0c'},
}, },
}, },
StorageNodes: []eth.StorageNode{ StorageNodes: []eth2.StorageNode{
{ {
StateLeafKey: common.BytesToHash(ContractLeafKey), StateLeafKey: common.BytesToHash(ContractLeafKey),
StorageLeafKey: common.BytesToHash(StorageLeafKey), StorageLeafKey: common.BytesToHash(StorageLeafKey),
Type: sdtypes.Leaf, Type: sdtypes.Leaf,
IPLD: models.IPLDModel{ IPLD: ipfs.BlockModel{
Data: StorageIPLD.RawData(), Data: StorageIPLD.RawData(),
Key: StorageIPLD.Cid().String(), CID: StorageIPLD.Cid().String(),
}, },
Path: []byte{}, Path: []byte{},
}, },
}, },
} }
LondonBlockNum = new(big.Int).Add(BlockNumber, big.NewInt(2))
MockLondonHeader = types.Header{
Time: 0,
Number: LondonBlockNum,
Root: common.HexToHash("0x00"),
Difficulty: big.NewInt(5000000),
Extra: []byte{},
BaseFee: big.NewInt(params.InitialBaseFee),
}
MockLondonTransactions, MockLondonReceipts, _ = createDynamicTransactionsAndReceipts(LondonBlockNum)
MockLondonBlock = createNewBlock(&MockLondonHeader, MockLondonTransactions, nil, MockLondonReceipts, new(trie.Trie))
) )
func createNewBlock(header *types.Header, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt, hasher types.TrieHasher) *types.Block { // createTransactionsAndReceipts is a helper function to generate signed mock transactions and mock receipts with mock logs
block := types.NewBlock(header, txs, uncles, receipts, hasher) func createTransactionsAndReceipts() (types.Transactions, types.Receipts, common.Address) {
bHash := block.Hash()
for _, r := range receipts {
for _, l := range r.Logs {
l.BlockHash = bHash
}
}
return block
}
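createNewBlock exists because the mock logs are constructed before the block is sealed, so their BlockHash fields must be backfilled once the final hash is known. A quick check of the invariant it establishes (illustrative only):

blk := createNewBlock(&MockHeader, MockTransactions, MockUncles, MockReceipts, new(trie.Trie))
fmt.Println(MockReceipts[0].Logs[0].BlockHash == blk.Hash()) // true after the backfill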
// createDynamicTransactionsAndReceipts is a helper function to generate signed mock transactions and mock receipts with mock logs
func createDynamicTransactionsAndReceipts(blockNumber *big.Int) (types.Transactions, types.Receipts, common.Address) {
// make transactions
config := params.TestChainConfig
config.LondonBlock = blockNumber
trx1 := types.NewTx(&types.DynamicFeeTx{
ChainID: config.ChainID,
Nonce: 1,
GasTipCap: big.NewInt(50),
GasFeeCap: big.NewInt(100),
Gas: 50,
To: &Address,
Value: big.NewInt(1000),
Data: []byte{},
})
transactionSigner := types.MakeSigner(config, blockNumber)
mockCurve := elliptic.P256()
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
if err != nil {
log.Fatal(err.Error())
}
signedTrx1, err := types.SignTx(trx1, transactionSigner, mockPrvKey)
if err != nil {
log.Fatal(err.Error())
}
	senderAddr, err := types.Sender(transactionSigner, signedTrx1) // sender of the single mock dynamic-fee tx
if err != nil {
log.Fatal(err.Error())
}
// make receipts
// TODO: Change the receipt type to DynamicFeeTxType once this PR is merged.
// https://github.com/ethereum/go-ethereum/pull/22806
mockReceipt1 := &types.Receipt{
Type: types.DynamicFeeTxType,
PostState: common.HexToHash("0x0").Bytes(),
Status: types.ReceiptStatusSuccessful,
CumulativeGasUsed: 50,
Logs: []*types.Log{},
TxHash: signedTrx1.Hash(),
}
return types.Transactions{signedTrx1}, types.Receipts{mockReceipt1}, senderAddr
}
// createLegacyTransactionsAndReceipts is a helper function to generate signed mock transactions and mock receipts with mock logs
func createLegacyTransactionsAndReceipts() (types.Transactions, types.Receipts, common.Address) {
// make transactions // make transactions
trx1 := types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), []byte{}) trx1 := types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), []byte{})
trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), []byte{}) trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), []byte{})
trx3 := types.NewContractCreation(2, big.NewInt(1500), 75, big.NewInt(150), MockContractByteCode) trx3 := types.NewContractCreation(2, big.NewInt(1500), 75, big.NewInt(150), MockContractByteCode)
trx4 := types.NewTransaction(3, AnotherAddress1, big.NewInt(2000), 100, big.NewInt(200), []byte{})
transactionSigner := types.MakeSigner(params.MainnetChainConfig, new(big.Int).Set(BlockNumber)) transactionSigner := types.MakeSigner(params.MainnetChainConfig, new(big.Int).Set(BlockNumber))
mockCurve := elliptic.P256() mockCurve := elliptic.P256()
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader) mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
@ -653,60 +513,19 @@ func createLegacyTransactionsAndReceipts() (types.Transactions, types.Receipts,
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
signedTrx4, err := types.SignTx(trx4, transactionSigner, mockPrvKey)
if err != nil {
log.Fatal(err)
}
SenderAddr, err := types.Sender(transactionSigner, signedTrx1) // same for both trx SenderAddr, err := types.Sender(transactionSigner, signedTrx1) // same for both trx
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
// make receipts // make receipts
mockReceipt1 := types.NewReceipt(nil, false, 50) mockReceipt1 := types.NewReceipt(common.HexToHash("0x0").Bytes(), false, 50)
hash1 := signedTrx1.Hash()
MockLog1.TxHash = hash1
mockReceipt1.Logs = []*types.Log{MockLog1} mockReceipt1.Logs = []*types.Log{MockLog1}
mockReceipt1.TxHash = hash1 mockReceipt1.TxHash = signedTrx1.Hash()
mockReceipt1.GasUsed = mockReceipt1.CumulativeGasUsed
mockReceipt2 := types.NewReceipt(common.HexToHash("0x1").Bytes(), false, 100) mockReceipt2 := types.NewReceipt(common.HexToHash("0x1").Bytes(), false, 100)
hash2 := signedTrx2.Hash()
MockLog2.TxHash = hash2
mockReceipt2.Logs = []*types.Log{MockLog2} mockReceipt2.Logs = []*types.Log{MockLog2}
mockReceipt2.TxHash = hash2 mockReceipt2.TxHash = signedTrx2.Hash()
mockReceipt2.GasUsed = mockReceipt2.CumulativeGasUsed - mockReceipt1.CumulativeGasUsed mockReceipt3 := types.NewReceipt(common.HexToHash("0x2").Bytes(), false, 75)
mockReceipt3.Logs = []*types.Log{}
mockReceipt3 := types.NewReceipt(common.HexToHash("0x2").Bytes(), false, 175)
mockReceipt3.Logs = []*types.Log{MockLog3, MockLog4, MockLog5}
mockReceipt3.TxHash = signedTrx3.Hash() mockReceipt3.TxHash = signedTrx3.Hash()
mockReceipt3.GasUsed = mockReceipt3.CumulativeGasUsed - mockReceipt2.CumulativeGasUsed return types.Transactions{signedTrx1, signedTrx2, signedTrx3}, types.Receipts{mockReceipt1, mockReceipt2, mockReceipt3}, SenderAddr
// Receipt with failed status.
mockReceipt4 := types.NewReceipt(nil, true, 250)
mockReceipt4.Logs = []*types.Log{MockLog6}
mockReceipt4.TxHash = signedTrx4.Hash()
mockReceipt4.GasUsed = mockReceipt4.CumulativeGasUsed - mockReceipt3.CumulativeGasUsed
return types.Transactions{signedTrx1, signedTrx2, signedTrx3, signedTrx4}, types.Receipts{mockReceipt1, mockReceipt2, mockReceipt3, mockReceipt4}, SenderAddr
}
func GetTxnRlp(num int, txs types.Transactions) []byte {
buf := new(bytes.Buffer)
txs.EncodeIndex(num, buf)
tx := make([]byte, buf.Len())
copy(tx, buf.Bytes())
buf.Reset()
return tx
}
func GetRctRlp(num int, rcts types.Receipts) []byte {
buf := new(bytes.Buffer)
rcts.EncodeIndex(num, buf)
rct := make([]byte, buf.Len())
copy(rct, buf.Bytes())
buf.Reset()
return rct
} }
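GetTxnRlp and GetRctRlp rely on types.Transactions.EncodeIndex and types.Receipts.EncodeIndex, which emit the consensus (typed-transaction aware) encoding of the i-th element; these are the same bytes the mock CIDs earlier in this file are derived from. A hypothetical round trip:

txRLP := GetTxnRlp(0, MockTransactions)
txCID, err := ipld.RawdataToCid(ipld.MEthTx, txRLP, multihash.KECCAK_256)
if err != nil {
	log.Fatal(err)
}
fmt.Println(txCID.String() == Trx1CID.String()) // expected: true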

View File

@ -17,39 +17,32 @@
package eth package eth
import ( import (
"errors"
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
sdtypes "github.com/ethereum/go-ethereum/statediff/types" sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/sirupsen/logrus" "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
) )
// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction // RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction
type RPCTransaction struct { type RPCTransaction struct {
BlockHash *common.Hash `json:"blockHash"` BlockHash *common.Hash `json:"blockHash"`
BlockNumber *hexutil.Big `json:"blockNumber"` BlockNumber *hexutil.Big `json:"blockNumber"`
From common.Address `json:"from"` From common.Address `json:"from"`
Gas hexutil.Uint64 `json:"gas"` Gas hexutil.Uint64 `json:"gas"`
GasPrice *hexutil.Big `json:"gasPrice"` GasPrice *hexutil.Big `json:"gasPrice"`
GasFeeCap *hexutil.Big `json:"maxFeePerGas,omitempty"` Hash common.Hash `json:"hash"`
GasTipCap *hexutil.Big `json:"maxPriorityFeePerGas,omitempty"` Input hexutil.Bytes `json:"input"`
Hash common.Hash `json:"hash"` Nonce hexutil.Uint64 `json:"nonce"`
Input hexutil.Bytes `json:"input"` To *common.Address `json:"to"`
Nonce hexutil.Uint64 `json:"nonce"` TransactionIndex *hexutil.Uint64 `json:"transactionIndex"`
To *common.Address `json:"to"` Value *hexutil.Big `json:"value"`
TransactionIndex *hexutil.Uint64 `json:"transactionIndex"` V *hexutil.Big `json:"v"`
Value *hexutil.Big `json:"value"` R *hexutil.Big `json:"r"`
Type hexutil.Uint64 `json:"type"` S *hexutil.Big `json:"s"`
Accesses *types.AccessList `json:"accessList,omitempty"`
ChainID *hexutil.Big `json:"chainId,omitempty"`
V *hexutil.Big `json:"v"`
R *hexutil.Big `json:"r"`
S *hexutil.Big `json:"s"`
} }
// RPCReceipt represents a receipt that will serialize to the RPC representation of a receipt // RPCReceipt represents a receipt that will serialize to the RPC representation of a receipt
@ -89,106 +82,26 @@ type StorageResult struct {
// CallArgs represents the arguments for a call. // CallArgs represents the arguments for a call.
type CallArgs struct { type CallArgs struct {
From *common.Address `json:"from"` From *common.Address `json:"from"`
To *common.Address `json:"to"` To *common.Address `json:"to"`
Gas *hexutil.Uint64 `json:"gas"` Gas *hexutil.Uint64 `json:"gas"`
GasPrice *hexutil.Big `json:"gasPrice"` GasPrice *hexutil.Big `json:"gasPrice"`
MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"` Value *hexutil.Big `json:"value"`
MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"` Data *hexutil.Bytes `json:"data"`
Value *hexutil.Big `json:"value"`
Data *hexutil.Bytes `json:"data"`
AccessList *types.AccessList `json:"accessList,omitempty"`
Input *hexutil.Bytes `json:"input"`
} }
// from retrieves the transaction sender address. // account indicates the overriding fields of account during the execution of
func (arg *CallArgs) from() common.Address { // a message call.
if arg.From == nil { // Note, state and stateDiff can't be specified at the same time. If state is
return common.Address{} // set, message execution will only use the data in the given state. Otherwise
} // if stateDiff is set, all diffs will be applied first and then execute the call
return *arg.From // message.
} type account struct {
Nonce *hexutil.Uint64 `json:"nonce"`
// data retrieves the transaction calldata. Input field is preferred. Code *hexutil.Bytes `json:"code"`
func (arg *CallArgs) data() []byte { Balance **hexutil.Big `json:"balance"`
if arg.Input != nil { State *map[common.Hash]common.Hash `json:"state"`
return *arg.Input StateDiff *map[common.Hash]common.Hash `json:"stateDiff"`
}
if arg.Data != nil {
return *arg.Data
}
return nil
}
// ToMessage converts the transaction arguments to the Message type used by the
// core evm. This method is used in calls and traces that do not require a real
// live transaction.
func (arg *CallArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (types.Message, error) {
// Reject invalid combinations of pre- and post-1559 fee styles
if arg.GasPrice != nil && (arg.MaxFeePerGas != nil || arg.MaxPriorityFeePerGas != nil) {
return types.Message{}, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
}
// Set sender address or use zero address if none specified.
addr := arg.from()
// Set default gas & gas price if none were set
gas := globalGasCap
if gas == 0 {
gas = uint64(math.MaxUint64 / 2)
}
if arg.Gas != nil {
gas = uint64(*arg.Gas)
}
if globalGasCap != 0 && globalGasCap < gas {
logrus.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap)
gas = globalGasCap
}
var (
gasPrice *big.Int
gasFeeCap *big.Int
gasTipCap *big.Int
)
if baseFee == nil {
// If there's no basefee, then it must be a non-1559 execution
gasPrice = new(big.Int)
if arg.GasPrice != nil {
gasPrice = arg.GasPrice.ToInt()
}
gasFeeCap, gasTipCap = gasPrice, gasPrice
} else {
// A basefee is provided, necessitating 1559-type execution
if arg.GasPrice != nil {
// User specified the legacy gas field, convert to 1559 gas typing
gasPrice = arg.GasPrice.ToInt()
gasFeeCap, gasTipCap = gasPrice, gasPrice
} else {
// User specified 1559 gas fields (or none), use those
gasFeeCap = new(big.Int)
if arg.MaxFeePerGas != nil {
gasFeeCap = arg.MaxFeePerGas.ToInt()
}
gasTipCap = new(big.Int)
if arg.MaxPriorityFeePerGas != nil {
gasTipCap = arg.MaxPriorityFeePerGas.ToInt()
}
// Backfill the legacy gasPrice for EVM execution, unless we're all zeroes
gasPrice = new(big.Int)
if gasFeeCap.BitLen() > 0 || gasTipCap.BitLen() > 0 {
gasPrice = math.BigMin(new(big.Int).Add(gasTipCap, baseFee), gasFeeCap)
}
}
}
value := new(big.Int)
if arg.Value != nil {
value = arg.Value.ToInt()
}
data := arg.data()
var accessList types.AccessList
if arg.AccessList != nil {
accessList = *arg.AccessList
}
msg := types.NewMessage(addr, arg.To, 0, value, gas, gasPrice, gasFeeCap, gasTipCap, data, accessList, true)
return msg, nil
} }
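A sketch of driving ToMessage above with 1559-style fee fields; the numbers are arbitrary. With a non-nil baseFee and no legacy gasPrice, the gas price backfilled for the EVM is min(maxPriorityFeePerGas+baseFee, maxFeePerGas).

to := common.HexToAddress("0x0D3ab14BBaD3D99F4203bd7a11aCB94882050E7e")
gas := hexutil.Uint64(21000)
feeCap := (*hexutil.Big)(big.NewInt(200))
tip := (*hexutil.Big)(big.NewInt(2))
args := CallArgs{To: &to, Gas: &gas, MaxFeePerGas: feeCap, MaxPriorityFeePerGas: tip}
msg, err := args.ToMessage(0, big.NewInt(100)) // globalGasCap 0 means uncapped
if err != nil {
	log.Fatal(err)
}
fmt.Println(msg.GasPrice()) // 102 = min(2+100, 200)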
// IPLDs is used to package raw IPLD block data fetched from IPFS and returned by the server // IPLDs is used to package raw IPLD block data fetched from IPFS and returned by the server
@ -196,10 +109,10 @@ func (arg *CallArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (types.Mes
type IPLDs struct { type IPLDs struct {
BlockNumber *big.Int BlockNumber *big.Int
TotalDifficulty *big.Int TotalDifficulty *big.Int
Header models.IPLDModel Header ipfs.BlockModel
Uncles []models.IPLDModel Uncles []ipfs.BlockModel
Transactions []models.IPLDModel Transactions []ipfs.BlockModel
Receipts []models.IPLDModel Receipts []ipfs.BlockModel
StateNodes []StateNode StateNodes []StateNode
StorageNodes []StorageNode StorageNodes []StorageNode
} }
@ -208,7 +121,7 @@ type StateNode struct {
Type sdtypes.NodeType Type sdtypes.NodeType
StateLeafKey common.Hash StateLeafKey common.Hash
Path []byte Path []byte
IPLD models.IPLDModel IPLD ipfs.BlockModel
} }
type StorageNode struct { type StorageNode struct {
@ -216,7 +129,7 @@ type StorageNode struct {
StateLeafKey common.Hash StateLeafKey common.Hash
StorageLeafKey common.Hash StorageLeafKey common.Hash
Path []byte Path []byte
IPLD models.IPLDModel IPLD ipfs.BlockModel
} }
// CIDWrapper is used to direct fetching of IPLDs from IPFS // CIDWrapper is used to direct fetching of IPLDs from IPFS
@ -224,43 +137,10 @@ type StorageNode struct {
// Passed to IPLDFetcher // Passed to IPLDFetcher
type CIDWrapper struct { type CIDWrapper struct {
BlockNumber *big.Int BlockNumber *big.Int
Header models.HeaderModel Header eth.HeaderModel
Uncles []models.UncleModel Uncles []eth.UncleModel
Transactions []models.TxModel Transactions []eth.TxModel
Receipts []models.ReceiptModel Receipts []eth.ReceiptModel
StateNodes []models.StateNodeModel StateNodes []eth.StateNodeModel
StorageNodes []models.StorageNodeWithStateKeyModel StorageNodes []eth.StorageNodeWithStateKeyModel
}
// ConvertedPayload is a custom type which packages raw ETH data for publishing to IPFS and filtering to subscribers
// Returned by PayloadConverter
// Passed to IPLDPublisher and ResponseFilterer
type ConvertedPayload struct {
TotalDifficulty *big.Int
Block *types.Block
TxMetaData []models.TxModel
Receipts types.Receipts
ReceiptMetaData []models.ReceiptModel
StateNodes []sdtypes.StateNode
StorageNodes map[string][]sdtypes.StorageNode
}
// LogResult represent a log.
type LogResult struct {
LeafCID string `db:"leaf_cid"`
ReceiptID string `db:"rct_id"`
Address string `db:"address"`
Index int64 `db:"index"`
Data []byte `db:"log_data"`
Topic0 string `db:"topic0"`
Topic1 string `db:"topic1"`
Topic2 string `db:"topic2"`
Topic3 string `db:"topic3"`
LogLeafData []byte `db:"data"`
RctCID string `db:"cid"`
RctStatus uint64 `db:"post_status"`
BlockNumber string `db:"block_number"`
BlockHash string `db:"block_hash"`
TxnIndex int64 `db:"txn_index"`
TxHash string `db:"tx_hash"`
} }

View File

@ -1,267 +0,0 @@
package graphql
import (
"context"
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
gqlclient "github.com/machinebox/graphql"
)
type StorageResponse struct {
CID string `json:"cid"`
Value common.Hash `json:"value"`
IpldBlock hexutil.Bytes `json:"ipldBlock"`
}
type GetStorageAt struct {
Response StorageResponse `json:"getStorageAt"`
}
type LogResponse struct {
Topics []common.Hash `json:"topics"`
Data hexutil.Bytes `json:"data"`
Transaction TransactionResponse `json:"transaction"`
ReceiptCID string `json:"receiptCID"`
Status int32 `json:"status"`
}
type TransactionResponse struct {
Hash common.Hash `json:"hash"`
}
type GetLogs struct {
Responses []LogResponse `json:"getLogs"`
}
type IPFSBlockResponse struct {
Key string `json:"key"`
Data string `json:"data"`
}
type EthTransactionCIDResponse struct {
CID string `json:"cid"`
TxHash string `json:"txHash"`
Index int32 `json:"index"`
Src string `json:"src"`
Dst string `json:"dst"`
BlockByMhKey IPFSBlockResponse `json:"blockByMhKey"`
}
type EthTransactionCIDByTxHash struct {
Response EthTransactionCIDResponse `json:"ethTransactionCidByTxHash"`
}
type EthTransactionCIDsByHeaderIdResponse struct {
Nodes []EthTransactionCIDResponse `json:"nodes"`
}
type EthHeaderCIDResponse struct {
CID string `json:"cid"`
BlockNumber BigInt `json:"blockNumber"`
BlockHash string `json:"blockHash"`
ParentHash string `json:"parentHash"`
Timestamp BigInt `json:"timestamp"`
StateRoot string `json:"stateRoot"`
Td BigInt `json:"td"`
TxRoot string `json:"txRoot"`
ReceiptRoot string `json:"receiptRoot"`
UncleRoot string `json:"uncleRoot"`
Bloom string `json:"bloom"`
EthTransactionCIDsByHeaderId EthTransactionCIDsByHeaderIdResponse `json:"ethTransactionCidsByHeaderId"`
BlockByMhKey IPFSBlockResponse `json:"blockByMhKey"`
}
type AllEthHeaderCIDsResponse struct {
Nodes []EthHeaderCIDResponse `json:"nodes"`
}
type AllEthHeaderCIDs struct {
Response AllEthHeaderCIDsResponse `json:"allEthHeaderCids"`
}
type Client struct {
client *gqlclient.Client
}
func NewClient(endpoint string) *Client {
client := gqlclient.NewClient(endpoint)
return &Client{client: client}
}
func (c *Client) GetLogs(ctx context.Context, hash common.Hash, address *common.Address) ([]LogResponse, error) {
params := fmt.Sprintf(`blockHash: "%s"`, hash.String())
if address != nil {
params += fmt.Sprintf(`, contract: "%s"`, address.String())
}
getLogsQuery := fmt.Sprintf(`query{
getLogs(%s) {
data
topics
transaction {
hash
}
status
receiptCID
}
}`, params)
req := gqlclient.NewRequest(getLogsQuery)
req.Header.Set("Cache-Control", "no-cache")
var respData map[string]interface{}
err := c.client.Run(ctx, req, &respData)
if err != nil {
return nil, err
}
jsonStr, err := json.Marshal(respData)
if err != nil {
return nil, err
}
var logs GetLogs
err = json.Unmarshal(jsonStr, &logs)
if err != nil {
return nil, err
}
return logs.Responses, nil
}
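A hypothetical caller of the client above; the endpoint, block hash and contract address are placeholders rather than values from this repository.

client := NewClient("http://127.0.0.1:8083/graphql")
blockHash := common.HexToHash("0xabc0000000000000000000000000000000000000000000000000000000000001")
contract := common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
logs, err := client.GetLogs(context.Background(), blockHash, &contract)
if err != nil {
	log.Fatal(err)
}
for _, l := range logs {
	fmt.Println(l.Transaction.Hash.Hex(), l.ReceiptCID, l.Status)
}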
func (c *Client) GetStorageAt(ctx context.Context, hash common.Hash, address common.Address, slot string) (*StorageResponse, error) {
getLogsQuery := fmt.Sprintf(`
query{
getStorageAt(blockHash: "%s", contract: "%s",slot: "%s") {
cid
value
ipldBlock
}
}
`, hash.String(), address.String(), common.HexToHash(slot))
req := gqlclient.NewRequest(getLogsQuery)
req.Header.Set("Cache-Control", "no-cache")
var respData map[string]interface{}
err := c.client.Run(ctx, req, &respData)
if err != nil {
return nil, err
}
jsonStr, err := json.Marshal(respData)
if err != nil {
return nil, err
}
var storageAt GetStorageAt
err = json.Unmarshal(jsonStr, &storageAt)
if err != nil {
return nil, err
}
return &storageAt.Response, nil
}
func (c *Client) AllEthHeaderCIDs(ctx context.Context, condition EthHeaderCIDCondition) (*AllEthHeaderCIDsResponse, error) {
var params string
if condition.BlockHash != nil {
params = fmt.Sprintf(`blockHash: "%s"`, *condition.BlockHash)
}
if condition.BlockNumber != nil {
if params != "" {
params += ", "
}
params += fmt.Sprintf(`blockNumber: "%s"`, condition.BlockNumber.String())
}
getHeadersQuery := fmt.Sprintf(`
query{
allEthHeaderCids(condition: { %s }) {
nodes {
cid
blockNumber
blockHash
parentHash
timestamp
stateRoot
td
txRoot
receiptRoot
uncleRoot
bloom
blockByMhKey {
key
data
}
ethTransactionCidsByHeaderId {
nodes {
cid
txHash
index
src
dst
}
}
}
}
}
`, params)
req := gqlclient.NewRequest(getHeadersQuery)
req.Header.Set("Cache-Control", "no-cache")
var respData map[string]interface{}
err := c.client.Run(ctx, req, &respData)
if err != nil {
return nil, err
}
jsonStr, err := json.Marshal(respData)
if err != nil {
return nil, err
}
var allEthHeaderCIDs AllEthHeaderCIDs
err = json.Unmarshal(jsonStr, &allEthHeaderCIDs)
if err != nil {
return nil, err
}
return &allEthHeaderCIDs.Response, nil
}
func (c *Client) EthTransactionCIDByTxHash(ctx context.Context, txHash string) (*EthTransactionCIDResponse, error) {
getTxQuery := fmt.Sprintf(`
query{
ethTransactionCidByTxHash(txHash: "%s") {
cid
txHash
index
src
dst
blockByMhKey {
data
}
}
}
`, txHash)
req := gqlclient.NewRequest(getTxQuery)
req.Header.Set("Cache-Control", "no-cache")
var respData map[string]interface{}
err := c.client.Run(ctx, req, &respData)
if err != nil {
return nil, err
}
jsonStr, err := json.Marshal(respData)
if err != nil {
return nil, err
}
var ethTxCID EthTransactionCIDByTxHash
err = json.Unmarshal(jsonStr, &ethTxCID)
if err != nil {
return nil, err
}
return &ethTxCID.Response, nil
}
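For reference, a minimal, hypothetical usage sketch of this client (not part of the diff): the endpoint mirrors the 127.0.0.1:8083 address used by the GraphQL tests further down, the module path is the v3 one used on the master side, and the block hash, contract address and slot literals are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"

	"github.com/vulcanize/ipld-eth-server/v3/pkg/graphql"
)

func main() {
	ctx := context.Background()
	client := graphql.NewClient("http://127.0.0.1:8083/graphql")

	// Logs emitted by one contract in one block (placeholder hash and address).
	blockHash := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001")
	contract := common.HexToAddress("0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f")
	logs, err := client.GetLogs(ctx, blockHash, &contract)
	if err != nil {
		log.Fatal(err)
	}
	for _, l := range logs {
		fmt.Println(l.Transaction.Hash, l.ReceiptCID, l.Status)
	}

	// Storage slot 0x1 of the same contract at the same block.
	storage, err := client.GetStorageAt(ctx, blockHash, contract, "0x1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(storage.Value)

	// Header (and embedded transaction) CIDs at height 2.
	headers, err := client.AllEthHeaderCIDs(ctx, graphql.EthHeaderCIDCondition{
		BlockNumber: new(graphql.BigInt).SetUint64(2),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, h := range headers.Nodes {
		fmt.Println(h.BlockHash, h.CID, len(h.EthTransactionCIDsByHeaderId.Nodes))
	}
}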


@ -18,11 +18,8 @@
package graphql package graphql
import ( import (
"bytes"
"context" "context"
"database/sql"
"errors" "errors"
"fmt"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -31,11 +28,9 @@ import (
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth" "github.com/vulcanize/ipld-eth-server/pkg/eth"
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
) )
var ( var (
@ -96,19 +91,13 @@ type Log struct {
backend *eth.Backend backend *eth.Backend
transaction *Transaction transaction *Transaction
log *types.Log log *types.Log
cid string
receiptCID string
ipldBlock []byte // log leaf node IPLD block data
status uint64
} }
// Transaction returns transaction that generated this log entry. func (l *Log) Transaction(ctx context.Context) *Transaction {
func (l *Log) Transaction(_ context.Context) *Transaction {
return l.transaction return l.transaction
} }
// Account returns the contract account which generated this log. func (l *Log) Account(ctx context.Context, args BlockNumberArgs) *Account {
func (l *Log) Account(_ context.Context, args BlockNumberArgs) *Account {
return &Account{ return &Account{
backend: l.backend, backend: l.backend,
address: l.log.Address, address: l.log.Address,
@ -116,39 +105,16 @@ func (l *Log) Account(_ context.Context, args BlockNumberArgs) *Account {
} }
} }
// Index returns the index of this log in the block func (l *Log) Index(ctx context.Context) int32 {
func (l *Log) Index(_ context.Context) int32 {
return int32(l.log.Index) return int32(l.log.Index)
} }
// Topics returns the list of 0-4 indexed topics for the log. func (l *Log) Topics(ctx context.Context) []common.Hash {
func (l *Log) Topics(_ context.Context) []common.Hash {
return l.log.Topics return l.log.Topics
} }
// Data returns data of this log. func (l *Log) Data(ctx context.Context) hexutil.Bytes {
func (l *Log) Data(_ context.Context) hexutil.Bytes { return hexutil.Bytes(l.log.Data)
return l.log.Data
}
// Cid returns cid of the leaf node of this log.
func (l *Log) Cid(_ context.Context) string {
return l.cid
}
// IpldBlock returns IPLD block of the leaf node of this log.
func (l *Log) IpldBlock(_ context.Context) hexutil.Bytes {
return l.ipldBlock
}
// Status returns the status of the receipt IPLD block this Log exists in.
func (l *Log) Status(_ context.Context) int32 {
return int32(l.status)
}
// ReceiptCID returns the receipt CID of the receipt IPLD block this Log exists in.
func (l *Log) ReceiptCID(_ context.Context) string {
return l.receiptCID
} }
// Transaction represents an Ethereum transaction. // Transaction represents an Ethereum transaction.
@ -837,20 +803,16 @@ func (b *Block) Call(ctx context.Context, args struct {
return nil, err return nil, err
} }
} }
result, err := eth.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, 5*time.Second, b.backend.RPCGasCap().Uint64()) result, gas, failed, err := eth.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, 5*time.Second, b.backend.RPCGasCap())
if err != nil {
return nil, err
}
status := hexutil.Uint64(1) status := hexutil.Uint64(1)
if result.Failed() { if failed {
status = 0 status = 0
} }
return &CallResult{ return &CallResult{
data: result.ReturnData, data: hexutil.Bytes(result),
gasUsed: hexutil.Uint64(result.UsedGas), gasUsed: hexutil.Uint64(gas),
status: status, status: status,
}, nil }, err
} }
// Resolver is the top-level object in the GraphQL hierarchy. // Resolver is the top-level object in the GraphQL hierarchy.
@ -905,11 +867,7 @@ func (r *Resolver) Blocks(ctx context.Context, args struct {
if args.To != nil { if args.To != nil {
to = rpc.BlockNumber(*args.To) to = rpc.BlockNumber(*args.To)
} else { } else {
block, err := r.backend.CurrentBlock() to = rpc.BlockNumber(r.backend.CurrentBlock().Number().Int64())
if err != nil {
return []*Block{}, nil
}
to = rpc.BlockNumber(block.Number().Int64())
} }
if to < from { if to < from {
return []*Block{}, nil return []*Block{}, nil
@ -982,386 +940,3 @@ func (r *Resolver) Logs(ctx context.Context, args struct{ Filter FilterCriteria
filter := filters.NewRangeFilter(filters.Backend(r.backend), begin, end, addresses, topics) filter := filters.NewRangeFilter(filters.Backend(r.backend), begin, end, addresses, topics)
return runFilter(ctx, r.backend, filter) return runFilter(ctx, r.backend, filter)
} }
// StorageResult represents a storage slot value along with the CID and content of its IPLD block.
type StorageResult struct {
value []byte
cid string
ipldBlock []byte
}
func (s *StorageResult) Value(ctx context.Context) common.Hash {
return common.BytesToHash(s.value)
}
func (s *StorageResult) Cid(ctx context.Context) string {
return s.cid
}
func (s *StorageResult) IpldBlock(ctx context.Context) hexutil.Bytes {
return hexutil.Bytes(s.ipldBlock)
}
func (r *Resolver) GetStorageAt(ctx context.Context, args struct {
BlockHash common.Hash
Contract common.Address
Slot common.Hash
}) (*StorageResult, error) {
cid, ipldBlock, rlpValue, err := r.backend.IPLDRetriever.RetrieveStorageAtByAddressAndStorageSlotAndBlockHash(args.Contract, args.Slot, args.BlockHash)
if err != nil {
if err == sql.ErrNoRows {
ret := StorageResult{value: []byte{}, cid: "", ipldBlock: []byte{}}
return &ret, nil
}
return nil, err
}
if bytes.Equal(rlpValue, eth.EmptyNodeValue) {
return &StorageResult{value: eth.EmptyNodeValue, cid: cid, ipldBlock: ipldBlock}, nil
}
var value interface{}
err = rlp.DecodeBytes(rlpValue, &value)
if err != nil {
return nil, err
}
ret := StorageResult{value: value.([]byte), cid: cid, ipldBlock: ipldBlock}
return &ret, nil
}
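The value handed back by the retriever is the raw RLP leaf value: a minimal big-endian byte string with leading zeros stripped, or eth.EmptyNodeValue when the slot is unset. The decode step above can be illustrated in isolation with the following hypothetical sketch (the slot value 0x03 matches a fixture used by the getStorageAt tests later in this diff):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// A slot holding the value 3 is stored as the RLP encoding of 0x03.
	raw, err := rlp.EncodeToBytes([]byte{0x03})
	if err != nil {
		panic(err)
	}

	// Mirror the resolver: decode into interface{}, assert []byte,
	// then left-pad back out to a 32-byte hash for the caller.
	var value interface{}
	if err := rlp.DecodeBytes(raw, &value); err != nil {
		panic(err)
	}
	fmt.Println(common.BytesToHash(value.([]byte))) // 0x0000…0003
}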
func (r *Resolver) GetLogs(ctx context.Context, args struct {
BlockHash common.Hash
Contract *common.Address
}) (*[]*Log, error) {
var filter eth.ReceiptFilter
if args.Contract != nil {
filter.LogAddresses = []string{args.Contract.String()}
}
// Begin tx
tx, err := r.backend.DB.Beginx()
if err != nil {
return nil, err
}
filteredLogs, err := r.backend.Retriever.RetrieveFilteredGQLLogs(tx, filter, &args.BlockHash)
if err != nil {
return nil, err
}
if err = tx.Commit(); err != nil {
return nil, err
}
rctLog := decomposeGQLLogs(filteredLogs)
ret := make([]*Log, 0, len(rctLog))
for _, l := range rctLog {
ret = append(ret, &Log{
backend: r.backend,
log: l.Log,
cid: l.CID,
receiptCID: l.RctCID,
ipldBlock: l.LogLeafData,
transaction: &Transaction{
hash: l.Log.TxHash,
},
status: l.RctStatus,
})
}
return &ret, nil
}
type logsCID struct {
Log *types.Log
CID string
RctCID string
LogLeafData []byte
RctStatus uint64
}
// decomposeGQLLogs converts the filtered log results into the form served over GraphQL.
func decomposeGQLLogs(logCIDs []eth.LogResult) []logsCID {
logs := make([]logsCID, len(logCIDs))
for i, l := range logCIDs {
topics := make([]common.Hash, 0)
if l.Topic0 != "" {
topics = append(topics, common.HexToHash(l.Topic0))
}
if l.Topic1 != "" {
topics = append(topics, common.HexToHash(l.Topic1))
}
if l.Topic2 != "" {
topics = append(topics, common.HexToHash(l.Topic2))
}
if l.Topic3 != "" {
topics = append(topics, common.HexToHash(l.Topic3))
}
logs[i] = logsCID{
Log: &types.Log{
Address: common.HexToAddress(l.Address),
Topics: topics,
Data: l.Data,
Index: uint(l.Index),
TxHash: common.HexToHash(l.TxHash),
},
CID: l.LeafCID,
RctCID: l.RctCID,
LogLeafData: l.LogLeafData,
RctStatus: l.RctStatus,
}
}
return logs
}
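As a hedged illustration only (the field values below are invented), a row with just Topic0 populated decomposes into a types.Log carrying a single topic; empty topic columns are simply skipped. Dropped into an internal _test.go file alongside this resolver, that check might read:

package graphql

import (
	"testing"

	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
)

func TestDecomposeGQLLogsSingleTopic(t *testing.T) {
	rows := []eth.LogResult{{
		Address: "0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f",
		Topic0:  "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
		Data:    []byte{0x01},
		TxHash:  "0x0000000000000000000000000000000000000000000000000000000000000002",
	}}

	out := decomposeGQLLogs(rows)
	if got := len(out[0].Log.Topics); got != 1 {
		t.Fatalf("expected 1 topic, got %d", got)
	}
}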
type EthTransactionCID struct {
cid string
txHash string
index int32
src string
dst string
ipfsBlock IPFSBlock
}
func (t EthTransactionCID) Cid(ctx context.Context) string {
return t.cid
}
func (t EthTransactionCID) TxHash(ctx context.Context) string {
return t.txHash
}
func (t EthTransactionCID) Index(ctx context.Context) int32 {
return t.index
}
func (t EthTransactionCID) Src(ctx context.Context) string {
return t.src
}
func (t EthTransactionCID) Dst(ctx context.Context) string {
return t.dst
}
func (t EthTransactionCID) BlockByMhKey(ctx context.Context) IPFSBlock {
return t.ipfsBlock
}
type EthTransactionCIDsConnection struct {
nodes []*EthTransactionCID
}
func (transactionCIDResult EthTransactionCIDsConnection) Nodes(ctx context.Context) []*EthTransactionCID {
return transactionCIDResult.nodes
}
type IPFSBlock struct {
key string
data string
}
func (b IPFSBlock) Key(ctx context.Context) string {
return b.key
}
func (b IPFSBlock) Data(ctx context.Context) string {
return b.data
}
type EthHeaderCID struct {
cid string
blockNumber BigInt
blockHash string
parentHash string
timestamp BigInt
stateRoot string
td BigInt
txRoot string
receiptRoot string
uncleRoot string
bloom string
transactions []*EthTransactionCID
ipfsBlock IPFSBlock
}
func (h EthHeaderCID) Cid(ctx context.Context) string {
return h.cid
}
func (h EthHeaderCID) BlockNumber(ctx context.Context) BigInt {
return h.blockNumber
}
func (h EthHeaderCID) BlockHash(ctx context.Context) string {
return h.blockHash
}
func (h EthHeaderCID) ParentHash(ctx context.Context) string {
return h.parentHash
}
func (h EthHeaderCID) Timestamp(ctx context.Context) BigInt {
return h.timestamp
}
func (h EthHeaderCID) StateRoot(ctx context.Context) string {
return h.stateRoot
}
func (h EthHeaderCID) Td(ctx context.Context) BigInt {
return h.td
}
func (h EthHeaderCID) TxRoot(ctx context.Context) string {
return h.txRoot
}
func (h EthHeaderCID) ReceiptRoot(ctx context.Context) string {
return h.receiptRoot
}
func (h EthHeaderCID) UncleRoot(ctx context.Context) string {
return h.uncleRoot
}
func (h EthHeaderCID) Bloom(ctx context.Context) string {
return h.bloom
}
func (h EthHeaderCID) EthTransactionCidsByHeaderId(ctx context.Context) EthTransactionCIDsConnection {
return EthTransactionCIDsConnection{nodes: h.transactions}
}
func (h EthHeaderCID) BlockByMhKey(ctx context.Context) IPFSBlock {
return h.ipfsBlock
}
type EthHeaderCIDsConnection struct {
nodes []*EthHeaderCID
}
func (headerCIDResult EthHeaderCIDsConnection) Nodes(ctx context.Context) []*EthHeaderCID {
return headerCIDResult.nodes
}
type EthHeaderCIDCondition struct {
BlockNumber *BigInt
BlockHash *string
}
func (r *Resolver) AllEthHeaderCids(ctx context.Context, args struct {
Condition *EthHeaderCIDCondition
}) (*EthHeaderCIDsConnection, error) {
var headerCIDs []eth.HeaderCIDRecord
var err error
if args.Condition.BlockHash != nil {
headerCID, err := r.backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(common.HexToHash(*args.Condition.BlockHash))
if err != nil {
return nil, err
}
headerCIDs = append(headerCIDs, headerCID)
} else if args.Condition.BlockNumber != nil {
headerCIDs, err = r.backend.Retriever.RetrieveHeaderAndTxCIDsByBlockNumber(args.Condition.BlockNumber.ToInt().Int64())
if err != nil {
return nil, err
}
} else {
return nil, fmt.Errorf("provide block number or block hash")
}
// Begin tx
tx, err := r.backend.DB.Beginx()
if err != nil {
return nil, err
}
defer func() {
if p := recover(); p != nil {
shared.Rollback(tx)
panic(p)
} else if err != nil {
shared.Rollback(tx)
} else {
err = tx.Commit()
}
}()
var resultNodes []*EthHeaderCID
for _, headerCID := range headerCIDs {
var blockNumber BigInt
blockNumber.UnmarshalText([]byte(headerCID.BlockNumber))
var timestamp BigInt
timestamp.SetUint64(headerCID.Timestamp)
var td BigInt
td.UnmarshalText([]byte(headerCID.TotalDifficulty))
ethHeaderCIDNode := EthHeaderCID{
cid: headerCID.CID,
blockNumber: blockNumber,
blockHash: headerCID.BlockHash,
parentHash: headerCID.ParentHash,
timestamp: timestamp,
stateRoot: headerCID.StateRoot,
td: td,
txRoot: headerCID.TxRoot,
receiptRoot: headerCID.RctRoot,
uncleRoot: headerCID.UncleRoot,
bloom: Bytes(headerCID.Bloom).String(),
ipfsBlock: IPFSBlock{
key: headerCID.IPLD.Key,
data: Bytes(headerCID.IPLD.Data).String(),
},
}
for _, txCID := range headerCID.TransactionCIDs {
ethHeaderCIDNode.transactions = append(ethHeaderCIDNode.transactions, &EthTransactionCID{
cid: txCID.CID,
txHash: txCID.TxHash,
index: int32(txCID.Index),
src: txCID.Src,
dst: txCID.Dst,
})
}
resultNodes = append(resultNodes, &ethHeaderCIDNode)
}
return &EthHeaderCIDsConnection{
nodes: resultNodes,
}, nil
}
func (r *Resolver) EthTransactionCidByTxHash(ctx context.Context, args struct {
TxHash string
}) (*EthTransactionCID, error) {
txCID, err := r.backend.Retriever.RetrieveTxCIDByHash(args.TxHash)
if err != nil {
return nil, err
}
return &EthTransactionCID{
cid: txCID.CID,
txHash: txCID.TxHash,
index: int32(txCID.Index),
src: txCID.Src,
dst: txCID.Dst,
ipfsBlock: IPFSBlock{
key: txCID.IPLD.Key,
data: Bytes(txCID.IPLD.Data).String(),
},
}, nil
}


@ -17,318 +17,15 @@
package graphql_test package graphql_test
import ( import (
"context"
"fmt"
"math/big"
"strconv"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
"github.com/jmoiron/sqlx"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth" "github.com/vulcanize/ipld-eth-server/pkg/graphql"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth/test_helpers"
"github.com/vulcanize/ipld-eth-server/v3/pkg/graphql"
"github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
ethServerShared "github.com/vulcanize/ipld-eth-server/v3/pkg/shared"
) )
var _ = Describe("GraphQL", func() { var _ = Describe("GraphQL", func() {
const ( It("Builds the schema and creates a new handler", func() {
gqlEndPoint = "127.0.0.1:8083" _, err := graphql.NewHandler(nil)
)
var (
randomAddr = common.HexToAddress("0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f")
randomHash = crypto.Keccak256Hash(randomAddr.Bytes())
blocks []*types.Block
receipts []types.Receipts
chain *core.BlockChain
db *sqlx.DB
blockHashes []common.Hash
backend *eth.Backend
graphQLServer *graphql.Service
chainConfig = params.TestChainConfig
mockTD = big.NewInt(1337)
client = graphql.NewClient(fmt.Sprintf("http://%s/graphql", gqlEndPoint))
ctx = context.Background()
blockHash common.Hash
contractAddress common.Address
)
It("test init", func() {
var err error
db = shared.SetupDB()
transformer := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
backend, err = eth.NewEthBackend(db, &eth.Config{
ChainConfig: chainConfig,
VMConfig: vm.Config{},
RPCGasCap: big.NewInt(10000000000),
GroupCacheConfig: &ethServerShared.GroupCacheConfig{
StateDB: ethServerShared.GroupConfig{
Name: "graphql_test",
CacheSizeInMB: 8,
CacheExpiryInMins: 60,
LogStatsIntervalInSecs: 0,
},
},
})
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
// make the test blockchain (and state)
blocks, receipts, chain = test_helpers.MakeChain(5, test_helpers.Genesis, test_helpers.TestChainGen)
params := statediff.Params{
IntermediateStateNodes: true,
IntermediateStorageNodes: true,
}
// iterate over the blocks, generating statediff payloads, and transforming the data into Postgres
builder := statediff.NewBuilder(chain.StateCache())
for i, block := range blocks {
blockHashes = append(blockHashes, block.Hash())
var args statediff.Args
var rcts types.Receipts
if i == 0 {
args = statediff.Args{
OldStateRoot: common.Hash{},
NewStateRoot: block.Root(),
BlockNumber: block.Number(),
BlockHash: block.Hash(),
}
} else {
args = statediff.Args{
OldStateRoot: blocks[i-1].Root(),
NewStateRoot: block.Root(),
BlockNumber: block.Number(),
BlockHash: block.Hash(),
}
rcts = receipts[i-1]
}
var diff sdtypes.StateObject
diff, err = builder.BuildStateDiffObject(args, params)
Expect(err).ToNot(HaveOccurred())
tx, err := transformer.PushBlock(block, rcts, mockTD)
Expect(err).ToNot(HaveOccurred())
for _, node := range diff.Nodes {
err = transformer.PushStateNode(tx, node, block.Hash().String())
Expect(err).ToNot(HaveOccurred())
}
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
}
// Insert some non-canonical data into the database so that we test our ability to discern canonicity
indexAndPublisher := shared.SetupTestStateDiffIndexer(ctx, chainConfig, test_helpers.Genesis.Hash())
blockHash = test_helpers.MockBlock.Hash()
contractAddress = test_helpers.ContractAddr
tx, err := indexAndPublisher.PushBlock(test_helpers.MockBlock, test_helpers.MockReceipts, test_helpers.MockBlock.Difficulty())
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
// The non-canonical header has a child
tx, err = indexAndPublisher.PushBlock(test_helpers.MockChild, test_helpers.MockReceipts, test_helpers.MockChild.Difficulty())
Expect(err).ToNot(HaveOccurred())
ccHash := sdtypes.CodeAndCodeHash{
Hash: test_helpers.CodeHash,
Code: test_helpers.ContractCode,
}
err = indexAndPublisher.PushCodeAndCodeHash(tx, ccHash)
Expect(err).ToNot(HaveOccurred())
err = tx.Submit(err)
Expect(err).ToNot(HaveOccurred())
graphQLServer, err = graphql.New(backend, gqlEndPoint, nil, []string{"*"}, rpc.HTTPTimeouts{})
Expect(err).ToNot(HaveOccurred())
err = graphQLServer.Start(nil)
Expect(err).ToNot(HaveOccurred())
})
defer It("test teardown", func() {
err := graphQLServer.Stop()
Expect(err).ToNot(HaveOccurred())
shared.TearDownDB(db)
chain.Stop()
})
Describe("eth_getLogs", func() {
It("Retrieves logs that matches the provided blockHash and contract address", func() {
logs, err := client.GetLogs(ctx, blockHash, &contractAddress)
Expect(err).ToNot(HaveOccurred())
expectedLogs := []graphql.LogResponse{
{
Topics: test_helpers.MockLog1.Topics,
Data: hexutil.Bytes(test_helpers.MockLog1.Data),
Transaction: graphql.TransactionResponse{Hash: test_helpers.MockTransactions[0].Hash()},
ReceiptCID: test_helpers.Rct1CID.String(),
Status: int32(test_helpers.MockReceipts[0].Status),
},
}
Expect(logs).To(Equal(expectedLogs))
})
It("Retrieves logs for the failed receipt status that matches the provided blockHash and another contract address", func() {
logs, err := client.GetLogs(ctx, blockHash, &test_helpers.AnotherAddress2)
Expect(err).ToNot(HaveOccurred())
expectedLogs := []graphql.LogResponse{
{
Topics: test_helpers.MockLog6.Topics,
Data: hexutil.Bytes(test_helpers.MockLog6.Data),
Transaction: graphql.TransactionResponse{Hash: test_helpers.MockTransactions[3].Hash()},
ReceiptCID: test_helpers.Rct4CID.String(),
Status: int32(test_helpers.MockReceipts[3].Status),
},
}
Expect(logs).To(Equal(expectedLogs))
})
It("Retrieves all the logs for the receipt that matches the provided blockHash and nil contract address", func() {
logs, err := client.GetLogs(ctx, blockHash, nil)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(6))
})
It("Retrieves logs with random hash", func() {
logs, err := client.GetLogs(ctx, randomHash, &contractAddress)
Expect(err).ToNot(HaveOccurred())
Expect(len(logs)).To(Equal(0))
})
})
Describe("eth_getStorageAt", func() {
It("Retrieves the storage value at the provided contract address and storage leaf key at the block with the provided hash", func() {
storageRes, err := client.GetStorageAt(ctx, blockHashes[2], contractAddress, test_helpers.IndexOne)
Expect(err).ToNot(HaveOccurred())
Expect(storageRes.Value).To(Equal(common.HexToHash("01")))
storageRes, err = client.GetStorageAt(ctx, blockHashes[3], contractAddress, test_helpers.IndexOne)
Expect(err).ToNot(HaveOccurred())
Expect(storageRes.Value).To(Equal(common.HexToHash("03")))
storageRes, err = client.GetStorageAt(ctx, blockHashes[4], contractAddress, test_helpers.IndexOne)
Expect(err).ToNot(HaveOccurred())
Expect(storageRes.Value).To(Equal(common.HexToHash("09")))
})
It("Retrieves empty data if it tries to access a contract at a blockHash which does not exist", func() {
storageRes, err := client.GetStorageAt(ctx, blockHashes[0], contractAddress, test_helpers.IndexOne)
Expect(err).ToNot(HaveOccurred())
Expect(storageRes.Value).To(Equal(common.Hash{}))
storageRes, err = client.GetStorageAt(ctx, blockHashes[1], contractAddress, test_helpers.IndexOne)
Expect(err).ToNot(HaveOccurred())
Expect(storageRes.Value).To(Equal(common.Hash{}))
})
It("Retrieves empty data if it tries to access a contract slot which does not exist", func() {
storageRes, err := client.GetStorageAt(ctx, blockHashes[3], contractAddress, randomHash.Hex())
Expect(err).ToNot(HaveOccurred())
Expect(storageRes.Value).To(Equal(common.Hash{}))
})
})
Describe("allEthHeaderCids", func() {
It("Retrieves header_cids that matches the provided blockNumber", func() {
allEthHeaderCIDsResp, err := client.AllEthHeaderCIDs(ctx, graphql.EthHeaderCIDCondition{BlockNumber: new(graphql.BigInt).SetUint64(2)})
Expect(err).ToNot(HaveOccurred())
headerCIDs, err := backend.Retriever.RetrieveHeaderAndTxCIDsByBlockNumber(2)
Expect(err).ToNot(HaveOccurred())
for idx, headerCID := range headerCIDs {
ethHeaderCID := allEthHeaderCIDsResp.Nodes[idx]
compareEthHeaderCID(ethHeaderCID, headerCID)
}
})
It("Retrieves header_cids that matches the provided blockHash", func() {
blockHash := blocks[1].Hash().String()
allEthHeaderCIDsResp, err := client.AllEthHeaderCIDs(ctx, graphql.EthHeaderCIDCondition{BlockHash: &blockHash})
Expect(err).ToNot(HaveOccurred())
headerCID, err := backend.Retriever.RetrieveHeaderAndTxCIDsByBlockHash(blocks[1].Hash())
Expect(err).ToNot(HaveOccurred())
Expect(len(allEthHeaderCIDsResp.Nodes)).To(Equal(1))
ethHeaderCID := allEthHeaderCIDsResp.Nodes[0]
compareEthHeaderCID(ethHeaderCID, headerCID)
})
})
Describe("ethTransactionCidByTxHash", func() {
It("Retrieves tx_cid that matches the provided txHash", func() {
txHash := blocks[2].Transactions()[0].Hash().String()
ethTransactionCIDResp, err := client.EthTransactionCIDByTxHash(ctx, txHash)
Expect(err).ToNot(HaveOccurred())
txCID, err := backend.Retriever.RetrieveTxCIDByHash(txHash)
Expect(err).ToNot(HaveOccurred())
compareEthTxCID(*ethTransactionCIDResp, txCID)
Expect(ethTransactionCIDResp.BlockByMhKey.Data).To(Equal(graphql.Bytes(txCID.IPLD.Data).String()))
})
}) })
}) })
func compareEthHeaderCID(ethHeaderCID graphql.EthHeaderCIDResponse, headerCID eth.HeaderCIDRecord) {
blockNumber, err := strconv.ParseInt(headerCID.BlockNumber, 10, 64)
Expect(err).ToNot(HaveOccurred())
td, err := strconv.ParseInt(headerCID.TotalDifficulty, 10, 64)
Expect(err).ToNot(HaveOccurred())
Expect(ethHeaderCID.CID).To(Equal(headerCID.CID))
Expect(ethHeaderCID.BlockNumber).To(Equal(*new(graphql.BigInt).SetUint64(uint64(blockNumber))))
Expect(ethHeaderCID.BlockHash).To(Equal(headerCID.BlockHash))
Expect(ethHeaderCID.ParentHash).To(Equal(headerCID.ParentHash))
Expect(ethHeaderCID.Timestamp).To(Equal(*new(graphql.BigInt).SetUint64(headerCID.Timestamp)))
Expect(ethHeaderCID.StateRoot).To(Equal(headerCID.StateRoot))
Expect(ethHeaderCID.Td).To(Equal(*new(graphql.BigInt).SetUint64(uint64(td))))
Expect(ethHeaderCID.TxRoot).To(Equal(headerCID.TxRoot))
Expect(ethHeaderCID.ReceiptRoot).To(Equal(headerCID.RctRoot))
Expect(ethHeaderCID.UncleRoot).To(Equal(headerCID.UncleRoot))
Expect(ethHeaderCID.Bloom).To(Equal(graphql.Bytes(headerCID.Bloom).String()))
for tIdx, txCID := range headerCID.TransactionCIDs {
ethTxCID := ethHeaderCID.EthTransactionCIDsByHeaderId.Nodes[tIdx]
compareEthTxCID(ethTxCID, txCID)
}
Expect(ethHeaderCID.BlockByMhKey.Data).To(Equal(graphql.Bytes(headerCID.IPLD.Data).String()))
Expect(ethHeaderCID.BlockByMhKey.Key).To(Equal(headerCID.IPLD.Key))
}
func compareEthTxCID(ethTxCID graphql.EthTransactionCIDResponse, txCID eth.TransactionCIDRecord) {
Expect(ethTxCID.CID).To(Equal(txCID.CID))
Expect(ethTxCID.TxHash).To(Equal(txCID.TxHash))
Expect(ethTxCID.Index).To(Equal(int32(txCID.Index)))
Expect(ethTxCID.Src).To(Equal(txCID.Src))
Expect(ethTxCID.Dst).To(Equal(txCID.Dst))
}


@ -25,7 +25,8 @@ const schema string = `
# An empty byte string is represented as '0x'. Byte strings must have an even number of hexadecimal nybbles. # An empty byte string is represented as '0x'. Byte strings must have an even number of hexadecimal nybbles.
scalar Bytes scalar Bytes
# BigInt is a large integer. Input is accepted as either a JSON number or as a string. # BigInt is a large integer. Input is accepted as either a JSON number or as a string.
# Input and output strings may be either decimal or 0x-prefixed hexadecimal depending upon the resolver implementation. # Strings may be either decimal or 0x-prefixed hexadecimal. Output values are all
# 0x-prefixed hexadecimal.
scalar BigInt scalar BigInt
# Long is a 64 bit unsigned integer. # Long is a 64 bit unsigned integer.
scalar Long scalar Long
@ -64,19 +65,7 @@ const schema string = `
# Data is unindexed data for this log. # Data is unindexed data for this log.
data: Bytes! data: Bytes!
# Transaction is the transaction that generated this log entry. # Transaction is the transaction that generated this log entry.
transaction: Transaction transaction: Transaction!
# CID for the leaf node IPLD block of the log.
cid: String!
# ReceiptCID for the Receipt IPLD block this Log exists in.
receiptCID: String!
# IPLD block data for the Log Leaf node.
ipldBlock: Bytes!
# Status of the Receipt IPLD block this Log exists in.
status: Int!
} }
# Transaction is an Ethereum transaction. # Transaction is an Ethereum transaction.
@ -137,16 +126,16 @@ const schema string = `
# empty, results will not be filtered by address. # empty, results will not be filtered by address.
addresses: [Address!] addresses: [Address!]
# Topics list restricts matches to particular event topics. Each event has a list # Topics list restricts matches to particular event topics. Each event has a list
# of topics. Topics matches a prefix of that list. An empty element array matches any # of topics. Topics matches a prefix of that list. An empty element array matches any
# topic. Non-empty elements represent an alternative that matches any of the # topic. Non-empty elements represent an alternative that matches any of the
# contained topics. # contained topics.
# #
# Examples: # Examples:
# - [] or nil matches any topic list # - [] or nil matches any topic list
# - [[A]] matches topic A in first position # - [[A]] matches topic A in first position
# - [[], [B]] matches any topic in first position, B in second position # - [[], [B]] matches any topic in first position, B in second position
# - [[A], [B]] matches topic A in first position, B in second position # - [[A], [B]] matches topic A in first position, B in second position
# - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position # - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position
topics: [[Bytes32!]!] topics: [[Bytes32!]!]
} }
@ -257,98 +246,29 @@ const schema string = `
# empty, results will not be filtered by address. # empty, results will not be filtered by address.
addresses: [Address!] addresses: [Address!]
# Topics list restricts matches to particular event topics. Each event has a list # Topics list restricts matches to particular event topics. Each event has a list
# of topics. Topics matches a prefix of that list. An empty element array matches any # of topics. Topics matches a prefix of that list. An empty element array matches any
# topic. Non-empty elements represent an alternative that matches any of the # topic. Non-empty elements represent an alternative that matches any of the
# contained topics. # contained topics.
# #
# Examples: # Examples:
# - [] or nil matches any topic list # - [] or nil matches any topic list
# - [[A]] matches topic A in first position # - [[A]] matches topic A in first position
# - [[], [B]] matches any topic in first position, B in second position # - [[], [B]] matches any topic in first position, B in second position
# - [[A], [B]] matches topic A in first position, B in second position # - [[A], [B]] matches topic A in first position, B in second position
# - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position # - [[A, B]], [C, D]] matches topic (A OR B) in first position, (C OR D) in second position
topics: [[Bytes32!]!] topics: [[Bytes32!]!]
} }
# Storage trie value with IPLD data.
type StorageResult {
value: Bytes32!
# CID for the storage trie IPLD block.
cid: String!
# Storage trie IPLD block.
ipldBlock: Bytes!
}
input EthHeaderCidCondition {
blockNumber: BigInt
blockHash: String
}
type EthTransactionCid {
cid: String!
txHash: String!
index: Int!
src: String!
dst: String!
blockByMhKey: IPFSBlock!
}
type EthTransactionCidsConnection {
nodes: [EthTransactionCid]!
}
type IPFSBlock {
key: String!
data: String!
}
type EthHeaderCid {
cid: String!
blockNumber: BigInt!
blockHash: String!
parentHash: String!
timestamp: BigInt!
stateRoot: String!
td: BigInt!
txRoot: String!
receiptRoot: String!
uncleRoot: String!
bloom: String!
ethTransactionCidsByHeaderId: EthTransactionCidsConnection!
blockByMhKey: IPFSBlock!
}
type EthHeaderCidsConnection {
nodes: [EthHeaderCid]!
}
type Query { type Query {
# Block fetches an Ethereum block by number or by hash. If neither is # Block fetches an Ethereum block by number or by hash. If neither is
# supplied, the most recent known block is returned. # supplied, the most recent known block is returned.
block(number: Long, hash: Bytes32): Block block(number: Long, hash: Bytes32): Block
# Blocks returns all the blocks between two numbers, inclusive. If # Blocks returns all the blocks between two numbers, inclusive. If
# to is not supplied, it defaults to the most recent known block. # to is not supplied, it defaults to the most recent known block.
blocks(from: Long!, to: Long): [Block!]! blocks(from: Long!, to: Long): [Block!]!
# Transaction returns a transaction specified by its hash. # Transaction returns a transaction specified by its hash.
transaction(hash: Bytes32!): Transaction transaction(hash: Bytes32!): Transaction
# Logs returns log entries matching the provided filter. # Logs returns log entries matching the provided filter.
logs(filter: FilterCriteria!): [Log!]! logs(filter: FilterCriteria!): [Log!]!
# Get storage slot by block hash and contract address.
getStorageAt(blockHash: Bytes32!, contract: Address!, slot: Bytes32!): StorageResult
# Get contract logs by block hash and contract address.
getLogs(blockHash: Bytes32!, contract: Address): [Log!]
# PostGraphile alternative to get headers with transactions using block number or block hash.
allEthHeaderCids(condition: EthHeaderCidCondition): EthHeaderCidsConnection
# PostGraphile alternative to get transactions using transaction hash.
ethTransactionCidByTxHash(txHash: String!): EthTransactionCid
} }
` `
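For orientation, here is an illustrative query (written as a Go raw-string constant, the same way the client earlier in this diff assembles its requests) exercising the fields the master branch adds to this schema; the hash, address and slot literals are placeholders. Fed to gqlclient.NewRequest, it would return the response shapes the client structs at the top of this diff decode.

// Illustrative only; hash, address and slot literals are placeholders.
const exampleQuery = `
query {
  getLogs(blockHash: "0x0000000000000000000000000000000000000000000000000000000000000001",
          contract: "0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f") {
    topics
    data
    transaction { hash }
    cid
    receiptCID
    status
  }
  getStorageAt(blockHash: "0x0000000000000000000000000000000000000000000000000000000000000001",
               contract: "0x1C3ab14BBaD3D99F4203bd7a11aCB94882050E6f",
               slot: "0x0000000000000000000000000000000000000000000000000000000000000001") {
    value
    cid
    ipldBlock
  }
  allEthHeaderCids(condition: { blockNumber: "2" }) {
    nodes {
      cid
      blockHash
      ethTransactionCidsByHeaderId { nodes { cid txHash } }
    }
  }
}`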


@ -29,7 +29,7 @@ import (
"github.com/graph-gophers/graphql-go/relay" "github.com/graph-gophers/graphql-go/relay"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth" "github.com/vulcanize/ipld-eth-server/pkg/eth"
) )
// Service encapsulates a GraphQL service. // Service encapsulates a GraphQL service.
@ -69,7 +69,7 @@ func (s *Service) Start(server *p2p.Server) error {
return err return err
} }
handler := node.NewHTTPHandlerStack(s.handler, s.cors, s.vhosts, nil) handler := node.NewHTTPHandlerStack(s.handler, s.cors, s.vhosts)
// start http server // start http server
_, addr, err := node.StartHTTPEndpoint(s.endpoint, rpc.DefaultHTTPTimeouts, handler) _, addr, err := node.StartHTTPEndpoint(s.endpoint, rpc.DefaultHTTPTimeouts, handler)


@ -1,122 +0,0 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package graphql
import (
"encoding/hex"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common/hexutil"
)
// Bytes marshals as a JSON string with \x prefix.
// The empty slice marshals as "\x".
type Bytes []byte
// MarshalText implements encoding.TextMarshaler
func (b Bytes) MarshalText() ([]byte, error) {
result := make([]byte, len(b)*2+2)
copy(result, `\x`)
hex.Encode(result[2:], b)
return result, nil
}
// String returns the hex encoding of b.
func (b Bytes) String() string {
return b.encode()
}
// encode encodes b as a hex string with "\x" prefix.
// This makes the output match that returned by postgraphile;
// graphql-go prepends another "\" to the output, resulting in the prefix "\\x".
func (b Bytes) encode() string {
result := make([]byte, len(b)*2+2)
copy(result, `\x`)
hex.Encode(result[2:], b)
return string(result)
}
type BigInt big.Int
// ToInt converts b to a big.Int.
func (b *BigInt) ToInt() *big.Int {
return (*big.Int)(b)
}
// String returns value of b as a decimal string.
func (b *BigInt) String() string {
return b.ToInt().String()
}
// SetUint64 sets b to x and returns b.
func (b *BigInt) SetUint64(x uint64) *BigInt {
var val big.Int
val.SetUint64(x)
*b = (BigInt)(val)
return b
}
// MarshalText implements encoding.TextMarshaler
func (b BigInt) MarshalText() ([]byte, error) {
return []byte(b.String()), nil
}
// UnmarshalText implements encoding.TextUnmarshaler
func (b *BigInt) UnmarshalText(input []byte) error {
raw, err := checkNumberText(input)
if err != nil {
return err
}
if len(raw) > 64 {
return hexutil.ErrBig256Range
}
var val big.Int
val.SetString(string(input[:]), 10)
*b = (BigInt)(val)
return nil
}
// ImplementsGraphQLType returns true if BigInt implements the provided GraphQL type.
func (b BigInt) ImplementsGraphQLType(name string) bool { return name == "BigInt" }
// UnmarshalGraphQL unmarshals the provided GraphQL query data.
func (b *BigInt) UnmarshalGraphQL(input interface{}) error {
var err error
switch input := input.(type) {
case string:
return b.UnmarshalText([]byte(input))
case int32:
var num big.Int
num.SetInt64(int64(input))
*b = BigInt(num)
default:
err = fmt.Errorf("unexpected type %T for BigInt", input)
}
return err
}
func checkNumberText(input []byte) (raw []byte, err error) {
if len(input) == 0 {
return nil, nil // empty strings are allowed
}
if len(input) > 1 && input[0] == '0' {
return nil, hexutil.ErrLeadingZero
}
return input, nil
}
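The behaviour of these two scalars can be summarised with a small hypothetical sketch (module path as used on the master side of this diff): Bytes renders as postgraphile-style "\x"-prefixed hex, and BigInt round-trips decimal strings.

package main

import (
	"fmt"

	"github.com/vulcanize/ipld-eth-server/v3/pkg/graphql"
)

func main() {
	// Bytes: "\x" prefix plus plain hex, matching postgraphile output.
	fmt.Println(graphql.Bytes{0xab, 0xcd}.String()) // \xabcd

	// BigInt: decimal text in, decimal text out.
	var n graphql.BigInt
	if err := n.UnmarshalText([]byte("1337")); err != nil {
		panic(err)
	}
	fmt.Println(n.String(), n.ToInt().Uint64()) // 1337 1337
}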


@ -1,90 +0,0 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package net
import (
"fmt"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc"
)
// APIName is the namespace for the watcher's net api
const APIName = "net"
// APIVersion is the version of the watcher's net api
const APIVersion = "0.0.1"
// PublicNetAPI is the net namespace API
type PublicNetAPI struct {
networkVersion uint64
// Proxy node for forwarding cache misses
rpc *rpc.Client
ethClient *ethclient.Client
}
// NewPublicNetAPI creates a new PublicNetAPI with the provided network ID and optional proxy RPC client
func NewPublicNetAPI(networkID uint64, client *rpc.Client) *PublicNetAPI {
var ethClient *ethclient.Client
if client != nil {
ethClient = ethclient.NewClient(client)
}
return &PublicNetAPI{
networkVersion: networkID,
rpc: client,
ethClient: ethClient,
}
}
// Listening returns whether the node is listening for network connections.
func (pna *PublicNetAPI) Listening() bool {
// in this case it is actually whether or not the proxied node is listening
if pna.rpc != nil {
var listening bool
if err := pna.rpc.Call(&listening, "net_listening"); err == nil {
return listening
}
}
return false
}
// PeerCount returns the number of connected peers
func (pna *PublicNetAPI) PeerCount() hexutil.Uint {
// in this case it is actually the peer count of the proxied node
if pna.rpc != nil {
var num hexutil.Uint
if err := pna.rpc.Call(&num, "net_peerCount"); err == nil {
return num
}
}
return hexutil.Uint(0)
}
// Version returns the current ethereum network version.
func (pna *PublicNetAPI) Version() string {
if pna.networkVersion != 0 {
return fmt.Sprintf("%d", pna.networkVersion)
}
if pna.rpc != nil {
var version string
if err := pna.rpc.Call(&version, "net_version"); err == nil {
return version
}
}
return ""
}


@ -1,47 +0,0 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package net_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/ipld-eth-server/v3/pkg/net"
)
var _ = Describe("API", func() {
var (
api *net.PublicNetAPI
)
BeforeEach(func() {
api = net.NewPublicNetAPI(1, nil)
})
Describe("net_listening", func() {
It("Retrieves whether or not the node is listening to the p2p network", func() {
listening := api.Listening()
Expect(listening).To(BeFalse())
})
})
Describe("net_version", func() {
It("Retrieves the network id", func() {
version := api.Version()
Expect(version).To(Equal("1"))
})
})
// TODO: test PeerCount with mock proxy node
})


@ -1,35 +0,0 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package net_test
import (
"io/ioutil"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
)
func TestNetSuite(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "eth ipld server net suite test")
}
var _ = BeforeSuite(func() {
logrus.SetOutput(ioutil.Discard)
})


@ -29,11 +29,11 @@ import (
func StartHTTPEndpoint(endpoint string, apis []rpc.API, modules []string, cors []string, vhosts []string, timeouts rpc.HTTPTimeouts) (*rpc.Server, error) { func StartHTTPEndpoint(endpoint string, apis []rpc.API, modules []string, cors []string, vhosts []string, timeouts rpc.HTTPTimeouts) (*rpc.Server, error) {
srv := rpc.NewServer() srv := rpc.NewServer()
err := node.RegisterApis(apis, modules, srv, false) err := node.RegisterApisFromWhitelist(apis, modules, srv, false)
if err != nil { if err != nil {
utils.Fatalf("Could not register HTTP API: %w", err) utils.Fatalf("Could not register HTTP API: %w", err)
} }
handler := node.NewHTTPHandlerStack(srv, cors, vhosts, nil) handler := node.NewHTTPHandlerStack(srv, cors, vhosts)
// start http server // start http server
_, addr, err := node.StartHTTPEndpoint(endpoint, rpc.DefaultHTTPTimeouts, handler) _, addr, err := node.StartHTTPEndpoint(endpoint, rpc.DefaultHTTPTimeouts, handler)
@ -41,7 +41,7 @@ func StartHTTPEndpoint(endpoint string, apis []rpc.API, modules []string, cors [
utils.Fatalf("Could not start RPC api: %v", err) utils.Fatalf("Could not start RPC api: %v", err)
} }
extapiURL := fmt.Sprintf("http://%v/", addr) extapiURL := fmt.Sprintf("http://%v/", addr)
log.Infof("HTTP endpoint opened %s", extapiURL) log.Info("HTTP endpoint opened", "url", extapiURL)
return srv, err return srv, err
} }
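A hedged sketch of wiring the net API from earlier in this diff through this helper; the local import path (aliased srpc below) is an assumption, and the endpoint, CORS and vhost values are placeholders.

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/rpc"

	"github.com/vulcanize/ipld-eth-server/v3/pkg/net"
	srpc "github.com/vulcanize/ipld-eth-server/v3/pkg/rpc" // assumed path for the package above
)

func main() {
	apis := []rpc.API{{
		Namespace: net.APIName,
		Version:   net.APIVersion,
		Service:   net.NewPublicNetAPI(1, nil),
		Public:    true,
	}}

	srv, err := srpc.StartHTTPEndpoint("127.0.0.1:8081", apis, []string{net.APIName},
		[]string{"*"}, []string{"*"}, rpc.DefaultHTTPTimeouts)
	if err != nil {
		log.Fatal(err)
	}
	defer srv.Stop()

	select {} // block; the endpoint serves until the process is killed
}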


@ -25,7 +25,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-eth-server/v3/pkg/prom" "github.com/vulcanize/ipld-eth-server/pkg/prom"
) )
var ( var (


@ -24,7 +24,7 @@ import (
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/vulcanize/ipld-eth-server/v3/pkg/prom" "github.com/vulcanize/ipld-eth-server/pkg/prom"
) )
// StartWSEndpoint starts a websocket endpoint. // StartWSEndpoint starts a websocket endpoint.
@ -37,7 +37,7 @@ func StartWSEndpoint(endpoint string, apis []rpc.API, modules []string, wsOrigin
// Register all the APIs exposed by the services // Register all the APIs exposed by the services
handler := rpc.NewServer() handler := rpc.NewServer()
err = node.RegisterApis(apis, modules, handler, exposeAll) err = node.RegisterApisFromWhitelist(apis, modules, handler, exposeAll)
if err != nil { if err != nil {
utils.Fatalf("Could not register WS API: %w", err) utils.Fatalf("Could not register WS API: %w", err)
} }


@ -19,11 +19,14 @@ package serve
import ( import (
"context" "context"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff/types"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"github.com/vulcanize/ipld-eth-server/v3/pkg/eth" "github.com/vulcanize/ipld-eth-indexer/pkg/shared"
"github.com/vulcanize/ipld-eth-server/pkg/eth"
v "github.com/vulcanize/ipld-eth-server/version"
) )
// APIName is the namespace used for the state diffing service API // APIName is the namespace used for the state diffing service API
@ -34,15 +37,13 @@ const APIVersion = "0.0.1"
// PublicServerAPI is the public api for the watcher // PublicServerAPI is the public api for the watcher
type PublicServerAPI struct { type PublicServerAPI struct {
w Server w Server
rpc *rpc.Client
} }
// NewPublicServerAPI creates a new PublicServerAPI with the provided underlying Server process // NewPublicServerAPI creates a new PublicServerAPI with the provided underlying Server process
func NewPublicServerAPI(w Server, client *rpc.Client) *PublicServerAPI { func NewPublicServerAPI(w Server) *PublicServerAPI {
return &PublicServerAPI{ return &PublicServerAPI{
w: w, w: w,
rpc: client,
} }
} }
@ -85,12 +86,36 @@ func (api *PublicServerAPI) Stream(ctx context.Context, params eth.SubscriptionS
return rpcSub, nil return rpcSub, nil
} }
// WatchAddress makes a geth WatchAddress API call with the given operation and args // Chain returns the chain type that this watcher instance supports
func (api *PublicServerAPI) WatchAddress(operation types.OperationType, args []types.WatchAddressArg) error { func (api *PublicServerAPI) Chain() shared.ChainType {
err := api.rpc.Call(nil, "statediff_watchAddress", operation, args) return shared.Ethereum
if err != nil { }
return err
} // Struct for holding watcher meta data
type InfoAPI struct{}
return nil
// NewInfoAPI creates a new InfoAPI
func NewInfoAPI() *InfoAPI {
return &InfoAPI{}
}
// Modules returns modules supported by this api
func (iapi *InfoAPI) Modules() map[string]string {
return map[string]string{
"vdb": "Stream",
}
}
// NodeInfo gathers and returns a collection of metadata for the watcher
func (iapi *InfoAPI) NodeInfo() *p2p.NodeInfo {
return &p2p.NodeInfo{
// TODO: formalize this
ID: "vulcanizeDB",
Name: "ipld-eth-server",
}
}
// Version returns the version of the watcher
func (iapi *InfoAPI) Version() string {
return v.VersionWithMeta
} }


@ -17,23 +17,23 @@
package serve package serve
import ( import (
"errors"
"fmt" "fmt"
"math/big" "math/big"
"os" "os"
"path/filepath" "path/filepath"
"time"
"github.com/ethereum/go-ethereum/rpc"
"github.com/vulcanize/ipld-eth-indexer/pkg/shared"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/jmoiron/sqlx"
"github.com/spf13/viper"
"github.com/vulcanize/ipld-eth-server/v3/pkg/prom" "github.com/ethereum/go-ethereum/params"
ethServerShared "github.com/vulcanize/ipld-eth-server/v3/pkg/shared" "github.com/spf13/viper"
"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
"github.com/vulcanize/ipld-eth-indexer/utils"
"github.com/vulcanize/ipld-eth-server/pkg/prom"
"github.com/vulcanize/ipld-eth-server/pkg/eth"
) )
// Env variables // Env variables
@ -48,53 +48,21 @@ const (
ETH_DEFAULT_SENDER_ADDR = "ETH_DEFAULT_SENDER_ADDR" ETH_DEFAULT_SENDER_ADDR = "ETH_DEFAULT_SENDER_ADDR"
ETH_RPC_GAS_CAP = "ETH_RPC_GAS_CAP" ETH_RPC_GAS_CAP = "ETH_RPC_GAS_CAP"
ETH_CHAIN_CONFIG = "ETH_CHAIN_CONFIG"
ETH_SUPPORTS_STATEDIFF = "ETH_SUPPORTS_STATEDIFF" ETH_SUPPORTS_STATEDIFF = "ETH_SUPPORTS_STATEDIFF"
ETH_FORWARD_ETH_CALLS = "ETH_FORWARD_ETH_CALLS"
ETH_PROXY_ON_ERROR = "ETH_PROXY_ON_ERROR"
VALIDATOR_ENABLED = "VALIDATOR_ENABLED"
VALIDATOR_EVERY_NTH_BLOCK = "VALIDATOR_EVERY_NTH_BLOCK"
) )
// Config struct // Config struct
type Config struct { type Config struct {
DB *sqlx.DB DB *postgres.DB
DBConfig postgres.Config DBConfig postgres.Config
WSEndpoint string
WSEnabled bool HTTPEndpoint string
WSEndpoint string IPCEndpoint string
HTTPEnabled bool
HTTPEndpoint string
IPCEnabled bool
IPCEndpoint string
EthGraphqlEnabled bool
EthGraphqlEndpoint string
IpldGraphqlEnabled bool
IpldGraphqlEndpoint string
IpldPostgraphileEndpoint string
TracingHttpEndpoint string
TracingPostgraphileEndpoint string
ChainConfig *params.ChainConfig ChainConfig *params.ChainConfig
DefaultSender *common.Address DefaultSender *common.Address
RPCGasCap *big.Int RPCGasCap *big.Int
EthHttpEndpoint string
Client *rpc.Client Client *rpc.Client
SupportStateDiff bool SupportStateDiff bool
ForwardEthCalls bool
ProxyOnError bool
NodeNetworkID string
// Cache configuration.
GroupCache *ethServerShared.GroupCacheConfig
StateValidationEnabled bool
StateValidationEveryNthBlock uint64
} }
// NewConfig is used to initialize a watcher config from a .toml file // NewConfig is used to initialize a watcher config from a .toml file
@ -102,112 +70,47 @@ type Config struct {
func NewConfig() (*Config, error) { func NewConfig() (*Config, error) {
c := new(Config) c := new(Config)
viper.BindEnv("ethereum.httpPath", ETH_HTTP_PATH) viper.BindEnv("server.wsPath", SERVER_WS_PATH)
viper.BindEnv("server.ipcPath", SERVER_IPC_PATH)
viper.BindEnv("server.httpPath", SERVER_HTTP_PATH)
viper.BindEnv("ethereum.httpPath", shared.ETH_HTTP_PATH)
viper.BindEnv("ethereum.defaultSender", ETH_DEFAULT_SENDER_ADDR) viper.BindEnv("ethereum.defaultSender", ETH_DEFAULT_SENDER_ADDR)
viper.BindEnv("ethereum.rpcGasCap", ETH_RPC_GAS_CAP) viper.BindEnv("ethereum.rpcGasCap", ETH_RPC_GAS_CAP)
viper.BindEnv("ethereum.chainConfig", ETH_CHAIN_CONFIG)
viper.BindEnv("ethereum.supportsStateDiff", ETH_SUPPORTS_STATEDIFF) viper.BindEnv("ethereum.supportsStateDiff", ETH_SUPPORTS_STATEDIFF)
viper.BindEnv("ethereum.forwardEthCalls", ETH_FORWARD_ETH_CALLS)
viper.BindEnv("ethereum.proxyOnError", ETH_PROXY_ON_ERROR)
c.dbInit() c.DBConfig.Init()
ethHTTP := viper.GetString("ethereum.httpPath") ethHTTP := viper.GetString("ethereum.httpPath")
ethHTTPEndpoint := fmt.Sprintf("http://%s", ethHTTP) nodeInfo, cli, err := shared.GetEthNodeAndClient(fmt.Sprintf("http://%s", ethHTTP))
nodeInfo, cli, err := getEthNodeAndClient(ethHTTPEndpoint)
c.NodeNetworkID = nodeInfo.NetworkID
if err != nil { if err != nil {
return nil, err return nil, err
} }
c.Client = cli c.Client = cli
c.SupportStateDiff = viper.GetBool("ethereum.supportsStateDiff") c.SupportStateDiff = viper.GetBool("ethereum.supportsStateDiff")
c.ForwardEthCalls = viper.GetBool("ethereum.forwardEthCalls")
c.ProxyOnError = viper.GetBool("ethereum.proxyOnError")
c.EthHttpEndpoint = ethHTTPEndpoint
// websocket server wsPath := viper.GetString("server.wsPath")
wsEnabled := viper.GetBool("eth.server.ws") if wsPath == "" {
if wsEnabled { wsPath = "127.0.0.1:8080"
wsPath := viper.GetString("eth.server.wsPath")
if wsPath == "" {
wsPath = "127.0.0.1:8080"
}
c.WSEndpoint = wsPath
} }
c.WSEnabled = wsEnabled c.WSEndpoint = wsPath
ipcPath := viper.GetString("server.ipcPath")
// ipc server if ipcPath == "" {
ipcEnabled := viper.GetBool("eth.server.ipc") home, err := os.UserHomeDir()
if ipcEnabled { if err != nil {
ipcPath := viper.GetString("eth.server.ipcPath") return nil, err
if ipcPath == "" {
home, err := os.UserHomeDir()
if err != nil {
return nil, err
}
ipcPath = filepath.Join(home, ".vulcanize/vulcanize.ipc")
} }
c.IPCEndpoint = ipcPath ipcPath = filepath.Join(home, ".vulcanize/vulcanize.ipc")
} }
c.IPCEnabled = ipcEnabled c.IPCEndpoint = ipcPath
httpPath := viper.GetString("server.httpPath")
// http server if httpPath == "" {
httpEnabled := viper.GetBool("eth.server.http") httpPath = "127.0.0.1:8081"
if httpEnabled {
httpPath := viper.GetString("eth.server.httpPath")
if httpPath == "" {
httpPath = "127.0.0.1:8081"
}
c.HTTPEndpoint = httpPath
} }
c.HTTPEnabled = httpEnabled c.HTTPEndpoint = httpPath
// eth graphql endpoint
ethGraphqlEnabled := viper.GetBool("eth.server.graphql")
if ethGraphqlEnabled {
ethGraphqlPath := viper.GetString("eth.server.graphqlPath")
if ethGraphqlPath == "" {
ethGraphqlPath = "127.0.0.1:8082"
}
c.EthGraphqlEndpoint = ethGraphqlPath
}
c.EthGraphqlEnabled = ethGraphqlEnabled
// ipld graphql endpoint
ipldGraphqlEnabled := viper.GetBool("ipld.server.graphql")
if ipldGraphqlEnabled {
ipldGraphqlPath := viper.GetString("ipld.server.graphqlPath")
if ipldGraphqlPath == "" {
ipldGraphqlPath = "127.0.0.1:8083"
}
c.IpldGraphqlEndpoint = ipldGraphqlPath
ipldPostgraphilePath := viper.GetString("ipld.postgraphilePath")
if ipldPostgraphilePath == "" {
return nil, errors.New("ipld-postgraphile-path parameter is empty")
}
c.IpldPostgraphileEndpoint = ipldPostgraphilePath
tracingHttpEndpoint := viper.GetString("tracing.httpPath")
tracingPostgraphilePath := viper.GetString("tracing.postgraphilePath")
// these two parameters either can be both empty or both set
if (tracingHttpEndpoint == "" && tracingPostgraphilePath != "") || (tracingHttpEndpoint != "" && tracingPostgraphilePath == "") {
return nil, errors.New("tracing.httpPath and tracing.postgraphilePath parameters either can be both empty or both set")
}
c.TracingHttpEndpoint = tracingHttpEndpoint
c.TracingPostgraphileEndpoint = tracingPostgraphilePath
}
c.IpldGraphqlEnabled = ipldGraphqlEnabled
overrideDBConnConfig(&c.DBConfig) overrideDBConnConfig(&c.DBConfig)
-	serveDB, err := ethServerShared.NewDB(c.DBConfig.DbConnectionString(), c.DBConfig)
-	if err != nil {
-		return nil, err
-	}
-	prom.RegisterDBCollector(c.DBConfig.DatabaseName, serveDB)
-	c.DB = serveDB
+	serveDB := utils.LoadPostgres(c.DBConfig, nodeInfo)
+	prom.RegisterDBCollector(c.DBConfig.Name, serveDB.DB)
+	c.DB = &serveDB
	defaultSenderStr := viper.GetString("ethereum.defaultSender")
	if defaultSenderStr != "" {

@@ -220,17 +123,7 @@ func NewConfig() (*Config, error) {
			c.RPCGasCap = rpcGasCap
		}
	}
-	chainConfigPath := viper.GetString("ethereum.chainConfig")
-	if chainConfigPath != "" {
-		c.ChainConfig, err = statediff.LoadConfig(chainConfigPath)
-	} else {
-		c.ChainConfig, err = statediff.ChainConfig(nodeInfo.ChainID)
-	}
-	c.loadGroupCacheConfig()
-	c.loadValidatorConfig()
+	c.ChainConfig, err = eth.ChainConfig(nodeInfo.ChainID)
	return c, err
}

@@ -239,57 +132,6 @@ func overrideDBConnConfig(con *postgres.Config) {
	viper.BindEnv("database.server.maxOpen", SERVER_MAX_OPEN_CONNECTIONS)
	viper.BindEnv("database.server.maxLifetime", SERVER_MAX_CONN_LIFETIME)
	con.MaxIdle = viper.GetInt("database.server.maxIdle")
-	con.MaxConns = viper.GetInt("database.server.maxOpen")
-	con.MaxConnLifetime = time.Duration(viper.GetInt("database.server.maxLifetime"))
+	con.MaxOpen = viper.GetInt("database.server.maxOpen")
+	con.MaxLifetime = viper.GetInt("database.server.maxLifetime")
-}
-func (c *Config) dbInit() {
-	viper.BindEnv("database.name", DATABASE_NAME)
-	viper.BindEnv("database.hostname", DATABASE_HOSTNAME)
-	viper.BindEnv("database.port", DATABASE_PORT)
-	viper.BindEnv("database.user", DATABASE_USER)
-	viper.BindEnv("database.password", DATABASE_PASSWORD)
-	viper.BindEnv("database.maxIdle", DATABASE_MAX_IDLE_CONNECTIONS)
-	viper.BindEnv("database.maxOpen", DATABASE_MAX_OPEN_CONNECTIONS)
-	viper.BindEnv("database.maxLifetime", DATABASE_MAX_CONN_LIFETIME)
-	c.DBConfig.DatabaseName = viper.GetString("database.name")
-	c.DBConfig.Hostname = viper.GetString("database.hostname")
-	c.DBConfig.Port = viper.GetInt("database.port")
-	c.DBConfig.Username = viper.GetString("database.user")
-	c.DBConfig.Password = viper.GetString("database.password")
-	c.DBConfig.MaxIdle = viper.GetInt("database.maxIdle")
-	c.DBConfig.MaxConns = viper.GetInt("database.maxOpen")
-	c.DBConfig.MaxConnLifetime = time.Duration(viper.GetInt("database.maxLifetime"))
-}
-func (c *Config) loadGroupCacheConfig() {
-	viper.BindEnv("groupcache.pool.enabled", ethServerShared.GcachePoolEnabled)
-	viper.BindEnv("groupcache.pool.httpEndpoint", ethServerShared.GcachePoolHttpPath)
-	viper.BindEnv("groupcache.pool.peerHttpEndpoints", ethServerShared.GcachePoolHttpPeers)
-	viper.BindEnv("groupcache.statedb.cacheSizeInMB", ethServerShared.GcacheStatedbCacheSize)
-	viper.BindEnv("groupcache.statedb.cacheExpiryInMins", ethServerShared.GcacheStatedbCacheExpiry)
-	viper.BindEnv("groupcache.statedb.logStatsIntervalInSecs", ethServerShared.GcacheStatedbLogStatsInterval)
-	gcc := ethServerShared.GroupCacheConfig{}
-	gcc.Pool.Enabled = viper.GetBool("groupcache.pool.enabled")
-	if gcc.Pool.Enabled {
-		gcc.Pool.HttpEndpoint = viper.GetString("groupcache.pool.httpEndpoint")
-		gcc.Pool.PeerHttpEndpoints = viper.GetStringSlice("groupcache.pool.peerHttpEndpoints")
-	}
-	// Irrespective of whether the pool is enabled, we always use the hot/local cache.
-	gcc.StateDB.CacheSizeInMB = viper.GetInt("groupcache.statedb.cacheSizeInMB")
-	gcc.StateDB.CacheExpiryInMins = viper.GetInt("groupcache.statedb.cacheExpiryInMins")
-	gcc.StateDB.LogStatsIntervalInSecs = viper.GetInt("groupcache.statedb.logStatsIntervalInSecs")
-	c.GroupCache = &gcc
-}
-func (c *Config) loadValidatorConfig() {
-	viper.BindEnv("validator.enabled", VALIDATOR_ENABLED)
-	viper.BindEnv("validator.everyNthBlock", VALIDATOR_EVERY_NTH_BLOCK)
-	c.StateValidationEnabled = viper.GetBool("validator.enabled")
-	c.StateValidationEveryNthBlock = viper.GetUint64("validator.everyNthBlock")
-}
}

View File

@@ -1,50 +0,0 @@
package serve
import (
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/statediff/indexer/node"
"github.com/spf13/viper"
)
// Env variables
const (
HTTP_TIMEOUT = "HTTP_TIMEOUT"
ETH_WS_PATH = "ETH_WS_PATH"
ETH_HTTP_PATH = "ETH_HTTP_PATH"
ETH_NODE_ID = "ETH_NODE_ID"
ETH_CLIENT_NAME = "ETH_CLIENT_NAME"
ETH_GENESIS_BLOCK = "ETH_GENESIS_BLOCK"
ETH_NETWORK_ID = "ETH_NETWORK_ID"
ETH_CHAIN_ID = "ETH_CHAIN_ID"
DATABASE_NAME = "DATABASE_NAME"
DATABASE_HOSTNAME = "DATABASE_HOSTNAME"
DATABASE_PORT = "DATABASE_PORT"
DATABASE_USER = "DATABASE_USER"
DATABASE_PASSWORD = "DATABASE_PASSWORD"
DATABASE_MAX_IDLE_CONNECTIONS = "DATABASE_MAX_IDLE_CONNECTIONS"
DATABASE_MAX_OPEN_CONNECTIONS = "DATABASE_MAX_OPEN_CONNECTIONS"
DATABASE_MAX_CONN_LIFETIME = "DATABASE_MAX_CONN_LIFETIME"
)
// GetEthNodeAndClient returns eth node info and client from path url
func getEthNodeAndClient(path string) (node.Info, *rpc.Client, error) {
viper.BindEnv("ethereum.nodeID", ETH_NODE_ID)
viper.BindEnv("ethereum.clientName", ETH_CLIENT_NAME)
viper.BindEnv("ethereum.genesisBlock", ETH_GENESIS_BLOCK)
viper.BindEnv("ethereum.networkID", ETH_NETWORK_ID)
viper.BindEnv("ethereum.chainID", ETH_CHAIN_ID)
rpcClient, err := rpc.Dial(path)
if err != nil {
return node.Info{}, nil, err
}
return node.Info{
ID: viper.GetString("ethereum.nodeID"),
ClientName: viper.GetString("ethereum.clientName"),
GenesisBlock: viper.GetString("ethereum.genesisBlock"),
NetworkID: viper.GetString("ethereum.networkID"),
ChainID: viper.GetUint64("ethereum.chainID"),
}, rpcClient, nil
}
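
For orientation, the constants above are the environment variables that this (since removed) `env.go` bound to the `ethereum.*` and `database.*` viper keys. A minimal sketch of exporting them before starting the server; every value here is illustrative, not taken from this repository:

```bash
# Ethereum node metadata consumed via the ethereum.* keys (values are examples only)
export ETH_HTTP_PATH="127.0.0.1:8545"
export ETH_NODE_ID="arch1"
export ETH_CLIENT_NAME="Geth"
export ETH_NETWORK_ID="1"
export ETH_CHAIN_ID="1"

# Postgres settings consumed via the database.* keys (values are examples only)
export DATABASE_NAME="vulcanize_public"
export DATABASE_HOSTNAME="localhost"
export DATABASE_PORT="5432"
export DATABASE_USER="vdbm"
export DATABASE_PASSWORD="password"
```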

View File

@@ -18,21 +18,22 @@ package serve
import (
	"fmt"
-	"strconv"
	"sync"
-	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	ethnode "github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/rpc"
-	"github.com/jmoiron/sqlx"
	log "github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-eth-server/v3/pkg/eth"
-	"github.com/vulcanize/ipld-eth-server/v3/pkg/net"
+	eth2 "github.com/vulcanize/ipld-eth-indexer/pkg/eth"
+	"github.com/vulcanize/ipld-eth-indexer/pkg/postgres"
+	"github.com/vulcanize/ipld-eth-server/pkg/eth"
)
const (

@@ -48,7 +49,7 @@ type Server interface {
	APIs() []rpc.API
	Protocols() []p2p.Protocol
	// Pub-Sub handling event loop
-	Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth.ConvertedPayload)
+	Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth2.ConvertedPayload)
	// Method to subscribe to the service
	Subscribe(id rpc.ID, sub chan<- SubscriptionPayload, quitChan chan<- bool, params eth.SubscriptionSettings)
	// Method to unsubscribe from the service

@@ -74,7 +75,7 @@ type Service struct {
	// A mapping of subscription params hash to the corresponding subscription params
	SubscriptionTypes map[common.Hash]eth.SubscriptionSettings
	// Underlying db
-	db *sqlx.DB
+	db *postgres.DB
	// wg for syncing serve processes
	serveWg *sync.WaitGroup
	// rpc client for forwarding cache misses

@@ -83,12 +84,6 @@ type Service struct {
	supportsStateDiffing bool
	// backend for the server
	backend *eth.Backend
-	// whether to forward eth_calls directly to proxy node
-	forwardEthCalls bool
-	// whether to forward all calls to proxy node if they throw an error locally
-	proxyOnError bool
-	// eth node network id
-	nodeNetworkId string
}
// NewServer creates a new Server using an underlying Service struct

@@ -103,16 +98,12 @@ func NewServer(settings *Config) (Server, error) {
	sap.SubscriptionTypes = make(map[common.Hash]eth.SubscriptionSettings)
	sap.client = settings.Client
	sap.supportsStateDiffing = settings.SupportStateDiff
-	sap.forwardEthCalls = settings.ForwardEthCalls
-	sap.proxyOnError = settings.ProxyOnError
-	sap.nodeNetworkId = settings.NodeNetworkID
	var err error
	sap.backend, err = eth.NewEthBackend(sap.db, &eth.Config{
		ChainConfig: settings.ChainConfig,
-		VMConfig: vm.Config{NoBaseFee: true},
+		VmConfig: vm.Config{},
		DefaultSender: settings.DefaultSender,
		RPCGasCap: settings.RPCGasCap,
-		GroupCacheConfig: settings.GroupCache,
	})
	return sap, err
}

@@ -124,29 +115,37 @@ func (sap *Service) Protocols() []p2p.Protocol {
// APIs returns the RPC descriptors the watcher service offers
func (sap *Service) APIs() []rpc.API {
-	networkID, _ := strconv.ParseUint(sap.nodeNetworkId, 10, 64)
+	infoAPI := NewInfoAPI()
	apis := []rpc.API{
		{
			Namespace: APIName,
			Version: APIVersion,
-			Service: NewPublicServerAPI(sap, sap.client),
+			Service: NewPublicServerAPI(sap),
			Public: true,
		},
		{
-			Namespace: net.APIName,
-			Version: net.APIVersion,
-			Service: net.NewPublicNetAPI(networkID, sap.client),
+			Namespace: "rpc",
+			Version: APIVersion,
+			Service: infoAPI,
+			Public: true,
+		},
+		{
+			Namespace: "net",
+			Version: APIVersion,
+			Service: infoAPI,
+			Public: true,
+		},
+		{
+			Namespace: "admin",
+			Version: APIVersion,
+			Service: infoAPI,
			Public: true,
		},
	}
-	ethAPI, err := eth.NewPublicEthAPI(sap.backend, sap.client, sap.supportsStateDiffing, sap.forwardEthCalls, sap.proxyOnError)
-	if err != nil {
-		log.Fatalf("unable to create public eth api: %v", err)
-	}
	return append(apis, rpc.API{
		Namespace: eth.APIName,
		Version: eth.APIVersion,
-		Service: ethAPI,
+		Service: eth.NewPublicEthAPI(sap.backend, sap.client, sap.supportsStateDiffing),
		Public: true,
	})
}

@@ -155,7 +154,7 @@ func (sap *Service) APIs() []rpc.API {
// It filters and sends this data to any subscribers to the service
// This process can also be stood up alone, without a screenAndServePayload attached to a Sync process
// and it will hang on the WaitGroup indefinitely, allowing the Service to serve historical data requests only
-func (sap *Service) Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth.ConvertedPayload) {
+func (sap *Service) Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth2.ConvertedPayload) {
	sap.serveWg = wg
	go func() {
		wg.Add(1)

@@ -174,7 +173,7 @@ func (sap *Service) Serve(wg *sync.WaitGroup, screenAndServePayload <-chan eth.C
}
// filterAndServe filters the payload according to each subscription type and sends to the subscriptions
-func (sap *Service) filterAndServe(payload eth.ConvertedPayload) {
+func (sap *Service) filterAndServe(payload eth2.ConvertedPayload) {
	log.Debug("sending eth ipld payload to subscriptions")
	sap.Lock()
	sap.serveWg.Add(1)

@@ -347,7 +346,7 @@ func (sap *Service) Unsubscribe(id rpc.ID) {
func (sap *Service) Start() error {
	log.Info("starting eth ipld server")
	wg := new(sync.WaitGroup)
-	payloadChan := make(chan eth.ConvertedPayload, PayloadChanBufferSize)
+	payloadChan := make(chan eth2.ConvertedPayload, PayloadChanBufferSize)
	sap.Serve(wg, payloadChan)
	return nil
}

View File

@@ -19,11 +19,4 @@ package shared
const (
	DefaultMaxBatchSize uint64 = 100
	DefaultMaxBatchNumber int64 = 50
-	GcachePoolEnabled = "GCACHE_POOL_ENABLED"
-	GcachePoolHttpPath = "GCACHE_POOL_HTTP_PATH"
-	GcachePoolHttpPeers = "GCACHE_POOL_HTTP_PEERS"
-	GcacheStatedbCacheSize = "GCACHE_STATEDB_CACHE_SIZE"
-	GcacheStatedbCacheExpiry = "GCACHE_STATEDB_CACHE_EXPIRY"
-	GcacheStatedbLogStatsInterval = "GCACHE_STATEDB_LOG_STATS_INTERVAL"
)

View File

@@ -1,41 +0,0 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package shared
import (
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/jmoiron/sqlx"
)
// NewDB creates a new db connection and initializes the connection pool
func NewDB(connectString string, config postgres.Config) (*sqlx.DB, error) {
db, connectErr := sqlx.Connect("postgres", connectString)
if connectErr != nil {
return nil, postgres.ErrDBConnectionFailed(connectErr)
}
if config.MaxConns > 0 {
db.SetMaxOpenConns(config.MaxConns)
}
if config.MaxIdle > 0 {
db.SetMaxIdleConns(config.MaxIdle)
}
if config.MaxConnLifetime > 0 {
db.SetConnMaxLifetime(config.MaxConnLifetime)
}
return db, nil
}

View File

@@ -18,13 +18,13 @@ package shared
import (
	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
	"github.com/ipfs/go-cid"
-	blockstore "github.com/ipfs/go-ipfs-blockstore"
-	dshelp "github.com/ipfs/go-ipfs-ds-help"
+	"github.com/ipfs/go-ipfs-blockstore"
+	"github.com/ipfs/go-ipfs-ds-help"
	node "github.com/ipfs/go-ipld-format"
	"github.com/jmoiron/sqlx"
	"github.com/sirupsen/logrus"
+	"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs/ipld"
)
// HandleZeroAddrPointer will return an empty string for a nil address pointer

View File

@@ -18,24 +18,12 @@ package shared
import (
	"bytes"
-	"context"
-	"os"
-	"strconv"
-	. "github.com/onsi/gomega"
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/statediff/indexer"
-	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
-	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
-	"github.com/ethereum/go-ethereum/statediff/indexer/models"
-	"github.com/ethereum/go-ethereum/statediff/indexer/node"
-	"github.com/jmoiron/sqlx"
+	"github.com/vulcanize/ipld-eth-indexer/pkg/ipfs"
)
// IPLDsContainBytes is used to check if a list of IPLDs contains a particular byte slice
-func IPLDsContainBytes(iplds []models.IPLDModel, b []byte) bool {
+func IPLDsContainBytes(iplds []ipfs.BlockModel, b []byte) bool {
	for _, ipld := range iplds {
		if bytes.Equal(ipld.Data, b) {
			return true

@@ -43,65 +31,3 @@ func IPLDsContainBytes(iplds []models.IPLDModel, b []byte) bool {
		}
	}
	return false
}
// SetupDB is use to setup a db for watcher tests
func SetupDB() *sqlx.DB {
config := getTestDBConfig()
db, err := NewDB(config.DbConnectionString(), config)
Expect(err).NotTo(HaveOccurred())
return db
}
// TearDownDB is used to tear down the watcher dbs after tests
func TearDownDB(db *sqlx.DB) {
tx, err := db.Beginx()
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.header_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.transaction_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.receipt_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.state_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.storage_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM blocks`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth.log_cids`)
Expect(err).NotTo(HaveOccurred())
_, err = tx.Exec(`DELETE FROM eth_meta.watched_addresses`)
Expect(err).NotTo(HaveOccurred())
err = tx.Commit()
Expect(err).NotTo(HaveOccurred())
}
func SetupTestStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, genHash common.Hash) interfaces.StateDiffIndexer {
testInfo := node.Info{
GenesisBlock: genHash.String(),
NetworkID: "1",
ID: "1",
ClientName: "geth",
ChainID: params.TestChainConfig.ChainID.Uint64(),
}
_, stateDiffIndexer, err := indexer.NewStateDiffIndexer(ctx, chainConfig, testInfo, getTestDBConfig())
Expect(err).NotTo(HaveOccurred())
return stateDiffIndexer
}
func getTestDBConfig() postgres.Config {
port, _ := strconv.Atoi(os.Getenv("DATABASE_PORT"))
return postgres.Config{
Hostname: os.Getenv("DATABASE_HOSTNAME"),
DatabaseName: os.Getenv("DATABASE_NAME"),
Username: os.Getenv("DATABASE_USER"),
Password: os.Getenv("DATABASE_PASSWORD"),
Port: port,
Driver: postgres.SQLX,
}
}

View File

@@ -1,38 +0,0 @@
// VulcanizeDB
// Copyright © 2021 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package shared
type PoolConfig struct {
Enabled bool
HttpEndpoint string
PeerHttpEndpoints []string
}
type GroupConfig struct {
CacheSizeInMB int
CacheExpiryInMins int
LogStatsIntervalInSecs int
// Used in tests to override the cache name, to work around
// the "duplicate registration of group" error from groupcache
Name string
}
type GroupCacheConfig struct {
Pool PoolConfig
StateDB GroupConfig
}

View File

@@ -1,17 +0,0 @@
set -e
set -o xtrace
export ETH_FORWARD_ETH_CALLS=false
export DB_WRITE=true
export ETH_PROXY_ON_ERROR=false
export PGPASSWORD=password
export DATABASE_USER=vdbm
export DATABASE_PORT=8077
export DATABASE_PASSWORD=password
export DATABASE_HOSTNAME=127.0.0.1
# Wait for containers to be up and execute the integration test.
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8081)" != "200" ]; do echo "waiting for ipld-eth-server..." && sleep 5; done && \
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8545)" != "200" ]; do echo "waiting for geth-statediff..." && sleep 5; done && \
make integrationtest

View File

@@ -1,17 +0,0 @@
set -e
set -o xtrace
export ETH_FORWARD_ETH_CALLS=true
export DB_WRITE=false
export ETH_PROXY_ON_ERROR=false
export PGPASSWORD=password
export DATABASE_USER=vdbm
export DATABASE_PORT=8077
export DATABASE_PASSWORD=password
export DATABASE_HOSTNAME=127.0.0.1
# Wait for containers to be up and execute the integration test.
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8081)" != "200" ]; do echo "waiting for ipld-eth-server..." && sleep 5; done && \
while [ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8545)" != "200" ]; do echo "waiting for geth-statediff..." && sleep 5; done && \
make integrationtest

View File

@@ -1,8 +0,0 @@
# Clear up existing docker images and volume.
docker-compose down --remove-orphans --volumes
docker-compose -f docker-compose.yml up -d ipld-eth-db
sleep 10
PGPASSWORD=password DATABASE_USER=vdbm DATABASE_PORT=8077 DATABASE_PASSWORD=password DATABASE_HOSTNAME=127.0.0.1 DATABASE_NAME=vulcanize_testing make test
docker-compose down --remove-orphans --volumes
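
If `make test` cannot reach the database, a quick way to confirm the `ipld-eth-db` container is accepting connections on the mapped port is a one-off `psql` probe (a suggested check, not part of this script):

```bash
# Should print a row containing "1" once the container is ready
PGPASSWORD=password psql -h 127.0.0.1 -p 8077 -U vdbm -d vulcanize_testing -c 'SELECT 1;'
```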

View File

@@ -1,87 +0,0 @@
# Test Instructions
## Setup
- Clone the [stack-orchestrator](https://github.com/vulcanize/stack-orchestrator) and [go-ethereum](https://github.com/vulcanize/go-ethereum) repositories.
- Check out the [v3 release](https://github.com/vulcanize/go-ethereum/releases/tag/v1.10.17-statediff-3.2.1) in the go-ethereum repo.
```bash
# In go-ethereum repo.
git checkout v1.10.17-statediff-3.2.1
```
- Check out the working commit in the stack-orchestrator repo.
```bash
# In stack-orchestrator repo.
git checkout fcbc74451c5494664fe21f765e89c9c6565c07cb
```
## Run
- Run unit tests:
```bash
# In ipld-eth-server root directory.
./scripts/run_unit_test.sh
```
- Run integration tests:
- Update the config file [config.sh](https://github.com/vulcanize/stack-orchestrator/blob/main/config.sh) in the stack-orchestrator repo, replacing its existing content:
```bash
#!/bin/bash
# Path to go-ethereum repo.
vulcanize_go_ethereum=~/go-ethereum/
# Path to ipld-eth-server repo.
vulcanize_ipld_eth_server=~/ipld-eth-server/
db_write=true
eth_forward_eth_calls=false
eth_proxy_on_error=false
eth_http_path="go-ethereum:8545"
```
- Run stack-orchestrator:
```bash
# In stack-orchestrator root directory.
cd helper-scripts
./wrapper.sh \
-e docker \
-d ../docker/latest/docker-compose-db.yml \
-d ../docker/local/docker-compose-go-ethereum.yml \
-d ../docker/local/docker-compose-ipld-eth-server.yml \
-v remove \
-p ../config.sh
```
- Run test:
```bash
# In ipld-eth-server root directory.
./scripts/run_integration_test.sh
```
- Update the `config.sh` file:
```bash
#!/bin/bash
# Path to go-ethereum repo.
vulcanize_go_ethereum=~/go-ethereum/
# Path to ipld-eth-server repo.
vulcanize_ipld_eth_server=~/ipld-eth-server/
db_write=false
eth_forward_eth_calls=true
eth_proxy_on_error=false
eth_http_path="go-ethereum:8545"
```
- Stop stack-orchestrator and start it again using the same command.
- Run integration tests for direct proxy fall-through of eth_calls:
```bash
./scripts/run_integration_test_forward_eth_calls.sh
```
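
Before either integration run, it can be worth confirming that both endpoints answer JSON-RPC rather than merely returning HTTP 200. A suggested sanity check against the default ports used by the scripts above (8545 for geth-statediff, 8081 for ipld-eth-server), assuming both expose the standard `eth_blockNumber` method:

```bash
# Request the latest block number from each endpoint over JSON-RPC
for port in 8545 8081; do
  curl -s -X POST "http://127.0.0.1:${port}" \
    -H 'Content-Type: application/json' \
    --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
  echo "  <- port ${port}"
done
```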

View File

@@ -1,3 +0,0 @@
node_modules
artifacts
cache

View File

@@ -1,5 +0,0 @@
node_modules
#Hardhat files
cache
artifacts

View File

@@ -1,14 +0,0 @@
FROM node:14
ARG ETH_ADDR
ENV ETH_ADDR $ETH_ADDR
WORKDIR /usr/src/app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run compile && ls -lah
EXPOSE 3000
ENTRYPOINT ["npm", "start"]

View File

@@ -1,10 +0,0 @@
pragma solidity ^0.8.0;
import "@openzeppelin/contracts/token/ERC20/ERC20.sol";
contract GLDToken is ERC20 {
constructor() ERC20("Gold", "GLD") {
_mint(msg.sender, 1000000000000000000000);
}
function destroy() public {
selfdestruct(payable(msg.sender));
}
}

View File

@@ -1,25 +0,0 @@
// SPDX-License-Identifier: AGPL-3.0
pragma solidity ^0.8.0;
import "@openzeppelin/contracts/token/ERC20/ERC20.sol";
contract SLVToken is ERC20 {
uint256 private countA;
uint256 private countB;
constructor() ERC20("Silver", "SLV") {}
function incrementCountA() public {
countA = countA + 1;
}
function incrementCountB() public {
countB = countB + 1;
}
receive() external payable {}
function destroy() public {
selfdestruct(payable(msg.sender));
}
}

View File

@@ -1,43 +0,0 @@
require("@nomiclabs/hardhat-waffle");
// This is a sample Hardhat task. To learn how to create your own go to
// https://hardhat.org/guides/create-task.html
task("accounts", "Prints the list of accounts", async () => {
const accounts = await ethers.getSigners();
for (const account of accounts) {
console.log(account.address);
}
});
// You need to export an object to set up your config
// Go to https://hardhat.org/config/ to learn more
/**
* @type import('hardhat/config').HardhatUserConfig
*/
module.exports = {
solidity: {
version: "0.8.0",
settings: {
outputSelection: {
'*': {
'*': [
'abi', 'storageLayout',
]
}
}
}
},
networks: {
local: {
url: 'http://127.0.0.1:8545',
chainId: 99
},
docker: {
url: process.env.ETH_ADDR,
chainId: 99
}
}
};
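
As a usage sketch, the `accounts` task defined at the top of this config can be pointed at either network from the Hardhat CLI; the `docker` network additionally reads its RPC URL from `ETH_ADDR` (the URL below is illustrative):

```bash
# List signer addresses on the local node
npx hardhat accounts --network local

# Same task against the dockerized node; ETH_ADDR supplies the RPC URL
ETH_ADDR=http://go-ethereum:8545 npx hardhat accounts --network docker
```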

File diff suppressed because it is too large

View File

@@ -1,28 +0,0 @@
{
"name": "contract",
"version": "1.0.0",
"main": "index.js",
"scripts": {
"compile": "npx hardhat compile",
"start": "HARDHAT_NETWORK=docker node src/index.js",
"start:local": "ETH_ADDR=http://127.0.0.1:8545 npm run start",
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [],
"author": "",
"license": "ISC",
"description": "",
"dependencies": {
"@openzeppelin/contracts": "^4.0.0",
"fastify": "^3.14.2",
"hardhat": "^2.2.0",
"solidity-create2-deployer": "^0.4.0"
},
"devDependencies": {
"@nomiclabs/hardhat-ethers": "^2.0.2",
"@nomiclabs/hardhat-waffle": "^2.0.1",
"chai": "^4.3.4",
"ethereum-waffle": "^3.3.0",
"ethers": "^5.1.0"
}
}
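
The `scripts` block above wires compilation and deployment together. A plausible local workflow, assuming a dev node is already listening on 127.0.0.1:8545, would be:

```bash
npm ci              # install pinned dependencies
npm run compile     # runs npx hardhat compile
npm run start:local # runs src/index.js with ETH_ADDR=http://127.0.0.1:8545
```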

View File

@@ -1,18 +0,0 @@
const hre = require("hardhat");
async function main() {
// await hre.run('compile');
// We get the contract to deploy
const GLDToken = await hre.ethers.getContractFactory("GLDToken");
const token = await GLDToken.deploy();
await token.deployed();
console.log("GLDToken deployed to:", token.address, token.deployTransaction.hash);
}
// We recommend this pattern to be able to use async/await everywhere
// and properly handle errors.
main()
.then(() => process.exit(0))
.catch(error => {
console.error(error);
process.exit(1);
});

View File

@@ -1,36 +0,0 @@
// We require the Hardhat Runtime Environment explicitly here. This is optional
// but useful for running the script in a standalone fashion through `node <script>`.
//
// When running the script with `hardhat run <script>` you'll find the Hardhat
// Runtime Environment's members available in the global scope.
const hre = require("hardhat");
async function main() {
// Hardhat always runs the compile task when running scripts with its command
// line interface.
//
// If this script is run directly using `node` you may want to call compile
// manually to make sure everything is compiled
// await hre.run('compile');
// We get the contract to deploy
const Greeter = await hre.ethers.getContractFactory("Greeter");
const greeter = await Greeter.deploy("Hello, Hardhat!");
await greeter.deployed();
console.log("Greeter deployed to:", greeter.address, "; tx hash: ", greeter.deployTransaction.hash);
const result = await greeter.setGreeting("Hello 123!");
console.log("Greeter updated", "; tx hash: ", result.hash, "; block hash: ", result.blockHash);
}
// We recommend this pattern to be able to use async/await everywhere
// and properly handle errors.
main()
.then(() => process.exit(0))
.catch(error => {
console.error(error);
process.exit(1);
});

Some files were not shown because too many files have changed in this diff