Refactor to use plugeth-statediff #1
.dockerignore (new file, 1 line)
@@ -0,0 +1 @@
+.git
.gitea/workflows/publish.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
+name: Publish Docker image
+
+on:
+  release:
+    types: [published]
+
+jobs:
+  docker-build:
+    name: Run docker build
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - id: vars
+        name: Output SHA and version tag
+        run: |
+          echo "sha=${GITHUB_SHA:0:7}" >> $GITHUB_OUTPUT
+          echo "tag=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
+      - name: Build and tag image
+        run: |
+          docker build . \
+            -t cerc-io/eth-statediff-service \
+            -t git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.sha}} \
+            -t git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.tag}}
+      - name: Push image tags
+        run: |
+          echo ${{ secrets.GITEA_PUBLISH_TOKEN }} | docker login https://git.vdb.to -u cerccicd --password-stdin
+          docker push git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.sha}}
+          docker push git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.tag}}
.gitea/workflows/tests.yml (new file, 65 lines)
@@ -0,0 +1,65 @@
+name: Tests
+
+on:
+  pull_request:
+    branches: '*'
+  push:
+    branches:
+      - main
+      - ci-test
+  workflow_call:
+
+# Needed until we can incorporate docker startup into the executor container
+env:
+  DOCKER_HOST: unix:///var/run/dind.sock
+
+jobs:
+  integration-tests:
+    name: Run integration tests
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: actions/setup-go@v3
+        with:
+          go-version-file: go.mod
+          check-latest: true
+      - name: Run dockerd
+        run: |
+          dockerd -H $DOCKER_HOST --userland-proxy=false &
+          sleep 5
+      - name: Run DB container
+        run: docker compose -f test/compose.yml up --wait
+      - name: Configure Gitea access
+        env:
+          TOKEN: ${{ secrets.CICD_REPO_TOKEN }}
+        run: |
+          git config --global url."https://$TOKEN:@git.vdb.to/".insteadOf "https://git.vdb.to/"
+      - name: Build package
+        run: go build .
+      # Run a sanity test against the fixture data
+      # Complete integration tests are TODO
+      - name: Run basic integration test
+        env:
+          DATABASE_TYPE: postgres
+          LEVELDB_PATH: ./fixture/chaindata
+          LEVELDB_ANCIENT: ./fixture/chaindata/ancient
+          LOG_FILE_PATH: ./server-log
+        timeout-minutes: 30
+        run: |
+          ./eth-statediff-service --config ./test/ci-config.toml serve &
+          sleep 10
+
+          ./scripts/request-range.sh 0 32 || (E=$?; cat ./server-log; exit $E)
+
+          until grep "Finished processing block 32" ./server-log
+          do sleep 1; done
+
+          count_results() {
+            query="select count(*) from $1;"
+            docker exec -e PGPASSWORD=password test-ipld-eth-db-1 \
+              psql -tA cerc_testing -U vdbm -c "$query"
+          }
+          set -x
+          [[ "$(count_results eth.header_cids)" = 33 ]]
+          [[ "$(count_results eth.state_cids)" = 21 ]]
+          [[ "$(count_results eth.storage_cids)" = 18 ]]
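The sanity check above drives the service through its JSON-RPC interface. The `scripts/request-range.sh` helper itself is not part of this diff; what follows is only a minimal sketch of the kind of request it presumably issues, based on the `statediff_writeStateDiffsInRange` call documented in the README (block range and endpoint values are illustrative):

```bash
#!/bin/sh
# Hypothetical stand-in for scripts/request-range.sh BEGIN END:
# ask the service to write statediffs for a block range over HTTP JSON-RPC.
BEGIN=${1:-0}
END=${2:-32}
HOST=${HOST:-127.0.0.1}
PORT=${PORT:-8545}

curl -s -X POST -H 'Content-Type: application/json' --data '{
  "jsonrpc": "2.0",
  "method": "statediff_writeStateDiffsInRange",
  "params": ['"$BEGIN"', '"$END"', {
    "includeBlock": true,
    "includeReceipts": true,
    "includeTD": true,
    "includeCode": true
  }],
  "id": 1
}' "$HOST:$PORT"
```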
.github/workflows/manual_publish.yml (deleted, 35 lines, vendored)
@@ -1,35 +0,0 @@
-name: MANUAL Override Publish from release SHA to TAG
-on:
-  workflow_dispatch:
-    inputs:
-      giteaPublishTag:
-        description: 'Release TAG to publish TO on gitea; e.g. v4.1.5-alpha'
-        required: true
-      cercContainerTag:
-        description: 'Container (truncated!!! SHA) to release-tag FROM'
-        required: true
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
-jobs:
-  # This workflow contains a single job called "build"
-  build:
-    name: Pull SHA and add release-tag
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - name: Get the version
-        id: vars
-        run: |
-          echo ::set-output name=sha::$(echo ${cercContainerTag:0:7})
-      - name: Pull docker image by SHA
-        run: docker pull git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{github.event.inputs.cercContainerTag}}
-      - name: Tag docker image TAG
-        run: docker tag git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{github.event.inputs.cercContainerTag}} git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{github.event.inputs.giteaPublishTag}}
-      - name: Tag docker image TAG
-        run: docker tag git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{github.event.inputs.cercContainerTag}} git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:latest
-      - name: Docker Login
-        run: echo ${{ secrets.GITEA_TOKEN }} | docker login https://git.vdb.to -u cerccicd --password-stdin
-      - name: Docker Push Release Tag
-        run: docker push git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{github.event.inputs.giteaPublishTag}}
-      - name: Docker Push LATEST Tag
-        run: docker push git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:latest
.github/workflows/on-publish-pr.yml (deleted, 73 lines, vendored)
@@ -1,73 +0,0 @@
-name: Publish Docker image
-on:
-  release:
-    types: [published]
-  pull_request:
-jobs:
-  pre_job:
-    # continue-on-error: true # Uncomment once integration is finished
-    runs-on: ubuntu-latest
-    # Map a step output to a job output
-    outputs:
-      should_skip: ${{ steps.skip_check.outputs.should_skip }}
-    steps:
-      - id: skip_check
-        uses: fkirc/skip-duplicate-actions@v4
-        with:
-          # All of these options are optional, so you can remove them if you are happy with the defaults
-          concurrent_skipping: "never"
-          skip_after_successful_duplicate: "true"
-          do_not_skip: '["workflow_dispatch", "schedule"]'
-  run-tests:
-    if: ${{ needs.pre_job.outputs.should_skip != 'true' }}
-    needs: pre_job
-    uses: ./.github/workflows/tests.yml
-  build:
-    name: Run docker build
-    runs-on: ubuntu-latest
-    if: |
-      always() &&
-      (needs.run-tests.result == 'success' || needs.run-tests.result == 'skipped') &&
-      github.event_name == 'release'
-    needs: run-tests
-    steps:
-      - uses: actions/checkout@v2
-      - name: Get the version
-        id: vars
-        run: |
-          echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
-          echo ::set-output name=tag::$(echo ${GITHUB_REF#refs/tags/})
-      - name: Run docker build
-        run: make docker-build
-      - name: Tag docker image
-        run: docker tag cerc-io/eth-statediff-service git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.sha}}
-      - name: Tag docker image TAG
-        run: docker tag git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.sha}} git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.tag}}
-      - name: Docker Login
-        run: echo ${{ secrets.GITEA_TOKEN }} | docker login https://git.vdb.to -u cerccicd --password-stdin
-      - name: Docker Push
-        run: docker push git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.sha}}
-  # push_to_registries:
-  #   name: Push Docker image to Docker Hub
-  #   runs-on: ubuntu-latest
-  #   if: |
-  #     always() &&
-  #     (needs.build.result == 'success') &&
-  #     github.event_name == 'release'
-  #   needs: build
-  #   steps:
-  #     - name: Get the version
-  #       id: vars
-  #       run: |
-  #         echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
-  #         echo ::set-output name=tag::$(echo ${GITHUB_REF#refs/tags/})
-  #     - name: Docker Login to Github Registry
-  #       run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
-  #     - name: Docker Pull
-  #       run: docker pull docker.pkg.github.com/cerc-io/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.sha}}
-  #     - name: Docker Login to Docker Registry
-  #       run: echo ${{ secrets.VULCANIZEJENKINS_PAT }} | docker login -u vulcanizejenkins --password-stdin
-  #     - name: Tag docker image
-  #       run: docker tag docker.pkg.github.com/cerc-io/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.sha}} cerc-io/eth-statediff-service:${{steps.vars.outputs.tag}}
-  #     - name: Docker Push to Docker Hub
-  #       run: docker push cerc-io/eth-statediff-service:${{steps.vars.outputs.tag}}
.github/workflows/tests.yml (deleted, 37 lines, vendored)
@@ -1,37 +0,0 @@
-name: Tests for Geth that are used in multiple jobs.
-
-on:
-  workflow_call:
-
-env:
-  GOPATH: /tmp/go
-
-jobs:
-  build:
-    name: Run docker build
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - name: Run docker build
-        run: make docker-build
-
-  statediff-unit-test:
-    name: Run statediff unit tests
-    runs-on: ubuntu-latest
-    env:
-      GO111MODULE: on
-    steps:
-      - name: Create GOPATH
-        run: mkdir -p /tmp/go
-
-      - uses: actions/setup-go@v3
-        with:
-          go-version: ">=1.18.0"
-          check-latest: true
-
-      - name: Checkout code
-        uses: actions/checkout@v2
-
-      - name: Run unit tests
-        run: |
-          make test
Dockerfile (modified, 28 lines changed)
@@ -1,21 +1,24 @@
 FROM golang:1.19-alpine as builder
 
-RUN apk --update --no-cache add make git g++ linux-headers
+RUN apk add --no-cache git gcc musl-dev binutils-gold
 # DEBUG
 RUN apk add busybox-extras
 
-# Get and build ipfs-blockchain-watcher
-ADD . /go/src/github.com/cerc-io/eth-statediff-service
-#RUN git clone https://github.com/cerc-io/eth-statediff-service.git /go/src/github.com/vulcanize/eth-statediff-service
-
-WORKDIR /go/src/github.com/cerc-io/eth-statediff-service
-RUN GO111MODULE=on GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o eth-statediff-service .
+WORKDIR /eth-statediff-service
+
+ARG GIT_VDBTO_TOKEN
+
+COPY go.mod go.sum ./
+RUN if [ -n "$GIT_VDBTO_TOKEN" ]; then git config --global url."https://$GIT_VDBTO_TOKEN:@git.vdb.to/".insteadOf "https://git.vdb.to/"; fi && \
+    go mod download && \
+    rm -f ~/.gitconfig
+COPY . .
+
+RUN go build -ldflags '-extldflags "-static"' -o eth-statediff-service .
 
-# app container
 FROM alpine
 
-ARG USER="vdm"
-ARG CONFIG_FILE="./environments/config.toml"
+ARG USER="vdbm"
 ARG EXPOSE_PORT=8545
 
 RUN adduser -Du 5000 $USER adm
@@ -27,12 +30,11 @@
 
 # chown first so dir is writable
 # note: using $USER is merged, but not in the stable release yet
-COPY --chown=5000:5000 --from=builder /go/src/github.com/cerc-io/eth-statediff-service/$CONFIG_FILE config.toml
-COPY --chown=5000:5000 --from=builder /go/src/github.com/cerc-io/eth-statediff-service/startup_script.sh .
-COPY --chown=5000:5000 --from=builder /go/src/github.com/cerc-io/eth-statediff-service/environments environments
+COPY --chown=5000:5000 --from=builder /eth-statediff-service/startup_script.sh .
+COPY --chown=5000:5000 --from=builder /eth-statediff-service/environments environments
 
 # keep binaries immutable
-COPY --from=builder /go/src/github.com/cerc-io/eth-statediff-service/eth-statediff-service eth-statediff-service
+COPY --from=builder /eth-statediff-service/eth-statediff-service eth-statediff-service
 
 EXPOSE $EXPOSE_PORT
 
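The rewritten builder stage takes a `GIT_VDBTO_TOKEN` build argument so that `go mod download` can fetch private modules from git.vdb.to. A hedged sketch of a local build using it (the token value and the local image tag are placeholders, not part of this PR):

```bash
# Pass a git.vdb.to access token into the builder stage; it is only used for
# `go mod download`, and the temporary .gitconfig is removed afterwards.
docker build . \
  --build-arg GIT_VDBTO_TOKEN=<token> \
  -t cerc-io/eth-statediff-service:local
```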
Makefile (deleted, 12 lines)
@@ -1,12 +0,0 @@
-## Build docker image
-.PHONY: docker-build
-docker-build:
-	docker build -t cerc-io/eth-statediff-service .
-
-.PHONY: test
-test:
-	go test -p 1 ./pkg/... -v
-
-build:
-	go fmt ./...
-	go build
README.md (modified, 146 lines changed)
@@ -1,132 +1,22 @@
 # eth-statediff-service
 
-[![Go Report Card](https://goreportcard.com/badge/github.com/vulcanize/eth-statediff-service)](https://goreportcard.com/report/github.com/vulcanize/eth-statediff-service)
+[![Go Report Card](https://goreportcard.com/badge/github.com/cerc-io/eth-statediff-service)](https://goreportcard.com/report/github.com/cerc-io/eth-statediff-service)
 
->> standalone statediffing service on top of LevelDB
-
-Purpose:
-
-Stand up a statediffing service directly on top of a go-ethereum LevelDB instance.
-
+A standalone statediffing service which runs directly on top of a `go-ethereum` LevelDB instance.
+
 This service can serve historical state data over the same rpc interface as
 [statediffing geth](https://github.com/cerc-io/go-ethereum) without needing to run a full node.
 
 ## Setup
 
-Build the binary:
+Configure access to the private Git server at `git.vdb.to`, then build the executable:
 
 ```bash
-make build
+go build .
 ```
 
 ## Configuration
 
-An example config file:
-
-```toml
-[leveldb]
-# LevelDB access mode <local | remote>
-mode = "local" # LVLDB_MODE
-
-# in local mode
-# LevelDB paths
-path = "/Users/user/Library/Ethereum/geth/chaindata" # LVLDB_PATH
-ancient = "/Users/user/Library/Ethereum/geth/chaindata/ancient" # LVLDB_ANCIENT
-
-# in remote mode
-# URL for leveldb-ethdb-rpc endpoint
-url = "http://127.0.0.1:8082/" # LVLDB_URL
-
-[server]
-ipcPath = ".ipc" # SERVICE_IPC_PATH
-httpPath = "127.0.0.1:8545" # SERVICE_HTTP_PATH
-
-[statediff]
-prerun = true # STATEDIFF_PRERUN
-serviceWorkers = 1 # STATEDIFF_SERVICE_WORKERS
-workerQueueSize = 1024 # STATEDIFF_WORKER_QUEUE_SIZE
-trieWorkers = 4 # STATEDIFF_TRIE_WORKERS
-
-[prerun]
-only = false # PRERUN_ONLY
-parallel = true # PRERUN_PARALLEL
-
-# to perform prerun in a specific range (optional)
-start = 0 # PRERUN_RANGE_START
-stop = 100 # PRERUN_RANGE_STOP
-
-# to perform prerun over multiple ranges (optional)
-ranges = [
-  [101, 1000]
-]
-
-# statediffing params for prerun
-[prerun.params]
-intermediateStateNodes = true # PRERUN_INTERMEDIATE_STATE_NODES
-intermediateStorageNodes = true # PRERUN_INTERMEDIATE_STORAGE_NODES
-includeBlock = true # PRERUN_INCLUDE_BLOCK
-includeReceipts = true # PRERUN_INCLUDE_RECEIPTS
-includeTD = true # PRERUN_INCLUDE_TD
-includeCode = true # PRERUN_INCLUDE_CODE
-watchedAddresses = []
-
-[log]
-file = "" # LOG_FILE_PATH
-level = "info" # LOG_LEVEL
-
-[database]
-# output type <postgres | file | dump>
-type = "postgres"
-
-# with postgres type
-# db credentials
-name = "vulcanize_test" # DATABASE_NAME
-hostname = "localhost" # DATABASE_HOSTNAME
-port = 5432 # DATABASE_PORT
-user = "vulcanize" # DATABASE_USER
-password = "..." # DATABASE_PASSWORD
-driver = "sqlx" # DATABASE_DRIVER_TYPE <sqlx | pgx>
-
-# with file type
-# file mode <sql | csv>
-fileMode = "csv" # DATABASE_FILE_MODE
-
-# with SQL file mode
-filePath = "" # DATABASE_FILE_PATH
-
-# with CSV file mode
-fileCsvDir = "output_dir" # DATABASE_FILE_CSV_DIR
-
-# with dump type
-# <stdout | stderr | discard>
-dumpDestination = "" # DATABASE_DUMP_DST
-
-[cache]
-database = 1024 # DB_CACHE_SIZE_MB
-trie = 1024 # TRIE_CACHE_SIZE_MB
-
-[prom]
-# prometheus metrics
-metrics = true # PROM_METRICS
-http = true # PROM_HTTP
-httpAddr = "localhost" # PROM_HTTP_ADDR
-httpPort = "8889" # PROM_HTTP_PORT
-dbStats = true # PROM_DB_STATS
-
-[ethereum]
-# node info
-nodeID = "" # ETH_NODE_ID
-clientName = "eth-statediff-service" # ETH_CLIENT_NAME
-networkID = 1 # ETH_NETWORK_ID
-chainID = 1 # ETH_CHAIN_ID
-genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" # ETH_GENESIS_BLOCK
-
-# path to custom chain config file (optional)
-# keep chainID same as that in chain config file
-chainConfig = "./chain.json" # ETH_CHAIN_CONFIG
-
-[debug]
-pprof = false # DEBUG_PPROF
-```
-
+See [./environments/example.toml](./environments/example.toml) for an annotated example config file.
 
 ### Local Setup
 
@@ -182,14 +72,27 @@
 Example:
 
 ```bash
-curl -X POST -H 'Content-Type: application/json' --data '{"jsonrpc":"2.0","method":"statediff_writeStateDiffsInRange","params":['"$BEGIN"', '"$END"', {"intermediateStateNodes":true,"intermediateStorageNodes":true,"includeBlock":true,"includeReceipts":true,"includeTD":true,"includeCode":true}],"id":1}' "$HOST":"$PORT"
+curl -X POST -H 'Content-Type: application/json' --data '{
+    "jsonrpc": "2.0",
+    "method": "statediff_writeStateDiffsInRange",
+    "params": [0, 1, {
+        "includeBlock": true,
+        "includeReceipts": true,
+        "includeTD": true,
+        "includeCode": true
+      }
+    ],
+    "id": 1
+}' "$HOST":"$PORT"
 ```
 
 * Prerun:
-  * The process can be configured locally with sets of ranges to process as a "prerun" to processing directed by the server endpoints.
-  * This is done by turning "prerun" on in the config (`statediff.prerun = true`) and defining ranges and params in the
-  `prerun` section of the config.
-  * Set the range using `prerun.start` and `prerun.stop`. Use `prerun.ranges` if prerun on more than one range is required.
+  * The process can be configured locally with sets of ranges to process as a "prerun" to
+    processing directed by the server endpoints.
+  * This is done by turning "prerun" on in the config (`statediff.prerun = true`) and defining
+    ranges and params in the `prerun` section of the config.
+  * Set the range using `prerun.start` and `prerun.stop`. Use `prerun.ranges` if prerun on more
+    than one range is required.
 
   * NOTE: Currently, `params.includeTD` must be set to / passed as `true`.
 
@@ -218,7 +121,8 @@
 
 ## Import output data in file mode into a database
 
-* When `eth-statediff-service` is run in file mode (`database.type`) the output is in form of a SQL file or multiple CSV files.
+* When `eth-statediff-service` is run in file mode (`database.type`: `file`) the output is in form of a SQL
+  file or multiple CSV files.
 
 ### SQL
 
cmd/env.go (modified, 20 lines changed)
@@ -30,10 +30,10 @@
 
 	DB_CACHE_SIZE_MB   = "DB_CACHE_SIZE_MB"
 	TRIE_CACHE_SIZE_MB = "TRIE_CACHE_SIZE_MB"
-	LVLDB_MODE    = "LVLDB_MODE"
-	LVLDB_PATH    = "LVLDB_PATH"
-	LVLDB_ANCIENT = "LVLDB_ANCIENT"
-	LVLDB_URL     = "LVLDB_URL"
+	LEVELDB_MODE    = "LEVELDB_MODE"
+	LEVELDB_PATH    = "LEVELDB_PATH"
+	LEVELDB_ANCIENT = "LEVELDB_ANCIENT"
+	LEVELDB_URL     = "LEVELDB_URL"
 
 	STATEDIFF_PRERUN       = "STATEDIFF_PRERUN"
 	STATEDIFF_TRIE_WORKERS = "STATEDIFF_TRIE_WORKERS"
@@ -53,8 +53,6 @@
 	PRERUN_PARALLEL    = "PRERUN_PARALLEL"
 	PRERUN_RANGE_START = "PRERUN_RANGE_START"
 	PRERUN_RANGE_STOP  = "PRERUN_RANGE_STOP"
-	PRERUN_INTERMEDIATE_STATE_NODES   = "PRERUN_INTERMEDIATE_STATE_NODES"
-	PRERUN_INTERMEDIATE_STORAGE_NODES = "PRERUN_INTERMEDIATE_STORAGE_NODES"
 	PRERUN_INCLUDE_BLOCK    = "PRERUN_INCLUDE_BLOCK"
 	PRERUN_INCLUDE_RECEIPTS = "PRERUN_INCLUDE_RECEIPTS"
 	PRERUN_INCLUDE_TD       = "PRERUN_INCLUDE_TD"
@@ -121,10 +119,10 @@
 	viper.BindEnv("cache.database", DB_CACHE_SIZE_MB)
 	viper.BindEnv("cache.trie", TRIE_CACHE_SIZE_MB)
 
-	viper.BindEnv("leveldb.mode", LVLDB_MODE)
-	viper.BindEnv("leveldb.path", LVLDB_PATH)
-	viper.BindEnv("leveldb.ancient", LVLDB_ANCIENT)
-	viper.BindEnv("leveldb.url", LVLDB_URL)
+	viper.BindEnv("leveldb.mode", LEVELDB_MODE)
+	viper.BindEnv("leveldb.path", LEVELDB_PATH)
+	viper.BindEnv("leveldb.ancient", LEVELDB_ANCIENT)
+	viper.BindEnv("leveldb.url", LEVELDB_URL)
 
 	viper.BindEnv("prom.metrics", PROM_METRICS)
 	viper.BindEnv("prom.http", PROM_HTTP)
@@ -141,8 +139,6 @@
 	viper.BindEnv("prerun.parallel", PRERUN_PARALLEL)
 	viper.BindEnv("prerun.start", PRERUN_RANGE_START)
 	viper.BindEnv("prerun.stop", PRERUN_RANGE_STOP)
-	viper.BindEnv("prerun.params.intermediateStateNodes", PRERUN_INTERMEDIATE_STATE_NODES)
-	viper.BindEnv("prerun.params.intermediateStorageNodes", PRERUN_INTERMEDIATE_STORAGE_NODES)
 	viper.BindEnv("prerun.params.includeBlock", PRERUN_INCLUDE_BLOCK)
 	viper.BindEnv("prerun.params.includeReceipts", PRERUN_INCLUDE_RECEIPTS)
 	viper.BindEnv("prerun.params.includeTD", PRERUN_INCLUDE_TD)
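The rename from `LVLDB_*` to `LEVELDB_*` means environment-driven deployments must update their variable names (the new Gitea CI workflow above already uses the new names). A sketch of the renamed variables in use, with illustrative paths:

```bash
# The old LVLDB_MODE / LVLDB_PATH / LVLDB_ANCIENT / LVLDB_URL names are no longer bound.
export LEVELDB_MODE=local
export LEVELDB_PATH="$HOME/.ethereum/geth/chaindata"
export LEVELDB_ANCIENT="$HOME/.ethereum/geth/chaindata/ancient"
export DATABASE_TYPE=postgres

./eth-statediff-service --config ./environments/example.toml serve
```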
cmd/root.go (modified, 25 lines changed)
@@ -23,13 +23,13 @@
 	"strings"
 	"time"
 
+	"github.com/cerc-io/plugeth-statediff/indexer/database/dump"
+	"github.com/cerc-io/plugeth-statediff/indexer/database/file"
+	"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
+	"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
+	"github.com/cerc-io/plugeth-statediff/indexer/node"
+	"github.com/cerc-io/plugeth-statediff/indexer/shared"
 	"github.com/ethereum/go-ethereum/cmd/utils"
-	"github.com/ethereum/go-ethereum/statediff/indexer/database/dump"
-	"github.com/ethereum/go-ethereum/statediff/indexer/database/file"
-	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
-	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
-	"github.com/ethereum/go-ethereum/statediff/indexer/node"
-	"github.com/ethereum/go-ethereum/statediff/indexer/shared"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
@@ -49,7 +49,6 @@
 }
 
 func Execute() {
-	log.Info("----- Starting vDB -----")
 	if err := rootCmd.Execute(); err != nil {
 		log.Fatal(err)
 	}
@@ -126,7 +125,7 @@
 	rootCmd.PersistentFlags().Int("trie-workers", 0, "number of workers to use for trie traversal and processing")
 	rootCmd.PersistentFlags().Int("worker-queue-size", 0, "size of the range request queue for service workers")
 
-	rootCmd.PersistentFlags().String("database-name", "vulcanize_public", "database name")
+	rootCmd.PersistentFlags().String("database-name", "cerc_public", "database name")
 	rootCmd.PersistentFlags().Int("database-port", 5432, "database port")
 	rootCmd.PersistentFlags().String("database-hostname", "localhost", "database hostname")
 	rootCmd.PersistentFlags().String("database-user", "", "database user")
@@ -163,8 +162,6 @@
 	rootCmd.PersistentFlags().Bool("prerun-only", false, "only process pre-configured ranges; exit afterwards")
 	rootCmd.PersistentFlags().Int("prerun-start", 0, "start height for a prerun range")
 	rootCmd.PersistentFlags().Int("prerun-stop", 0, "stop height for a prerun range")
-	rootCmd.PersistentFlags().Bool("prerun-intermediate-state-nodes", true, "include intermediate state nodes in state diff")
-	rootCmd.PersistentFlags().Bool("prerun-intermediate-storage-nodes", true, "include intermediate storage nodes in state diff")
 	rootCmd.PersistentFlags().Bool("prerun-include-block", true, "include block data in the statediff payload")
 	rootCmd.PersistentFlags().Bool("prerun-include-receipts", true, "include receipts in the statediff payload")
 	rootCmd.PersistentFlags().Bool("prerun-include-td", true, "include td in the statediff payload")
@@ -224,8 +221,6 @@
 	viper.BindPFlag("prerun.parallel", rootCmd.PersistentFlags().Lookup("prerun-parallel"))
 	viper.BindPFlag("prerun.start", rootCmd.PersistentFlags().Lookup("prerun-start"))
 	viper.BindPFlag("prerun.stop", rootCmd.PersistentFlags().Lookup("prerun-stop"))
-	viper.BindPFlag("prerun.params.intermediateStateNodes", rootCmd.PersistentFlags().Lookup("prerun-intermediate-state-nodes"))
-	viper.BindPFlag("prerun.params.intermediateStorageNodes", rootCmd.PersistentFlags().Lookup("prerun-intermediate-storage-nodes"))
 	viper.BindPFlag("prerun.params.includeBlock", rootCmd.PersistentFlags().Lookup("prerun-include-block"))
 	viper.BindPFlag("prerun.params.includeReceipts", rootCmd.PersistentFlags().Lookup("prerun-include-receipts"))
 	viper.BindPFlag("prerun.params.includeTD", rootCmd.PersistentFlags().Lookup("prerun-include-td"))
@@ -304,7 +299,7 @@
 	if err != nil {
 		return nil, err
 	}
-	logWithCommand.Infof("Configuring service for database type: %s", dbType)
+	logWithCommand.Debugf("Configuring service for database type: %s", dbType)
 	var indexerConfig interfaces.Config
 	switch dbType {
 	case shared.FILE:
@@ -344,7 +339,7 @@
 	case dump.STDOUT:
 		indexerConfig = dump.Config{Dump: os.Stderr}
 	case dump.DISCARD:
-		indexerConfig = dump.Config{Dump: dump.NewDiscardWriterCloser()}
+		indexerConfig = dump.Config{Dump: dump.Discard}
 	default:
 		return nil, fmt.Errorf("unrecognized dump destination: %s", dumpDst)
 	}
@@ -361,8 +356,6 @@
 		DatabaseName: viper.GetString("database.name"),
 		Username:     viper.GetString("database.user"),
 		Password:     viper.GetString("database.password"),
-		ID:           nodeInfo.ID,
-		ClientName:   nodeInfo.ClientName,
 		Driver:       driverType,
 	}
 	if viper.IsSet("database.maxIdle") {
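The persistent flags registered above can also override config-file values on the command line. A hedged example combining the annotated example config with flag overrides (the database values here are illustrative, not defaults from this PR):

```bash
./eth-statediff-service serve \
  --config ./environments/example.toml \
  --database-name cerc_testing \
  --database-hostname localhost \
  --database-port 5432 \
  --database-user vdbm
```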
cmd/serve.go (modified, 41 lines changed)
@@ -28,7 +28,7 @@
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 
-	sd "github.com/cerc-io/eth-statediff-service/pkg"
+	pkg "github.com/cerc-io/eth-statediff-service/pkg"
 	srpc "github.com/cerc-io/eth-statediff-service/pkg/rpc"
 )
 
@@ -60,22 +60,14 @@
 }
 
 func serve() {
-	logWithCommand.Info("Running eth-statediff-service serve command")
-	logWithCommand.Infof("Parallelism: %d", maxParallelism())
+	logWithCommand.Debug("Running eth-statediff-service serve command")
+	logWithCommand.Debugf("Parallelism: %d", maxParallelism())
 
 	reader, chainConf, nodeInfo := instantiateLevelDBReader()
 
-	header, err := reader.GetLatestHeader()
-	if err != nil {
-		logWithCommand.Fatalf("Unable to determine latest header height and hash: %s", err.Error())
-	}
-	if header.Number == nil {
-		logWithCommand.Fatal("Latest header found in levelDB has a nil block height")
-	}
-	logWithCommand.Infof("Latest block found in the levelDB\r\nheight: %s, hash: %s", header.Number.String(), header.Hash().Hex())
+	// report latest block info
+	reportLatestBlock(reader)
 
-	statediffService, err := createStateDiffService(reader, chainConf, nodeInfo)
+	service, err := createStateDiffService(reader, chainConf, nodeInfo)
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
@@ -93,48 +85,45 @@
 	// short circuit if we only want to perform prerun
 	if viper.GetBool("prerun.only") {
 		parallel := viper.GetBool("prerun.parallel")
-		if err := statediffService.Run(nil, parallel); err != nil {
-			logWithCommand.Fatal("Unable to perform prerun: %v", err)
+		if err := service.Run(nil, parallel); err != nil {
+			logWithCommand.Fatalf("Unable to perform prerun: %v", err)
 		}
 		return
 	}
 
 	// start service and servers
-	logWithCommand.Info("Starting statediff service")
-	wg := new(sync.WaitGroup)
-	if err := statediffService.Loop(wg); err != nil {
+	var wg sync.WaitGroup
+	if err := service.Loop(&wg); err != nil {
 		logWithCommand.Fatalf("unable to start statediff service: %v", err)
 	}
-	logWithCommand.Info("Starting RPC servers")
-	if err := startServers(statediffService); err != nil {
+	if err := startServers(service); err != nil {
 		logWithCommand.Fatal(err)
 	}
-	logWithCommand.Info("RPC servers successfully spun up; awaiting requests")
+	logWithCommand.Debug("RPC servers successfully spun up; awaiting requests")
 
 	// clean shutdown
 	shutdown := make(chan os.Signal)
 	signal.Notify(shutdown, os.Interrupt)
 	<-shutdown
 	logWithCommand.Info("Received interrupt signal, shutting down")
-	statediffService.Stop()
+	service.Stop()
 	wg.Wait()
 }
 
-func startServers(serv sd.StateDiffService) error {
+func startServers(serv *pkg.Service) error {
 	ipcPath := viper.GetString("server.ipcPath")
 	httpPath := viper.GetString("server.httpPath")
 	if ipcPath == "" && httpPath == "" {
-		logWithCommand.Fatal("Need an ipc path and/or an http path")
+		logWithCommand.Fatal("Need an IPC path and/or an HTTP path")
 	}
 	if ipcPath != "" {
-		logWithCommand.Info("Starting up IPC server")
 		_, _, err := srpc.StartIPCEndpoint(ipcPath, serv.APIs())
 		if err != nil {
 			return err
 		}
 	}
 	if httpPath != "" {
-		logWithCommand.Info("Starting up HTTP server")
 		_, err := srpc.StartHTTPEndpoint(httpPath, serv.APIs(), []string{"statediff"}, nil, []string{"*"}, rpc.HTTPTimeouts{})
 		if err != nil {
 			return err
cmd/stats.go (modified, 10 lines changed)
@@ -42,13 +42,5 @@
 	logWithCommand.Info("Running eth-statediff-service stats command")
 
 	reader, _, _ := instantiateLevelDBReader()
-	header, err := reader.GetLatestHeader()
-	if err != nil {
-		logWithCommand.Fatalf("Unable to determine latest header height and hash: %s", err.Error())
-	}
-	if header.Number == nil {
-		logWithCommand.Fatal("Latest header found in levelDB has a nil block height")
-	}
-	logWithCommand.Infof("Latest block found in the levelDB\r\nheight: %s, hash: %s", header.Number.String(), header.Hash().Hex())
+	reportLatestBlock(reader)
 }
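Both `serve` and `stats` now report the latest block through the shared `reportLatestBlock` helper added in `cmd/util.go` below. A usage sketch of the stats command, assuming the example config path:

```bash
# Print the latest block height and hash found in the configured LevelDB, then exit.
./eth-statediff-service --config ./environments/example.toml stats
```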
cmd/util.go (modified, 74 lines changed)
@@ -3,31 +3,32 @@
 import (
 	"context"
 
+	statediff "github.com/cerc-io/plugeth-statediff"
+	"github.com/cerc-io/plugeth-statediff/indexer"
+	"github.com/cerc-io/plugeth-statediff/indexer/node"
+	"github.com/cerc-io/plugeth-statediff/indexer/shared"
+	"github.com/cerc-io/plugeth-statediff/utils"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/statediff"
-	ind "github.com/ethereum/go-ethereum/statediff/indexer"
-	"github.com/ethereum/go-ethereum/statediff/indexer/node"
-	"github.com/ethereum/go-ethereum/statediff/indexer/shared"
 	"github.com/ethereum/go-ethereum/trie"
 	"github.com/spf13/viper"
 
-	sd "github.com/cerc-io/eth-statediff-service/pkg"
+	pkg "github.com/cerc-io/eth-statediff-service/pkg"
 	"github.com/cerc-io/eth-statediff-service/pkg/prom"
 )
 
 type blockRange [2]uint64
 
-func createStateDiffService(lvlDBReader sd.Reader, chainConf *params.ChainConfig, nodeInfo node.Info) (sd.StateDiffService, error) {
+func createStateDiffService(lvlDBReader pkg.Reader, chainConf *params.ChainConfig, nodeInfo node.Info) (*pkg.Service, error) {
 	// create statediff service
-	logWithCommand.Info("Setting up database")
+	logWithCommand.Debug("Setting up database")
 	conf, err := getConfig(nodeInfo)
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
 
-	logWithCommand.Info("Creating statediff indexer")
-	db, indexer, err := ind.NewStateDiffIndexer(context.Background(), chainConf, nodeInfo, conf)
+	logWithCommand.Debug("Creating statediff indexer")
+	db, indexer, err := indexer.NewStateDiffIndexer(context.Background(), chainConf, nodeInfo, conf)
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
@@ -35,23 +36,21 @@
 		prom.RegisterDBCollector(viper.GetString("database.name"), db)
 	}
 
-	logWithCommand.Info("Creating statediff service")
-	sdConf := sd.Config{
+	logWithCommand.Debug("Creating statediff service")
+	sdConf := pkg.ServiceConfig{
 		ServiceWorkers:  viper.GetUint("statediff.serviceWorkers"),
 		TrieWorkers:     viper.GetUint("statediff.trieWorkers"),
 		WorkerQueueSize: viper.GetUint("statediff.workerQueueSize"),
 		PreRuns:         setupPreRunRanges(),
 	}
-	return sd.NewStateDiffService(lvlDBReader, indexer, sdConf)
+	return pkg.NewStateDiffService(lvlDBReader, indexer, sdConf), nil
 }
 
-func setupPreRunRanges() []sd.RangeRequest {
+func setupPreRunRanges() []pkg.RangeRequest {
 	if !viper.GetBool("statediff.prerun") {
 		return nil
 	}
 	preRunParams := statediff.Params{
-		IntermediateStateNodes:   viper.GetBool("prerun.params.intermediateStateNodes"),
-		IntermediateStorageNodes: viper.GetBool("prerun.params.intermediateStorageNodes"),
 		IncludeBlock:    viper.GetBool("prerun.params.includeBlock"),
 		IncludeReceipts: viper.GetBool("prerun.params.includeReceipts"),
 		IncludeTD:       viper.GetBool("prerun.params.includeTD"),
@@ -66,9 +65,9 @@
 	preRunParams.WatchedAddresses = addrs
 	var rawRanges []blockRange
 	viper.UnmarshalKey("prerun.ranges", &rawRanges)
-	blockRanges := make([]sd.RangeRequest, len(rawRanges))
+	blockRanges := make([]pkg.RangeRequest, len(rawRanges))
 	for i, rawRange := range rawRanges {
-		blockRanges[i] = sd.RangeRequest{
+		blockRanges[i] = pkg.RangeRequest{
 			Start:  rawRange[0],
 			Stop:   rawRange[1],
 			Params: preRunParams,
@@ -77,7 +76,7 @@
 	if viper.IsSet("prerun.start") && viper.IsSet("prerun.stop") {
 		hardStart := viper.GetInt("prerun.start")
 		hardStop := viper.GetInt("prerun.stop")
-		blockRanges = append(blockRanges, sd.RangeRequest{
+		blockRanges = append(blockRanges, pkg.RangeRequest{
 			Start:  uint64(hardStart),
 			Stop:   uint64(hardStop),
 			Params: preRunParams,
@@ -87,9 +86,9 @@
 	return blockRanges
 }
 
-func instantiateLevelDBReader() (sd.Reader, *params.ChainConfig, node.Info) {
+func instantiateLevelDBReader() (pkg.Reader, *params.ChainConfig, node.Info) {
 	// load some necessary params
-	logWithCommand.Info("Loading statediff service parameters")
+	logWithCommand.Debug("Loading statediff service parameters")
 	mode := viper.GetString("leveldb.mode")
 	path := viper.GetString("leveldb.path")
 	ancientPath := viper.GetString("leveldb.ancient")
@@ -109,23 +108,15 @@
 
 	nodeInfo := getEthNodeInfo()
 
-	var chainConf *params.ChainConfig
-	var err error
 	chainConfigPath := viper.GetString("ethereum.chainConfig")
-
-	if chainConfigPath != "" {
-		chainConf, err = statediff.LoadConfig(chainConfigPath)
-	} else {
-		chainConf, err = statediff.ChainConfig(nodeInfo.ChainID)
-	}
-
+	chainConf, err := utils.LoadConfig(chainConfigPath)
 	if err != nil {
-		logWithCommand.Fatalf("Unable to instantiate chain config: %s", err.Error())
+		logWithCommand.Fatalf("Unable to instantiate chain config: %s", err)
 	}
 
 	// create LevelDB reader
-	logWithCommand.Info("Creating LevelDB reader")
-	readerConf := sd.LvLDBReaderConfig{
+	logWithCommand.Debug("Creating LevelDB reader")
+	readerConf := pkg.LvLDBReaderConfig{
 		TrieConfig: &trie.Config{
 			Cache:   viper.GetInt("cache.trie"),
 			Journal: "",
@@ -138,9 +129,24 @@
 		Url:         url,
 		DBCacheSize: viper.GetInt("cache.database"),
 	}
-	reader, err := sd.NewLvlDBReader(readerConf)
+	reader, err := pkg.NewLvlDBReader(readerConf)
 	if err != nil {
-		logWithCommand.Fatalf("Unable to instantiate levelDB reader: %s", err.Error())
+		logWithCommand.Fatalf("Unable to instantiate levelDB reader: %s", err)
 	}
 	return reader, chainConf, nodeInfo
 }
+
+// report latest block info
+func reportLatestBlock(reader pkg.Reader) {
+	header, err := reader.GetLatestHeader()
+	if err != nil {
+		logWithCommand.Fatalf("Unable to determine latest header height and hash: %s", err.Error())
+	}
+	if header.Number == nil {
+		logWithCommand.Fatal("Latest header found in levelDB has a nil block height")
+	}
+	logWithCommand.
+		WithField("height", header.Number).
+		WithField("hash", header.Hash()).
+		Info("Latest block found in levelDB")
+}
(deleted file; file name not captured in this view)
@@ -1,23 +0,0 @@
-version: '3.2'
-
-services:
-  eth-statediff-service:
-    build:
-      context: ./
-      cache_from:
-        - alpine:latest
-        - golang:1.16
-      dockerfile: ./Dockerfile
-      args:
-        USER: "vdbm"
-        CONFIG_FILE: ./environments/example.toml
-        EXPOSE_PORT: 8545
-    environment:
-      - VDB_COMMAND=serve
-    volumes:
-      - eth-statediff-service-data:/root/.ethereum/
-    ports:
-      - "127.0.0.1:8545:8545"
-
-volumes:
-  eth-statediff-service-data:
(deleted file; file name not captured in this view)
@@ -1,58 +0,0 @@
-[leveldb]
-mode = "local"
-path = "/app/geth-rw/chaindata"
-ancient = "/app/geth-rw/chaindata/ancient"
-
-[server]
-ipcPath = ""
-httpPath = "0.0.0.0:8545"
-
-[statediff]
-prerun = true
-serviceWorkers = 1
-workerQueueSize = 1024
-trieWorkers = 16
-
-[prerun]
-only = true
-ranges = []
-[prerun.params]
-intermediateStateNodes = true
-intermediateStorageNodes = true
-includeBlock = true
-includeReceipts = true
-includeTD = true
-includeCode = true
-watchedAddresses = []
-
-[log]
-file = ""
-level = "info"
-
-[database]
-type = "postgres"
-name = ""
-hostname = ""
-port = 5432
-user = ""
-password = ""
-driver = "sqlx"
-
-[cache]
-database = 1024
-trie = 4096
-
-[prom]
-metrics = true
-http = true
-httpAddr = "0.0.0.0"
-httpPort = 9100
-dbStats = false
-
-[ethereum]
-nodeID = ""
-clientName = "eth-statediff-service"
-genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
-networkID = 1
-chainID = 1
-chainConfig = ""
(modified config file; file name not captured in this view)
@@ -1,63 +1,104 @@
 [leveldb]
-mode = "local"
-path = "/Users/user/Library/Ethereum/geth/chaindata"
-ancient = "/Users/user/Library/Ethereum/geth/chaindata/ancient"
-url = "http://127.0.0.1:8082/"
+# LevelDB access mode <local | remote>
+mode = "local" # LEVELDB_MODE
+
+# LevelDB paths (local mode)
+path = "/Users/user/Library/Ethereum/geth/chaindata" # LEVELDB_PATH
+ancient = "/Users/user/Library/Ethereum/geth/chaindata/ancient" # LEVELDB_ANCIENT
+
+# URL for leveldb-ethdb-rpc endpoint (remote mode)
+url = "http://127.0.0.1:8082/" # LEVELDB_URL
 
 [server]
-ipcPath = ".ipc"
-httpPath = "127.0.0.1:8545"
+ipcPath = ".ipc" # SERVICE_IPC_PATH
+httpPath = "127.0.0.1:8545" # SERVICE_HTTP_PATH
 
 [statediff]
-prerun = true
-serviceWorkers = 1
-workerQueueSize = 1024
-trieWorkers = 4
+prerun = true # STATEDIFF_PRERUN
+serviceWorkers = 1 # STATEDIFF_SERVICE_WORKERS
+workerQueueSize = 1024 # STATEDIFF_WORKER_QUEUE_SIZE
+trieWorkers = 4 # STATEDIFF_TRIE_WORKERS
 
 [prerun]
-only = false
+only = false # PRERUN_ONLY
+parallel = true # PRERUN_PARALLEL
+
+# to perform prerun in a specific range (optional)
+start = 0 # PRERUN_RANGE_START
+stop = 100 # PRERUN_RANGE_STOP
+
+# to perform prerun over multiple ranges (optional)
 ranges = [
-  [0, 1000]
+  [101, 1000]
 ]
+
+# statediffing params for prerun
 [prerun.params]
-intermediateStateNodes = true
-intermediateStorageNodes = true
-includeBlock = true
-includeReceipts = true
-includeTD = true
-includeCode = true
+intermediateStateNodes = true # PRERUN_INTERMEDIATE_STATE_NODES
+intermediateStorageNodes = true # PRERUN_INTERMEDIATE_STORAGE_NODES
+includeBlock = true # PRERUN_INCLUDE_BLOCK
+includeReceipts = true # PRERUN_INCLUDE_RECEIPTS
+includeTD = true # PRERUN_INCLUDE_TD
+includeCode = true # PRERUN_INCLUDE_CODE
 watchedAddresses = []
 
 [log]
-file = ""
-level = "info"
+# Leave empty to output to stdout
+file = "" # LOG_FILE_PATH
+level = "info" # LOG_LEVEL
 
 [database]
-name = "vulcanize_test"
-hostname = "localhost"
-port = 5432
-user = "vulcanize"
-password = "..."
+# output type <postgres | file | dump>
 type = "postgres"
-driver = "sqlx"
-dumpDestination = ""
-filePath = ""
+
+# with postgres type
+# db credentials
+name = "vulcanize_test" # DATABASE_NAME
+hostname = "localhost" # DATABASE_HOSTNAME
+port = 5432 # DATABASE_PORT
+user = "vulcanize" # DATABASE_USER
+password = "..." # DATABASE_PASSWORD
+# SQL backend to use: <sqlx | pgx>
+driver = "sqlx" # DATABASE_DRIVER_TYPE
+
+# with file type
+# file mode <sql | csv>
+fileMode = "csv" # DATABASE_FILE_MODE
+
+# with SQL file mode
+filePath = "" # DATABASE_FILE_PATH
+
+# with CSV file mode
+fileCsvDir = "output_dir" # DATABASE_FILE_CSV_DIR
+
+# with dump type
+# <stdout | stderr | discard>
+dumpDestination = "" # DATABASE_DUMP_DST
 
 [cache]
-database = 1024
-trie = 1024
+# settings for geth internal caches
+database = 1024 # DB_CACHE_SIZE_MB
+trie = 1024 # TRIE_CACHE_SIZE_MB
 
 [prom]
-dbStats = false
-metrics = true
-http = true
-httpAddr = "localhost"
-httpPort = "8889"
+# prometheus metrics
+metrics = true # PROM_METRICS
+http = true # PROM_HTTP
+httpAddr = "localhost" # PROM_HTTP_ADDR
+httpPort = "8889" # PROM_HTTP_PORT
+dbStats = true # PROM_DB_STATS
 
 [ethereum]
-chainConfig = ""
-nodeID = ""
-clientName = "eth-statediff-service"
-genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
-networkID = 1
-chainID = 1
+# Identifiers for ethereum node
+nodeID = "" # ETH_NODE_ID
+clientName = "eth-statediff-service" # ETH_CLIENT_NAME
+networkID = 1 # ETH_NETWORK_ID
+chainID = 1 # ETH_CHAIN_ID
+genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" # ETH_GENESIS_BLOCK
+
+# Path to custom chain config file (optional)
+# chainID should match that in this config file
+chainConfig = "chain.json" # ETH_CHAIN_CONFIG
+
+[debug]
+pprof = false # DEBUG_PPROF
fixture/.gitignore (new file, 6 lines, vendored)
@@ -0,0 +1,6 @@
+*/*.log
+*/CURRENT*
+*/LOCK
+*/LOG
+*/MANIFEST-*
+*/ancient/FLOCK
New fixture chaindata files (used by the CI sanity test above; binary contents not shown):

fixture/chaindata/000002.ldb (binary)
fixture/chaindata/000004.ldb (binary)
fixture/chaindata/ancient/bodies.0000.cdat (empty)
fixture/chaindata/ancient/bodies.cidx (binary)
fixture/chaindata/ancient/bodies.meta (1 line, non-text)
fixture/chaindata/ancient/diffs.0000.rdat (empty)
fixture/chaindata/ancient/diffs.meta (1 line, non-text)
fixture/chaindata/ancient/diffs.ridx (binary)
fixture/chaindata/ancient/hashes.0000.rdat (empty)
fixture/chaindata/ancient/hashes.meta (1 line, non-text)
fixture/chaindata/ancient/hashes.ridx (binary)
fixture/chaindata/ancient/headers.0000.cdat (empty)
fixture/chaindata/ancient/headers.cidx (binary)
fixture/chaindata/ancient/headers.meta (1 line, non-text)
fixture/chaindata/ancient/receipts.0000.cdat (empty)
fixture/chaindata/ancient/receipts.cidx (binary)
fixture/chaindata/ancient/receipts.meta (1 line, non-text)
145  go.mod
@@ -1,69 +1,62 @@
 module github.com/cerc-io/eth-statediff-service

-go 1.18
+go 1.19

 require (
 	github.com/cerc-io/leveldb-ethdb-rpc v1.1.13
-	github.com/ethereum/go-ethereum v1.11.5
+	github.com/cerc-io/plugeth-statediff v0.0.0-00010101000000-000000000000
+	github.com/ethereum/go-ethereum v1.12.0
 	github.com/jmoiron/sqlx v1.3.5 // indirect
-	github.com/prometheus/client_golang v1.14.0
+	github.com/prometheus/client_golang v1.16.0
 	github.com/sirupsen/logrus v1.9.0
 	github.com/spf13/cobra v1.3.0
 	github.com/spf13/viper v1.10.1
 )

 require (
-	github.com/DataDog/zstd v1.5.2 // indirect
-	github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
+	github.com/DataDog/zstd v1.5.5 // indirect
+	github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
+	github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
+	github.com/cerc-io/eth-iterator-utils v1.2.0 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/cockroachdb/errors v1.9.1 // indirect
+	github.com/cockroachdb/errors v1.10.0 // indirect
 	github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
-	github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect
-	github.com/cockroachdb/redact v1.1.3 // indirect
+	github.com/cockroachdb/pebble v0.0.0-20230720154706-692f3b61a3c4 // indirect
+	github.com/cockroachdb/redact v1.1.5 // indirect
+	github.com/cockroachdb/tokenbucket v0.0.0-20230613231145-182959a1fad6 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/deckarep/golang-set/v2 v2.1.0 // indirect
-	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
+	github.com/deckarep/golang-set/v2 v2.3.0 // indirect
+	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
 	github.com/deepmap/oapi-codegen v1.8.2 // indirect
 	github.com/edsrzf/mmap-go v1.0.0 // indirect
-	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
+	github.com/fjl/memsize v0.0.1 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
-	github.com/georgysavva/scany v1.2.1 // indirect
-	github.com/getsentry/sentry-go v0.18.0 // indirect
+	github.com/georgysavva/scany v0.2.9 // indirect
+	github.com/getsentry/sentry-go v0.22.0 // indirect
 	github.com/go-ole/go-ole v1.2.6 // indirect
 	github.com/go-stack/stack v1.8.1 // indirect
 	github.com/gofrs/flock v0.8.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang-jwt/jwt/v4 v4.3.0 // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/golang/snappy v0.0.4 // indirect
+	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
 	github.com/google/uuid v1.3.0 // indirect
-	github.com/gorilla/websocket v1.4.2 // indirect
+	github.com/gorilla/websocket v1.5.0 // indirect
 	github.com/graph-gophers/graphql-go v1.3.0 // indirect
-	github.com/hashicorp/go-bexpr v0.1.10 // indirect
-	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
+	github.com/hashicorp/go-bexpr v0.1.12 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
-	github.com/holiman/uint256 v1.2.0 // indirect
-	github.com/huin/goupnp v1.0.3 // indirect
+	github.com/holiman/uint256 v1.2.3 // indirect
+	github.com/huin/goupnp v1.2.0 // indirect
+	github.com/inconshreveable/log15 v2.16.0+incompatible // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
-	github.com/influxdata/influxdb v1.8.3 // indirect
 	github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
+	github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
 	github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
-	github.com/ipfs/bbloom v0.0.4 // indirect
-	github.com/ipfs/go-block-format v0.0.3 // indirect
-	github.com/ipfs/go-cid v0.2.0 // indirect
-	github.com/ipfs/go-datastore v0.5.1 // indirect
-	github.com/ipfs/go-ipfs-blockstore v1.2.0 // indirect
-	github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect
-	github.com/ipfs/go-ipfs-util v0.0.2 // indirect
-	github.com/ipfs/go-ipld-format v0.4.0 // indirect
-	github.com/ipfs/go-log v1.0.5 // indirect
-	github.com/ipfs/go-log/v2 v2.1.3 // indirect
-	github.com/ipfs/go-metrics-interface v0.0.1 // indirect
+	github.com/ipfs/go-cid v0.4.1 // indirect
 	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
 	github.com/jackc/pgconn v1.10.0 // indirect
 	github.com/jackc/pgio v1.0.0 // indirect
@@ -74,70 +67,80 @@ require (
 	github.com/jackc/pgx/v4 v4.13.0 // indirect
 	github.com/jackc/puddle v1.1.3 // indirect
 	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
-	github.com/jbenet/goprocess v0.1.4 // indirect
-	github.com/klauspost/compress v1.15.15 // indirect
-	github.com/klauspost/cpuid/v2 v2.0.9 // indirect
+	github.com/klauspost/compress v1.16.7 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
 	github.com/kr/pretty v0.3.1 // indirect
 	github.com/kr/text v0.2.0 // indirect
-	github.com/lib/pq v1.10.7 // indirect
+	github.com/lib/pq v1.10.9 // indirect
 	github.com/magiconair/properties v1.8.5 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.16 // indirect
-	github.com/mattn/go-runewidth v0.0.9 // indirect
+	github.com/mattn/go-isatty v0.0.19 // indirect
+	github.com/mattn/go-runewidth v0.0.14 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
-	github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
-	github.com/minio/sha256-simd v1.0.0 // indirect
-	github.com/mitchellh/mapstructure v1.4.3 // indirect
-	github.com/mitchellh/pointerstructure v1.2.0 // indirect
+	github.com/minio/sha256-simd v1.0.1 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/mitchellh/pointerstructure v1.2.1 // indirect
 	github.com/mr-tron/base58 v1.2.0 // indirect
-	github.com/multiformats/go-base32 v0.0.3 // indirect
-	github.com/multiformats/go-base36 v0.1.0 // indirect
-	github.com/multiformats/go-multibase v0.0.3 // indirect
-	github.com/multiformats/go-multihash v0.1.0 // indirect
-	github.com/multiformats/go-varint v0.0.6 // indirect
+	github.com/multiformats/go-base32 v0.1.0 // indirect
+	github.com/multiformats/go-base36 v0.2.0 // indirect
+	github.com/multiformats/go-multibase v0.2.0 // indirect
+	github.com/multiformats/go-multihash v0.2.3 // indirect
+	github.com/multiformats/go-varint v0.0.7 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
+	github.com/openrelayxyz/plugeth-utils v1.2.0 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/pelletier/go-toml v1.9.4 // indirect
 	github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
-	github.com/pganalyze/pg_query_go/v2 v2.2.0 // indirect
+	github.com/pganalyze/pg_query_go/v4 v4.2.1 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/prometheus/client_model v0.3.0 // indirect
-	github.com/prometheus/common v0.39.0 // indirect
-	github.com/prometheus/procfs v0.9.0 // indirect
-	github.com/rogpeppe/go-internal v1.9.0 // indirect
-	github.com/rs/cors v1.7.0 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/prometheus/client_model v0.4.0 // indirect
+	github.com/prometheus/common v0.44.0 // indirect
+	github.com/prometheus/procfs v0.11.0 // indirect
+	github.com/rivo/uniseg v0.4.4 // indirect
+	github.com/rogpeppe/go-internal v1.11.0 // indirect
+	github.com/rs/cors v1.9.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
+	github.com/shopspring/decimal v1.2.0 // indirect
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/spf13/afero v1.6.0 // indirect
 	github.com/spf13/cast v1.4.1 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/status-im/keycard-go v0.2.0 // indirect
+	github.com/stretchr/objx v0.5.0 // indirect
+	github.com/stretchr/testify v1.8.2 // indirect
 	github.com/subosito/gotenv v1.2.0 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect
 	github.com/thoas/go-funk v0.9.3 // indirect
 	github.com/tklauser/go-sysconf v0.3.11 // indirect
-	github.com/tklauser/numcpus v0.6.0 // indirect
+	github.com/tklauser/numcpus v0.6.1 // indirect
 	github.com/tyler-smith/go-bip39 v1.1.0 // indirect
-	github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa // indirect
+	github.com/urfave/cli/v2 v2.25.7 // indirect
 	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
-	github.com/yusufpapurcu/wmi v1.2.2 // indirect
-	go.uber.org/atomic v1.7.0 // indirect
-	go.uber.org/multierr v1.6.0 // indirect
-	go.uber.org/zap v1.17.0 // indirect
-	golang.org/x/crypto v0.6.0 // indirect
-	golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
-	golang.org/x/net v0.6.0 // indirect
-	golang.org/x/sync v0.1.0 // indirect
-	golang.org/x/sys v0.5.0 // indirect
-	golang.org/x/text v0.7.0 // indirect
-	golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
-	google.golang.org/protobuf v1.28.1 // indirect
+	github.com/yusufpapurcu/wmi v1.2.3 // indirect
+	golang.org/x/crypto v0.11.0 // indirect
+	golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect
+	golang.org/x/net v0.10.0 // indirect
+	golang.org/x/sync v0.3.0 // indirect
+	golang.org/x/sys v0.10.0 // indirect
+	golang.org/x/term v0.10.0 // indirect
+	golang.org/x/text v0.11.0 // indirect
+	golang.org/x/time v0.3.0 // indirect
+	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
 	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
-	lukechampine.com/blake3 v1.1.6 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	lukechampine.com/blake3 v1.2.1 // indirect
 )

-replace github.com/ethereum/go-ethereum v1.11.5 => github.com/cerc-io/go-ethereum v1.11.5-statediff-4.3.9-alpha
+replace (
+	github.com/cerc-io/eth-iterator-utils => git.vdb.to/cerc-io/eth-iterator-utils v0.1.2
+	github.com/cerc-io/eth-testing => git.vdb.to/cerc-io/eth-testing v0.3.1
+	github.com/cerc-io/plugeth-statediff => git.vdb.to/cerc-io/plugeth-statediff v0.1.3
+	github.com/ethereum/go-ethereum => git.vdb.to/cerc-io/plugeth v0.0.0-20230808125822-691dc334fab1
+	github.com/openrelayxyz/plugeth-utils => git.vdb.to/cerc-io/plugeth-utils v0.0.0-20230706160122-cd41de354c46
+)
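The new replace block points every plugeth-related module at mirrors on git.vdb.to. A minimal sketch of resolving them locally, assuming the mirrors are reachable with whatever git credentials are already configured on the machine:

# Skip the public Go module proxy for the git.vdb.to mirrors named in the
# replace block, then resolve the module graph.
export GOPRIVATE='git.vdb.to/cerc-io/*'
go mod download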
11  pkg/api.go
@@ -18,7 +18,7 @@ package statediff
 import (
 	"context"

-	sd "github.com/ethereum/go-ethereum/statediff"
+	sd "github.com/cerc-io/plugeth-statediff"
 )

 // APIName is the namespace used for the state diffing service API
@@ -30,11 +30,11 @@ const APIVersion = "0.0.1"
 // PublicStateDiffAPI provides an RPC interface
 // that can be used to fetch historical diffs from LevelDB directly
 type PublicStateDiffAPI struct {
-	sds StateDiffService
+	sds *Service
 }

 // NewPublicStateDiffAPI creates an rpc interface for the underlying statediff service
-func NewPublicStateDiffAPI(sds StateDiffService) *PublicStateDiffAPI {
+func NewPublicStateDiffAPI(sds *Service) *PublicStateDiffAPI {
 	return &PublicStateDiffAPI{
 		sds: sds,
 	}
@@ -45,11 +45,6 @@ func (api *PublicStateDiffAPI) StateDiffAt(ctx context.Context, blockNumber uint
 	return api.sds.StateDiffAt(blockNumber, params)
 }

-// StateTrieAt returns a state trie payload at the specific blockheight
-func (api *PublicStateDiffAPI) StateTrieAt(ctx context.Context, blockNumber uint64, params sd.Params) (*sd.Payload, error) {
-	return api.sds.StateTrieAt(blockNumber, params)
-}
-
 // WriteStateDiffAt writes a state diff object directly to DB at the specific blockheight
 func (api *PublicStateDiffAPI) WriteStateDiffAt(ctx context.Context, blockNumber uint64, params sd.Params) error {
 	return api.sds.WriteStateDiffAt(blockNumber, params)
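For reference, calling the retained write method over HTTP looks roughly like the following. This is a hypothetical request: the `statediff_writeStateDiffAt` method name is assumed from the `statediff` namespace and the `WriteStateDiffAt` handler above, and the endpoint is taken from the test config further down.

# Hypothetical single-block request against the statediff RPC namespace.
curl -s 127.0.0.1:8545 -X POST -H 'Content-Type: application/json' --data '{
  "jsonrpc": "2.0",
  "method": "statediff_writeStateDiffAt",
  "params": [1000000, {"includeBlock": true, "includeReceipts": true}],
  "id": 1
}'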
156  pkg/builder.go  (file deleted)
@@ -1,156 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains a batch of utility type declarations used by the tests. As the node
// operates on unique types, a lot of them are needed to check various features.

package statediff

import (
	"fmt"
	"math/bits"
	"sync"

	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/log"
	sd "github.com/ethereum/go-ethereum/statediff"
	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
	iter "github.com/ethereum/go-ethereum/trie/concurrent_iterator"
	"github.com/sirupsen/logrus"
)

type builder struct {
	sd.StateDiffBuilder
	numWorkers uint
}

// NewBuilder is used to create a statediff builder
func NewBuilder(stateCache state.Database, workers uint) (sd.Builder, error) {
	if workers == 0 {
		workers = 1
	}
	if bits.OnesCount(workers) != 1 {
		return nil, fmt.Errorf("workers must be a power of 2")
	}
	return &builder{
		StateDiffBuilder: sd.StateDiffBuilder{
			StateCache: stateCache,
		},
		numWorkers: workers,
	}, nil
}

// BuildStateDiffObject builds a statediff object from two blocks and the provided parameters
func (sdb *builder) BuildStateDiffObject(args sd.Args, params sd.Params) (sdtypes.StateObject, error) {
	var stateNodes []sdtypes.StateNode
	var codeAndCodeHashes []sdtypes.CodeAndCodeHash
	err := sdb.WriteStateDiffObject(
		args,
		params, sd.StateNodeAppender(&stateNodes), sd.CodeMappingAppender(&codeAndCodeHashes))
	if err != nil {
		return sdtypes.StateObject{}, err
	}
	return sdtypes.StateObject{
		BlockHash:         args.BlockHash,
		BlockNumber:       args.BlockNumber,
		Nodes:             stateNodes,
		CodeAndCodeHashes: codeAndCodeHashes,
	}, nil
}

// WriteStateDiffObject writes a statediff object to output callback
func (sdb *builder) WriteStateDiffObject(args sd.Args, params sd.Params, output sdtypes.StateNodeSink, codeOutput sdtypes.CodeSink) error {
	// Load tries for old and new states
	oldTrie, err := sdb.StateCache.OpenTrie(args.OldStateRoot)
	if err != nil {
		return fmt.Errorf("error creating trie for oldStateRoot: %v", err)
	}
	newTrie, err := sdb.StateCache.OpenTrie(args.NewStateRoot)
	if err != nil {
		return fmt.Errorf("error creating trie for newStateRoot: %v", err)
	}

	// Split old and new tries into corresponding subtrie iterators
	oldIters1 := iter.SubtrieIterators(oldTrie, sdb.numWorkers)
	oldIters2 := iter.SubtrieIterators(oldTrie, sdb.numWorkers)
	newIters1 := iter.SubtrieIterators(newTrie, sdb.numWorkers)
	newIters2 := iter.SubtrieIterators(newTrie, sdb.numWorkers)

	// Create iterators ahead of time to avoid race condition in state.Trie access
	// We do two state iterations per subtrie: one for new/updated nodes,
	// one for deleted/updated nodes; prepare 2 iterator instances for each task
	var iterPairs [][]sd.IterPair
	for i := uint(0); i < sdb.numWorkers; i++ {
		iterPairs = append(iterPairs, []sd.IterPair{
			{Older: oldIters1[i], Newer: newIters1[i]},
			{Older: oldIters2[i], Newer: newIters2[i]},
		})
	}

	// Dispatch workers to process trie data; sync and collect results here via channels
	nodeChan := make(chan sdtypes.StateNode)
	codeChan := make(chan sdtypes.CodeAndCodeHash)

	go func() {
		nodeSender := func(node sdtypes.StateNode) error { nodeChan <- node; return nil }
		codeSender := func(code sdtypes.CodeAndCodeHash) error { codeChan <- code; return nil }
		var wg sync.WaitGroup

		for w := uint(0); w < sdb.numWorkers; w++ {
			wg.Add(1)
			go func(worker uint) {
				defer wg.Done()
				var err error
				logger := log.New("hash", args.BlockHash.Hex(), "number", args.BlockNumber)
				if !params.IntermediateStateNodes {
					err = sdb.BuildStateDiffWithoutIntermediateStateNodes(iterPairs[worker], params, nodeSender, codeSender, logger)
				} else {
					err = sdb.BuildStateDiffWithIntermediateStateNodes(iterPairs[worker], params, nodeSender, codeSender, logger)
				}

				if err != nil {
					logrus.Errorf("buildStateDiff error for worker %d, params %+v", worker, params)
				}
			}(w)
		}
		wg.Wait()
		close(nodeChan)
		close(codeChan)
	}()

	for nodeChan != nil || codeChan != nil {
		select {
		case node, more := <-nodeChan:
			if more {
				if err := output(node); err != nil {
					return err
				}
			} else {
				nodeChan = nil
			}
		case codeAndCodeHash, more := <-codeChan:
			if more {
				if err := codeOutput(codeAndCodeHash); err != nil {
					return err
				}
			} else {
				codeChan = nil
			}
		}
	}

	return nil
}
2607  pkg/builder_test.go  (file diff suppressed because it is too large)

@@ -1,7 +1,7 @@
 package statediff

-// Config holds config params for the statediffing service
-type Config struct {
+// ServiceConfig holds config params for the statediffing service
+type ServiceConfig struct {
 	ServiceWorkers uint
 	TrieWorkers uint
 	WorkerQueueSize uint
@@ -22,9 +22,11 @@ package statediff
 import (
 	"sort"
 	"strings"
+
+	sdtypes "github.com/cerc-io/plugeth-statediff/types"
 )

-func sortKeys(data AccountMap) []string {
+func sortKeys(data sdtypes.AccountMap) []string {
 	keys := make([]string, 0, len(data))
 	for key := range data {
 		keys = append(keys, key)
@@ -19,7 +19,7 @@ package prom
 import (
 	"github.com/prometheus/client_golang/prometheus"

-	dbmetrics "github.com/ethereum/go-ethereum/statediff/indexer/database/metrics"
+	dbmetrics "github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
 )

 const (
@@ -56,23 +56,29 @@ type LvLDBReaderConfig struct {
 	DBCacheSize int
 }

-// NewLvlDBReader creates a new Read using LevelDB
+// NewLvlDBReader creates a new Reader using LevelDB
 func NewLvlDBReader(conf LvLDBReaderConfig) (*LvlDBReader, error) {
 	var edb ethdb.Database
 	var err error

-	if conf.Mode == "local" {
-		kvdb, _ := rawdb.NewLevelDBDatabase(conf.Path, conf.DBCacheSize, 256, "eth-statediff-service", true)
-		edb, err = rawdb.NewDatabaseWithFreezer(kvdb, conf.AncientPath, "eth-statediff-service", true)
-	}
-
-	if conf.Mode == "remote" {
-		edb, err = client.NewDatabaseClient(conf.Url)
-	}
-
-	if err != nil {
-		return nil, err
-	}
+	switch conf.Mode {
+	case "local":
+		edb, err = rawdb.NewLevelDBDatabase(conf.Path, conf.DBCacheSize, 256, "eth-statediff-service", true)
+		if err != nil {
+			return nil, err
+		}
+		edb, err = rawdb.NewDatabaseWithFreezer(edb, conf.AncientPath, "eth-statediff-service", true)
+		if err != nil {
+			return nil, err
+		}
+	case "remote":
+		edb, err = client.NewDatabaseClient(conf.Url)
+		if err != nil {
+			return nil, err
+		}
+	}

 	return &LvlDBReader{
 		ethDB:   edb,
 		stateDB: state.NewDatabaseWithConfig(edb, conf.TrieConfig),
@@ -84,11 +90,11 @@ func NewLvlDBReader(conf LvLDBReaderConfig) (*LvlDBReader, error) {
 func (ldr *LvlDBReader) GetBlockByHash(hash common.Hash) (*types.Block, error) {
 	height := rawdb.ReadHeaderNumber(ldr.ethDB, hash)
 	if height == nil {
-		return nil, fmt.Errorf("unable to read header height for header hash %s", hash.String())
+		return nil, fmt.Errorf("unable to read header height for header hash %s", hash)
 	}
 	block := rawdb.ReadBlock(ldr.ethDB, hash, *height)
 	if block == nil {
-		return nil, fmt.Errorf("unable to read block at height %d hash %s", *height, hash.String())
+		return nil, fmt.Errorf("unable to read block at height %d hash %s", *height, hash)
 	}
 	return block, nil
 }
@@ -97,7 +103,7 @@ func (ldr *LvlDBReader) GetBlockByNumber(number uint64) (*types.Block, error) {
 	hash := rawdb.ReadCanonicalHash(ldr.ethDB, number)
 	block := rawdb.ReadBlock(ldr.ethDB, hash, number)
 	if block == nil {
-		return nil, fmt.Errorf("unable to read block at height %d hash %s", number, hash.String())
+		return nil, fmt.Errorf("unable to read block at height %d hash %s", number, hash)
 	}
 	return block, nil
 }
@@ -106,11 +112,11 @@ func (ldr *LvlDBReader) GetBlockByNumber(number uint64) (*types.Block, error) {
 func (ldr *LvlDBReader) GetReceiptsByHash(hash common.Hash) (types.Receipts, error) {
 	number := rawdb.ReadHeaderNumber(ldr.ethDB, hash)
 	if number == nil {
-		return nil, fmt.Errorf("unable to read header height for header hash %s", hash.String())
+		return nil, fmt.Errorf("unable to read header height for header hash %s", hash)
 	}
 	receipts := rawdb.ReadReceipts(ldr.ethDB, hash, *number, ldr.chainConfig)
 	if receipts == nil {
-		return nil, fmt.Errorf("unable to read receipts at height %d hash %s", number, hash.String())
+		return nil, fmt.Errorf("unable to read receipts at height %d hash %s", number, hash)
 	}
 	return receipts, nil
 }
@@ -119,11 +125,11 @@ func (ldr *LvlDBReader) GetReceiptsByHash(hash common.Hash) (types.Receipts, err
 func (ldr *LvlDBReader) GetTdByHash(hash common.Hash) (*big.Int, error) {
 	number := rawdb.ReadHeaderNumber(ldr.ethDB, hash)
 	if number == nil {
-		return nil, fmt.Errorf("unable to read header height for header hash %s", hash.String())
+		return nil, fmt.Errorf("unable to read header height for header hash %s", hash)
 	}
 	td := rawdb.ReadTd(ldr.ethDB, hash, *number)
 	if td == nil {
-		return nil, fmt.Errorf("unable to read total difficulty at height %d hash %s", number, hash.String())
+		return nil, fmt.Errorf("unable to read total difficulty at height %d hash %s", number, hash)
 	}
 	return td, nil
 }
@@ -79,7 +79,7 @@ func StartIPCEndpoint(ipcEndpoint string, apis []rpc.API) (net.Listener, *rpc.Se
 		if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
 			return nil, nil, err
 		}
-		log.Debug("IPC registered", "namespace", api.Namespace)
+		log.WithField("namespace", api.Namespace).Debug("IPC server registered")
 	}
 	// All APIs registered, start the IPC listener.
 	listener, err := ipcListen(ipcEndpoint)
130  pkg/service.go
@@ -22,15 +22,15 @@ import (
 	"sync"
 	"time"

+	"github.com/cerc-io/plugeth-statediff"
+	"github.com/cerc-io/plugeth-statediff/adapt"
+	"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
+	sdtypes "github.com/cerc-io/plugeth-statediff/types"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/rpc"
-	sd "github.com/ethereum/go-ethereum/statediff"
-	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
-	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
 	"github.com/sirupsen/logrus"

 	"github.com/cerc-io/eth-statediff-service/pkg/prom"
@@ -38,35 +38,10 @@ import (

 const defaultQueueSize = 1024

-// StateDiffService is the state-diffing service interface
-type StateDiffService interface {
-	// Lifecycle Start() and Stop()
-	node.Lifecycle
-	// APIs and Protocols() interface for node service registration
-	APIs() []rpc.API
-	Protocols() []p2p.Protocol
-	// Loop is the main event loop for processing state diffs
-	Loop(wg *sync.WaitGroup) error
-	// Run is a one-off command to run on a predefined set of ranges
-	Run(ranges []RangeRequest, parallel bool) error
-	// StateDiffAt method to get state diff object at specific block
-	StateDiffAt(blockNumber uint64, params sd.Params) (*sd.Payload, error)
-	// StateDiffFor method to get state diff object at specific block
-	StateDiffFor(blockHash common.Hash, params sd.Params) (*sd.Payload, error)
-	// StateTrieAt method to get state trie object at specific block
-	StateTrieAt(blockNumber uint64, params sd.Params) (*sd.Payload, error)
-	// WriteStateDiffAt method to write state diff object directly to DB
-	WriteStateDiffAt(blockNumber uint64, params sd.Params) error
-	// WriteStateDiffFor method to get state trie object at specific block
-	WriteStateDiffFor(blockHash common.Hash, params sd.Params) error
-	// WriteStateDiffsInRange method to wrtie state diff objects within the range directly to the DB
-	WriteStateDiffsInRange(start, stop uint64, params sd.Params) error
-}
-
 // Service is the underlying struct for the state diffing service
 type Service struct {
 	// Used to build the state diff objects
-	Builder sd.Builder
+	builder statediff.Builder
 	// Used to read data from LevelDB
 	lvlDBReader Reader
 	// Used to signal shutdown of the service
@@ -82,22 +57,20 @@ type Service struct {
 }

 // NewStateDiffService creates a new Service
-func NewStateDiffService(lvlDBReader Reader, indexer interfaces.StateDiffIndexer, conf Config) (*Service, error) {
-	b, err := NewBuilder(lvlDBReader.StateDB(), conf.TrieWorkers)
-	if err != nil {
-		return nil, err
-	}
+func NewStateDiffService(lvlDBReader Reader, indexer interfaces.StateDiffIndexer, conf ServiceConfig) *Service {
+	builder := statediff.NewBuilder(adapt.GethStateView(lvlDBReader.StateDB()))
+	builder.SetSubtrieWorkers(conf.TrieWorkers)
 	if conf.WorkerQueueSize == 0 {
 		conf.WorkerQueueSize = defaultQueueSize
 	}
 	return &Service{
 		lvlDBReader: lvlDBReader,
-		Builder:     b,
+		builder:     builder,
 		indexer:     indexer,
 		workers:     conf.ServiceWorkers,
 		queue:       make(chan RangeRequest, conf.WorkerQueueSize),
 		preruns:     conf.PreRuns,
-	}, nil
+	}
 }

 // Protocols exports the services p2p protocols, this service has none
@@ -117,7 +90,7 @@ func (sds *Service) APIs() []rpc.API {
 	}
 }

-func segmentRange(workers, start, stop uint64, params sd.Params) []RangeRequest {
+func segmentRange(workers, start, stop uint64, params statediff.Params) []RangeRequest {
 	segmentSize := ((stop - start) + 1) / workers
 	remainder := ((stop - start) + 1) % workers
 	numOfSegments := workers
@@ -216,25 +189,24 @@ func (sds *Service) Loop(wg *sync.WaitGroup) error {
 	for {
 		select {
 		case blockRange := <-sds.queue:
-			logrus.Infof("service worker %d received range (%d, %d) off of work queue, beginning processing", id, blockRange.Start, blockRange.Stop)
+			log := logrus.WithField("range", blockRange).WithField("worker", id)
+			log.Debug("processing range")
 			prom.DecQueuedRanges()
 			for j := blockRange.Start; j <= blockRange.Stop; j++ {
 				if err := sds.WriteStateDiffAt(j, blockRange.Params); err != nil {
-					logrus.Errorf("service worker %d error writing statediff at height %d in range (%d, %d) : %v", id, j, blockRange.Start, blockRange.Stop, err)
+					log.Errorf("error writing statediff at block %d: %v", j, err)
 				}
 				select {
 				case <-sds.quitChan:
-					logrus.Infof("closing service worker %d\n"+
-						"working in range (%d, %d)\n"+
-						"last processed height: %d", id, blockRange.Start, blockRange.Stop, j)
+					log.Infof("closing service worker (last processed block: %d)", j)
 					return
 				default:
-					logrus.Infof("service worker %d finished processing statediff height %d in range (%d, %d)", id, j, blockRange.Start, blockRange.Stop)
+					log.Infof("Finished processing block %d", j)
 				}
 			}
-			logrus.Infof("service worker %d finished processing range (%d, %d)", id, blockRange.Start, blockRange.Stop)
+			log.Debugf("Finished processing range")
 		case <-sds.quitChan:
-			logrus.Infof("closing the statediff service loop worker %d", id)
+			logrus.Debugf("closing the statediff service loop worker %d", id)
 			return
 		}
 	}
@@ -251,7 +223,7 @@ func (sds *Service) Loop(wg *sync.WaitGroup) error {

 // StateDiffAt returns a state diff object payload at the specific blockheight
 // This operation cannot be performed back past the point of db pruning; it requires an archival node for historical data
-func (sds *Service) StateDiffAt(blockNumber uint64, params sd.Params) (*sd.Payload, error) {
+func (sds *Service) StateDiffAt(blockNumber uint64, params statediff.Params) (*statediff.Payload, error) {
 	currentBlock, err := sds.lvlDBReader.GetBlockByNumber(blockNumber)
 	if err != nil {
 		return nil, err
@@ -273,12 +245,12 @@ func (sds *Service) StateDiffAt(blockNumber uint64, params sd.Params) (*sd.Paylo

 // StateDiffFor returns a state diff object payload for the specific blockhash
 // This operation cannot be performed back past the point of db pruning; it requires an archival node for historical data
-func (sds *Service) StateDiffFor(blockHash common.Hash, params sd.Params) (*sd.Payload, error) {
+func (sds *Service) StateDiffFor(blockHash common.Hash, params statediff.Params) (*statediff.Payload, error) {
 	currentBlock, err := sds.lvlDBReader.GetBlockByHash(blockHash)
 	if err != nil {
 		return nil, err
 	}
-	logrus.Infof("sending state diff at block %s", blockHash.Hex())
+	logrus.Infof("sending state diff at block %s", blockHash)

 	// compute leaf paths of watched addresses in the params
 	params.ComputeWatchedAddressesLeafPaths()
@@ -294,8 +266,8 @@ func (sds *Service) StateDiffFor(blockHash common.Hash, params sd.Params) (*sd.P
 }

 // processStateDiff method builds the state diff payload from the current block, parent state root, and provided params
-func (sds *Service) processStateDiff(currentBlock *types.Block, parentRoot common.Hash, params sd.Params) (*sd.Payload, error) {
-	stateDiff, err := sds.Builder.BuildStateDiffObject(sd.Args{
+func (sds *Service) processStateDiff(currentBlock *types.Block, parentRoot common.Hash, params statediff.Params) (*statediff.Payload, error) {
+	stateDiff, err := sds.builder.BuildStateDiffObject(statediff.Args{
 		BlockHash:    currentBlock.Hash(),
 		BlockNumber:  currentBlock.Number(),
 		OldStateRoot: parentRoot,
@@ -312,8 +284,8 @@ func (sds *Service) processStateDiff(currentBlock *types.Block, parentRoot commo
 	return sds.newPayload(stateDiffRlp, currentBlock, params)
 }

-func (sds *Service) newPayload(stateObject []byte, block *types.Block, params sd.Params) (*sd.Payload, error) {
-	payload := &sd.Payload{
+func (sds *Service) newPayload(stateObject []byte, block *types.Block, params statediff.Params) (*statediff.Payload, error) {
+	payload := &statediff.Payload{
 		StateObjectRlp: stateObject,
 	}
 	if params.IncludeBlock {
@@ -344,34 +316,6 @@ func (sds *Service) newPayload(stateObject []byte, block *types.Block, params sd
 	return payload, nil
 }

-// StateTrieAt returns a state trie object payload at the specified blockheight
-// This operation cannot be performed back past the point of db pruning; it requires an archival node for historical data
-func (sds *Service) StateTrieAt(blockNumber uint64, params sd.Params) (*sd.Payload, error) {
-	currentBlock, err := sds.lvlDBReader.GetBlockByNumber(blockNumber)
-	if err != nil {
-		return nil, err
-	}
-	logrus.Infof("sending state trie at block %d", blockNumber)
-
-	// compute leaf paths of watched addresses in the params
-	params.ComputeWatchedAddressesLeafPaths()
-
-	return sds.processStateTrie(currentBlock, params)
-}
-
-func (sds *Service) processStateTrie(block *types.Block, params sd.Params) (*sd.Payload, error) {
-	stateNodes, err := sds.Builder.BuildStateTrieObject(block)
-	if err != nil {
-		return nil, err
-	}
-	stateTrieRlp, err := rlp.EncodeToBytes(&stateNodes)
-	if err != nil {
-		return nil, err
-	}
-	logrus.Infof("state trie object at block %d is %d bytes in length", block.Number().Uint64(), len(stateTrieRlp))
-	return sds.newPayload(stateTrieRlp, block, params)
-}
-
 // Start is used to begin the service
 func (sds *Service) Start() error {
 	logrus.Info("starting statediff service")
@@ -388,7 +332,7 @@ func (sds *Service) Stop() error {
 // WriteStateDiffAt writes a state diff at the specific blockheight directly to the database
 // This operation cannot be performed back past the point of db pruning; it requires an archival node
 // for historical data
-func (sds *Service) WriteStateDiffAt(blockNumber uint64, params sd.Params) error {
+func (sds *Service) WriteStateDiffAt(blockNumber uint64, params statediff.Params) error {
 	logrus.Infof("Writing state diff at block %d", blockNumber)
 	t := time.Now()
 	currentBlock, err := sds.lvlDBReader.GetBlockByNumber(blockNumber)
@@ -413,8 +357,8 @@ func (sds *Service) WriteStateDiffAt(blockNumber uint64, params sd.Params) error
 // WriteStateDiffFor writes a state diff for the specific blockHash directly to the database
 // This operation cannot be performed back past the point of db pruning; it requires an archival node
 // for historical data
-func (sds *Service) WriteStateDiffFor(blockHash common.Hash, params sd.Params) error {
-	logrus.Infof("Writing state diff for block %s", blockHash.Hex())
+func (sds *Service) WriteStateDiffFor(blockHash common.Hash, params statediff.Params) error {
+	logrus.Infof("Writing state diff for block %s", blockHash)
 	t := time.Now()
 	currentBlock, err := sds.lvlDBReader.GetBlockByHash(blockHash)
 	if err != nil {
@@ -436,7 +380,7 @@ func (sds *Service) WriteStateDiffFor(blockHash common.Hash, params sd.Params) e
 }

 // Writes a state diff from the current block, parent state root, and provided params
-func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, params sd.Params, t time.Time) error {
+func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, params statediff.Params, t time.Time) error {
 	var totalDifficulty *big.Int
 	var receipts types.Receipts
 	var err error
@@ -461,28 +405,30 @@ func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, p
 		return err
 	}
 	// defer handling of commit/rollback for any return case
-	output := func(node sdtypes.StateNode) error {
+	output := func(node sdtypes.StateLeafNode) error {
 		return sds.indexer.PushStateNode(tx, node, block.Hash().String())
 	}
-	codeOutput := func(c sdtypes.CodeAndCodeHash) error {
-		return sds.indexer.PushCodeAndCodeHash(tx, c)
+	codeOutput := func(c sdtypes.IPLD) error {
+		return sds.indexer.PushIPLD(tx, c)
 	}
 	prom.SetTimeMetric(prom.T_BLOCK_PROCESSING, time.Now().Sub(t))
 	t = time.Now()
-	err = sds.Builder.WriteStateDiffObject(sd.Args{
+	err = sds.builder.WriteStateDiff(statediff.Args{
 		NewStateRoot: block.Root(),
 		OldStateRoot: parentRoot,
+		BlockNumber:  block.Number(),
+		BlockHash:    block.Hash(),
 	}, params, output, codeOutput)
 	prom.SetTimeMetric(prom.T_STATE_PROCESSING, time.Now().Sub(t))
 	t = time.Now()
-	err = tx.Submit(err)
+	err = tx.Submit()
 	prom.SetLastProcessedHeight(height)
 	prom.SetTimeMetric(prom.T_POSTGRES_TX_COMMIT, time.Now().Sub(t))
 	return err
 }

 // WriteStateDiffsInRange adds a RangeRequest to the work queue
-func (sds *Service) WriteStateDiffsInRange(start, stop uint64, params sd.Params) error {
+func (sds *Service) WriteStateDiffsInRange(start, stop uint64, params statediff.Params) error {
 	if stop < start {
 		return fmt.Errorf("invalid block range (%d, %d): stop height must be greater or equal to start height", start, stop)
 	}
@@ -490,7 +436,7 @@ func (sds *Service) WriteStateDiffsInRange(start, stop uint64, params sd.Params)
 	select {
 	case sds.queue <- RangeRequest{Start: start, Stop: stop, Params: params}:
 		prom.IncQueuedRanges()
-		logrus.Infof("added range (%d, %d) to the worker queue", start, stop)
+		logrus.Infof("Added range (%d, %d) to the worker queue", start, stop)
 		return nil
 	case <-blocked.C:
 		return fmt.Errorf("unable to add range (%d, %d) to the worker queue", start, stop)
22  pkg/types.go
@@ -20,25 +20,17 @@
 package statediff

 import (
-	"github.com/ethereum/go-ethereum/core/types"
-	sd "github.com/ethereum/go-ethereum/statediff"
-	sdTypes "github.com/ethereum/go-ethereum/statediff/types"
+	"fmt"
+
+	sd "github.com/cerc-io/plugeth-statediff"
 )

-// AccountMap is a mapping of hex encoded path => account wrapper
-type AccountMap map[string]accountWrapper
-
-// accountWrapper is used to temporary associate the unpacked node with its raw values
-type accountWrapper struct {
-	Account   *types.StateAccount
-	NodeType  sdTypes.NodeType
-	Path      []byte
-	NodeValue []byte
-	LeafKey   []byte
-}
-
 // RangeRequest holds range quest work params
 type RangeRequest struct {
 	Start, Stop uint64
 	Params      sd.Params
 }
+
+func (r RangeRequest) String() string {
+	return fmt.Sprintf("[%d,%d]", r.Start, r.Stop)
+}
22  scripts/request-range.sh  Executable file
@@ -0,0 +1,22 @@
#!/bin/bash

set -eu

FROM=$1
TO=$2
URL=127.0.0.1:8545

DATA='{
    "jsonrpc": "2.0",
    "method": "statediff_writeStateDiffsInRange",
    "params": ['"$FROM"', '"$TO"', {
        "includeBlock": true,
        "includeReceipts": true,
        "includeTD": true,
        "includeCode": true
      }
    ],
    "id": 1
}'

exec curl -s $URL -X POST -H 'Content-Type: application/json' --data "$DATA"
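The script takes a start and an end block and posts a single writeStateDiffsInRange request to the local RPC endpoint. For example, assuming the service is already listening on 127.0.0.1:8545:

# Request statediffs for blocks 100 through 200 from a locally running service.
./scripts/request-range.sh 100 200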
@@ -17,4 +17,4 @@ mkdir -p /app/geth-rw && \
 sudo mount -t overlay overlay -o lowerdir=/app/geth-ro,upperdir=/tmp/overlay/upper,workdir=/tmp/overlay/work /app/geth-rw && \

 echo "Running the statediff service" && \
-sudo ./eth-statediff-service "$VDB_COMMAND" --config=config.toml
+exec sudo ./eth-statediff-service "$VDB_COMMAND" --config=/config/config.toml
16  test/ci-chain.json  Normal file
@@ -0,0 +1,16 @@
{
  "chainId": 41337,
  "homesteadBlock": 0,
  "eip150Block": 0,
  "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
  "eip155Block": 0,
  "eip158Block": 0,
  "byzantiumBlock": 0,
  "constantinopleBlock": 0,
  "petersburgBlock": 0,
  "istanbulBlock": 0,
  "clique": {
    "period": 5,
    "epoch": 30000
  }
}
36  test/ci-config.toml  Normal file
@@ -0,0 +1,36 @@
[leveldb]
mode = "local"
url = "http://127.0.0.1:8082/"

[server]
ipcPath = ".ipc"
httpPath = "0.0.0.0:8545"

[statediff]
serviceWorkers = 1
workerQueueSize = 1024
trieWorkers = 4

[log]
level = "debug"

[database]
name = "cerc_testing"
hostname = "localhost"
port = 8077
user = "vdbm"
password = "password"
type = "postgres"
driver = "sqlx"

[cache]
database = 1024
trie = 1024

[ethereum]
chainConfig = "test/ci-chain.json"
nodeID = ""
clientName = "eth-statediff-service"
genesisBlock = "0x37cbb63c7150a7b60f2878433963ed8ba7e5f82fb2683ec7a945c974e1cf4e05"
networkID = 1
chainID = 41337
23  test/compose.yml  Normal file
@@ -0,0 +1,23 @@
services:
  migrations:
    restart: on-failure
    depends_on:
      - ipld-eth-db
    image: git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:v5.0.5-alpha
    environment:
      DATABASE_USER: "vdbm"
      DATABASE_NAME: "cerc_testing"
      DATABASE_PASSWORD: "password"
      DATABASE_HOSTNAME: "ipld-eth-db"
      DATABASE_PORT: 5432

  ipld-eth-db:
    image: timescale/timescaledb:latest-pg14
    restart: always
    command: ["postgres", "-c", "log_statement=all"]
    environment:
      POSTGRES_USER: "vdbm"
      POSTGRES_DB: "cerc_testing"
      POSTGRES_PASSWORD: "password"
    ports:
      - 127.0.0.1:8077:5432
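Once the compose stack is up and the migrations container has exited, the migrated schema is reachable on the host port published above. A quick sanity check, assuming psql is installed on the host:

# Connect to the test database published on 127.0.0.1:8077 and list its tables.
PGPASSWORD=password psql -h 127.0.0.1 -p 8077 -U vdbm cerc_testing -c '\dt'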