Compare commits

66 Commits: 7c54945dc5 … 18a7f23173
Commits in this range (newest first):

18a7f23173, ef1846f58c, 2eaa2c2262, bfb0447710, 2c08f5594c, 253b1087bf,
8a3b6bf2ac, b221bde694, 2c41537636, d83b088c37, aca78f89b7, 1788b899a4,
1fe7a04af0, 68ebdca6f9, 5f7915649d, f6df15cb38, 03517a0eb4, 54205d8787,
8d8ff99d19, 3054063942, 54e181ca68, 269333bb17, db532467cc, ccdf9d91fc,
aea3decebf, 2db16d69da, e3d694e63c, 2c0f3456f5, 46cd8b1834, c0cd87ba6a,
2db235f244, a827c4a36b, 14b9c169bc, a2772762e1, b1440d9673, 67d8bced4f,
12f4810ced, 7a8d38c955, d09cd0afe6, bcca82eaa3, dad77b561d, c939822a95,
6d103cb1f1, 99f84b6fe6, 5b7f5feb1b, 7f8885f044, d235f3b84c, 1fdb8763ac,
cd5aee30c7, 040638ca05, 681e656034, 85896f91b7, 52c7f84432, aa6ee578f4,
1f898f60c3, 0c56037e1f, 9aa683442e, adf3dd4b6f, 5ea4b6766a, 2c4fd6f099,
f964b53fe3, b8dad6a09b, 1dc90d0417, 784ffb8726, 1ddffe65be, cdcc3df9f2
.github/workflows/issues-notion-sync.yml (vendored): deleted (29 lines)

```yaml
name: Notion Sync

on:
  workflow_dispatch:
  issues:
    types:
      [
        opened,
        edited,
        labeled,
        unlabeled,
        assigned,
        unassigned,
        milestoned,
        demilestoned,
        reopened,
        closed,
      ]

jobs:
  notion_job:
    runs-on: ubuntu-latest
    name: Add GitHub Issues to Notion
    steps:
      - name: Add GitHub Issues to Notion
        uses: vulcanize/notion-github-action@v1.2.4-issueid
        with:
          notion-token: ${{ secrets.NOTION_TOKEN }}
          notion-db: ${{ secrets.NOTION_DATABASE }}
```
.github/workflows/manual_publish.yml (vendored): new file (35 lines)

```yaml
name: MANUAL Override Publish from release SHA to TAG
on:
  workflow_dispatch:
    inputs:
      giteaPublishTag:
        description: 'Release TAG to publish TO on gitea; e.g. v4.1.5-alpha'
        required: true
      cercContainerTag:
        description: 'Container (truncated!!! SHA) to release-tag FROM'
        required: true

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  # This workflow contains a single job called "build"
  build:
    name: Pull SHA and add release-tag
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Get the version
        id: vars
        run: |
          echo ::set-output name=sha::$(echo ${cercContainerTag:0:7})
      - name: Pull docker image by SHA
        run: docker pull git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{github.event.inputs.cercContainerTag}}
      - name: Tag docker image TAG
        run: docker tag git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{github.event.inputs.cercContainerTag}} git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{github.event.inputs.giteaPublishTag}}
      - name: Tag docker image TAG
        run: docker tag git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{github.event.inputs.cercContainerTag}} git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:latest
      - name: Docker Login
        run: echo ${{ secrets.GITEA_TOKEN }} | docker login https://git.vdb.to -u cerccicd --password-stdin
      - name: Docker Push Release Tag
        run: docker push git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{github.event.inputs.giteaPublishTag}}
      - name: Docker Push LATEST Tag
        run: docker push git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:latest
```
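The `Get the version` step uses bash substring expansion to shorten its input to docker's 7-character short-SHA form; a quick illustration (the sample value is made up):

```bash
cercContainerTag="18a7f23173c0ffee00"  # hypothetical full input value
echo "${cercContainerTag:0:7}"         # prints: 18a7f23
```

Note that the later tag and push steps use the untruncated `${{github.event.inputs.cercContainerTag}}` directly.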
.github/workflows/on-pr.yaml (vendored): deleted (7 lines)

```yaml
name: Docker Build

on: [pull_request]

jobs:
  run-tests:
    uses: ./.github/workflows/tests.yml
```
.github/workflows/on-publish-pr.yml (vendored): new file (73 lines)

```yaml
name: Publish Docker image
on:
  release:
    types: [published]
  pull_request:
jobs:
  pre_job:
    # continue-on-error: true # Uncomment once integration is finished
    runs-on: ubuntu-latest
    # Map a step output to a job output
    outputs:
      should_skip: ${{ steps.skip_check.outputs.should_skip }}
    steps:
      - id: skip_check
        uses: fkirc/skip-duplicate-actions@v4
        with:
          # All of these options are optional, so you can remove them if you are happy with the defaults
          concurrent_skipping: "never"
          skip_after_successful_duplicate: "true"
          do_not_skip: '["workflow_dispatch", "schedule"]'
  run-tests:
    if: ${{ needs.pre_job.outputs.should_skip != 'true' }}
    needs: pre_job
    uses: ./.github/workflows/tests.yml
  build:
    name: Run docker build
    runs-on: ubuntu-latest
    if: |
      always() &&
      (needs.run-tests.result == 'success' || needs.run-tests.result == 'skipped') &&
      github.event_name == 'release'
    needs: run-tests
    steps:
      - uses: actions/checkout@v2
      - name: Get the version
        id: vars
        run: |
          echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
          echo ::set-output name=tag::$(echo ${GITHUB_REF#refs/tags/})
      - name: Run docker build
        run: make docker-build
      - name: Tag docker image
        run: docker tag cerc-io/eth-statediff-service git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.sha}}
      - name: Tag docker image TAG
        run: docker tag git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.sha}} git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.tag}}
      - name: Docker Login
        run: echo ${{ secrets.GITEA_TOKEN }} | docker login https://git.vdb.to -u cerccicd --password-stdin
      - name: Docker Push
        run: docker push git.vdb.to/cerc-io/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.sha}}
  # push_to_registries:
  #   name: Push Docker image to Docker Hub
  #   runs-on: ubuntu-latest
  #   if: |
  #     always() &&
  #     (needs.build.result == 'success') &&
  #     github.event_name == 'release'
  #   needs: build
  #   steps:
  #     - name: Get the version
  #       id: vars
  #       run: |
  #         echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
  #         echo ::set-output name=tag::$(echo ${GITHUB_REF#refs/tags/})
  #     - name: Docker Login to Github Registry
  #       run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
  #     - name: Docker Pull
  #       run: docker pull docker.pkg.github.com/cerc-io/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.sha}}
  #     - name: Docker Login to Docker Registry
  #       run: echo ${{ secrets.VULCANIZEJENKINS_PAT }} | docker login -u vulcanizejenkins --password-stdin
  #     - name: Tag docker image
  #       run: docker tag docker.pkg.github.com/cerc-io/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.sha}} cerc-io/eth-statediff-service:${{steps.vars.outputs.tag}}
  #     - name: Docker Push to Docker Hub
  #       run: docker push cerc-io/eth-statediff-service:${{steps.vars.outputs.tag}}
```
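Similarly, `${GITHUB_REF#refs/tags/}` in the `Get the version` step strips the leading `refs/tags/` from the ref to recover the bare tag name; for reference (value illustrative):

```bash
GITHUB_REF="refs/tags/v4.1.5-alpha"  # hypothetical release ref
echo "${GITHUB_REF#refs/tags/}"      # prints: v4.1.5-alpha
```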
.github/workflows/publish.yaml (vendored): deleted (44 lines)

```yaml
name: Publish Docker image
on:
  release:
    types: [published]
jobs:
  run-tests:
    uses: ./.github/workflows/tests.yml
  build:
    name: Run docker build
    runs-on: ubuntu-latest
    needs: run-tests
    steps:
      - uses: actions/checkout@v2
      - name: Get the version
        id: vars
        run: echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
      - name: Run docker build
        run: make docker-build
      - name: Tag docker image
        run: docker tag vulcanize/eth-statediff-service docker.pkg.github.com/vulcanize/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.sha}}
      - name: Docker Login
        run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
      - name: Docker Push
        run: docker push docker.pkg.github.com/vulcanize/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.sha}}
  push_to_registries:
    name: Push Docker image to Docker Hub
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Get the version
        id: vars
        run: |
          echo ::set-output name=sha::$(echo ${GITHUB_SHA:0:7})
          echo ::set-output name=tag::$(echo ${GITHUB_REF#refs/tags/})
      - name: Docker Login to Github Registry
        run: echo ${{ secrets.GITHUB_TOKEN }} | docker login https://docker.pkg.github.com -u vulcanize --password-stdin
      - name: Docker Pull
        run: docker pull docker.pkg.github.com/vulcanize/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.sha}}
      - name: Docker Login to Docker Registry
        run: echo ${{ secrets.VULCANIZEJENKINS_PAT }} | docker login -u vulcanizejenkins --password-stdin
      - name: Tag docker image
        run: docker tag docker.pkg.github.com/vulcanize/eth-statediff-service/eth-statediff-service:${{steps.vars.outputs.sha}} vulcanize/eth-statediff-service:${{steps.vars.outputs.tag}}
      - name: Docker Push to Docker Hub
        run: docker push vulcanize/eth-statediff-service:${{steps.vars.outputs.tag}}
```
.gitignore (vendored): 1 line added

```diff
@@ -1,2 +1,3 @@
 .idea/
 eth-statediff-service
+.vscode
```
Dockerfile: 16 lines changed

```diff
@@ -1,14 +1,14 @@
-FROM golang:1.18-alpine as builder
+FROM golang:1.19-alpine as builder
 
 RUN apk --update --no-cache add make git g++ linux-headers
 # DEBUG
 RUN apk add busybox-extras
 
 # Get and build ipfs-blockchain-watcher
-ADD . /go/src/github.com/vulcanize/eth-statediff-service
-#RUN git clone https://github.com/vulcanize/eth-statediff-service.git /go/src/github.com/vulcanize/eth-statediff-service
+ADD . /go/src/github.com/cerc-io/eth-statediff-service
+#RUN git clone https://github.com/cerc-io/eth-statediff-service.git /go/src/github.com/vulcanize/eth-statediff-service
 
-WORKDIR /go/src/github.com/vulcanize/eth-statediff-service
+WORKDIR /go/src/github.com/cerc-io/eth-statediff-service
 RUN GO111MODULE=on GCO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o eth-statediff-service .
 
 # app container
@@ -27,12 +27,12 @@ USER $USER
 
 # chown first so dir is writable
 # note: using $USER is merged, but not in the stable release yet
-COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/eth-statediff-service/$CONFIG_FILE config.toml
-COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/eth-statediff-service/startup_script.sh .
-COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/eth-statediff-service/environments environments
+COPY --chown=5000:5000 --from=builder /go/src/github.com/cerc-io/eth-statediff-service/$CONFIG_FILE config.toml
+COPY --chown=5000:5000 --from=builder /go/src/github.com/cerc-io/eth-statediff-service/startup_script.sh .
+COPY --chown=5000:5000 --from=builder /go/src/github.com/cerc-io/eth-statediff-service/environments environments
 
 # keep binaries immutable
-COPY --from=builder /go/src/github.com/vulcanize/eth-statediff-service/eth-statediff-service eth-statediff-service
+COPY --from=builder /go/src/github.com/cerc-io/eth-statediff-service/eth-statediff-service eth-statediff-service
 
 EXPOSE $EXPOSE_PORT
 
```
Makefile: 4 lines changed

```diff
@@ -1,10 +1,10 @@
 ## Build docker image
 .PHONY: docker-build
 docker-build:
-	docker build -t vulcanize/eth-statediff-service .
+	docker build -t cerc-io/eth-statediff-service .
 
 .PHONY: test
-test: | $(GOOSE)
+test:
 	go test -p 1 ./pkg/... -v
 
 build:
```
README.md: 363 lines changed

```diff
@@ -8,7 +8,7 @@ Purpose:
 
 Stand up a statediffing service directly on top of a go-ethereum LevelDB instance.
 This service can serve historical state data over the same rpc interface as
-[statediffing geth](https://github.com/vulcanize/go-ethereum/releases/tag/v1.9.11-statediff-0.0.5) without needing to run a full node
+[statediffing geth](https://github.com/cerc-io/go-ethereum) without needing to run a full node.
 
 ## Setup
 
```
````diff
@@ -18,6 +18,116 @@ Build the binary:
 make build
 ```
+
+## Configuration
+
+An example config file:
+
+```toml
+[leveldb]
+# LevelDB access mode <local | remote>
+mode = "local" # LVLDB_MODE
+
+# in local mode
+# LevelDB paths
+path = "/Users/user/Library/Ethereum/geth/chaindata" # LVLDB_PATH
+ancient = "/Users/user/Library/Ethereum/geth/chaindata/ancient" # LVLDB_ANCIENT
+
+# in remote mode
+# URL for leveldb-ethdb-rpc endpoint
+url = "http://127.0.0.1:8082/" # LVLDB_URL
+
+[server]
+ipcPath = ".ipc" # SERVICE_IPC_PATH
+httpPath = "127.0.0.1:8545" # SERVICE_HTTP_PATH
+
+[statediff]
+prerun = true # STATEDIFF_PRERUN
+serviceWorkers = 1 # STATEDIFF_SERVICE_WORKERS
+workerQueueSize = 1024 # STATEDIFF_WORKER_QUEUE_SIZE
+trieWorkers = 4 # STATEDIFF_TRIE_WORKERS
+
+[prerun]
+only = false # PRERUN_ONLY
+parallel = true # PRERUN_PARALLEL
+
+# to perform prerun in a specific range (optional)
+start = 0 # PRERUN_RANGE_START
+stop = 100 # PRERUN_RANGE_STOP
+
+# to perform prerun over multiple ranges (optional)
+ranges = [
+  [101, 1000]
+]
+
+# statediffing params for prerun
+[prerun.params]
+intermediateStateNodes = true # PRERUN_INTERMEDIATE_STATE_NODES
+intermediateStorageNodes = true # PRERUN_INTERMEDIATE_STORAGE_NODES
+includeBlock = true # PRERUN_INCLUDE_BLOCK
+includeReceipts = true # PRERUN_INCLUDE_RECEIPTS
+includeTD = true # PRERUN_INCLUDE_TD
+includeCode = true # PRERUN_INCLUDE_CODE
+watchedAddresses = []
+
+[log]
+file = "" # LOG_FILE_PATH
+level = "info" # LOG_LEVEL
+
+[database]
+# output type <postgres | file | dump>
+type = "postgres"
+
+# with postgres type
+# db credentials
+name = "vulcanize_test" # DATABASE_NAME
+hostname = "localhost" # DATABASE_HOSTNAME
+port = 5432 # DATABASE_PORT
+user = "vulcanize" # DATABASE_USER
+password = "..." # DATABASE_PASSWORD
+driver = "sqlx" # DATABASE_DRIVER_TYPE <sqlx | pgx>
+
+# with file type
+# file mode <sql | csv>
+fileMode = "csv" # DATABASE_FILE_MODE
+
+# with SQL file mode
+filePath = "" # DATABASE_FILE_PATH
+
+# with CSV file mode
+fileCsvDir = "output_dir" # DATABASE_FILE_CSV_DIR
+
+# with dump type
+# <stdout | stderr | discard>
+dumpDestination = "" # DATABASE_DUMP_DST
+
+[cache]
+database = 1024 # DB_CACHE_SIZE_MB
+trie = 1024 # TRIE_CACHE_SIZE_MB
+
+[prom]
+# prometheus metrics
+metrics = true # PROM_METRICS
+http = true # PROM_HTTP
+httpAddr = "localhost" # PROM_HTTP_ADDR
+httpPort = "8889" # PROM_HTTP_PORT
+dbStats = true # PROM_DB_STATS
+
+[ethereum]
+# node info
+nodeID = "" # ETH_NODE_ID
+clientName = "eth-statediff-service" # ETH_CLIENT_NAME
+networkID = 1 # ETH_NETWORK_ID
+chainID = 1 # ETH_CHAIN_ID
+genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" # ETH_GENESIS_BLOCK
+
+# path to custom chain config file (optional)
+# keep chainID same as that in chain config file
+chainConfig = "./chain.json" # ETH_CHAIN_CONFIG
+
+[debug]
+pprof = false # DEBUG_PPROF
+```
 
 ### Local Setup
 
 * Create a chain config file `chain.json` according to chain config in genesis json file used by local geth.
````
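Each key in the example config names, in its trailing comment, the environment variable it is bound to, so any setting can be supplied from the environment instead of the file. A minimal sketch (values illustrative):

```bash
# Point the service at a remote LevelDB endpoint via env vars,
# overriding whatever the config file says:
export LVLDB_MODE=remote
export LVLDB_URL=http://127.0.0.1:8082/
export PRERUN_PARALLEL=true
./eth-statediff-service serve --config environments/config.toml
```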
````diff
@@ -42,52 +152,19 @@ make build
 }
 ```
 
-* Change the following in [config file](./environments/config.toml)
+Provide the path to the above file in the config.
 
-```toml
-[leveldb]
-mode = "local"
-# Path to geth LevelDB data
-path = "/path-to-local-geth-data/chaindata"
-ancient = "/path-to-local-geth-data/chaindata/ancient"
-
-[ethereum]
-chainConfig = "./chain.json" # Path to custom chain config file
-chainID = 41337 # Same chain ID as in chain.json
-
-[database]
-# Update database config
-name = "vulcanize_testing"
-hostname = "localhost"
-port = 5432
-user = "postgres"
-password = "postgres"
-type = "postgres"
-```
-
-* To write statediff for a range of block make changes in [config file](./environments/config.toml)
-```toml
-[prerun]
-only = false
-ranges = [
-  [8, 15] # Block number range for which to write statediff.
-]
-```
-
-* To use remote LevelDB RPC endpoint change the following in [config file](./environments/config.toml)
-```toml
-[leveldb]
-mode = "remote"
-url = "http://127.0.0.1:8082/" # Remote LevelDB RPC url
-```
-
 ## Usage
 
+* Create / update the config file (refer to example config above).
+
 ### `serve`
 
-To serve state diffs over RPC:
+* To serve the statediff RPC API:
 
-`eth-statediff-service serve --config=<config path>`
+```bash
+./eth-statediff-service serve --config=<config path>
+```
 
 Example:
 
````
````diff
@@ -95,89 +172,137 @@ Example:
 ./eth-statediff-service serve --config environments/config.toml
 ```
 
-Available RPC methods are:
+* Available RPC methods:
 * `statediff_stateTrieAt()`
 * `statediff_streamCodeAndCodeHash()`
 * `statediff_stateDiffAt()`
 * `statediff_writeStateDiffAt()`
 * `statediff_writeStateDiffsInRange()`
 
-e.g. `curl -X POST -H 'Content-Type: application/json' --data '{"jsonrpc":"2.0","method":"statediff_writeStateDiffsInRange","params":['"$BEGIN"', '"$END"', {"intermediateStateNodes":true,"intermediateStorageNodes":true,"includeBlock":true,"includeReceipts":true,"includeTD":true,"includeCode":true}],"id":1}' "$HOST":"$PORT"`
+Example:
 
-The process can be configured locally with sets of ranges to process as a "prerun" to processing directed by the server endpoints.
-This is done by turning "prerun" on in the config (`statediff.prerun = true`) and defining ranged and params in the
-`prerun` section of the config as shown below.
-
-## Configuration
-
-An example config file:
-
-```toml
-[leveldb]
-mode = "local"
-# path and ancient LevelDB paths required in local mode
-path = "/Users/user/Library/Ethereum/geth/chaindata"
-ancient = "/Users/user/Library/Ethereum/geth/chaindata/ancient"
-# url for leveldb-ethdb-rpc endpoint required in remote mode
-url = "http://127.0.0.1:8082/"
-
-[server]
-ipcPath = ".ipc"
-httpPath = "127.0.0.1:8545"
-
-[statediff]
-prerun = true
-serviceWorkers = 1
-workerQueueSize = 1024
-trieWorkers = 4
-
-[prerun]
-only = false
-ranges = [
-  [0, 1000]
-]
-[prerun.params]
-intermediateStateNodes = true
-intermediateStorageNodes = true
-includeBlock = true
-includeReceipts = true
-includeTD = true
-includeCode = true
-watchedAddresses = []
-
-[log]
-file = ""
-level = "info"
-
-[eth]
-chainID = 1
-
-[database]
-name = "vulcanize_test"
-hostname = "localhost"
-port = 5432
-user = "vulcanize"
-password = "..."
-type = "postgres"
-driver = "sqlx"
-dumpDestination = ""
-filePath = ""
-
-[cache]
-database = 1024
-trie = 1024
-
-[prom]
-dbStats = false
-metrics = true
-http = true
-httpAddr = "localhost"
-httpPort = "8889"
-
-[ethereum]
-nodeID = ""
-clientName = "eth-statediff-service"
-genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
-networkID = 1
-chainID = 1
-```
+```bash
+curl -X POST -H 'Content-Type: application/json' --data '{"jsonrpc":"2.0","method":"statediff_writeStateDiffsInRange","params":['"$BEGIN"', '"$END"', {"intermediateStateNodes":true,"intermediateStorageNodes":true,"includeBlock":true,"includeReceipts":true,"includeTD":true,"includeCode":true}],"id":1}' "$HOST":"$PORT"
+```
````
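As an aside, a single-block variant of that call can be issued the same way through the `statediff_writeStateDiffAt()` method listed above; a sketch (the block number is illustrative):

```bash
curl -X POST -H 'Content-Type: application/json' \
  --data '{"jsonrpc":"2.0","method":"statediff_writeStateDiffAt","params":[1000000, {"intermediateStateNodes":true,"intermediateStorageNodes":true,"includeBlock":true,"includeReceipts":true,"includeTD":true,"includeCode":true}],"id":1}' \
  "$HOST":"$PORT"
```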
The remaining README changes in this hunk are pure additions; the new sections follow.

* Prerun:
  * The process can be configured locally with sets of ranges to process as a "prerun" to processing directed by the server endpoints.
  * This is done by turning "prerun" on in the config (`statediff.prerun = true`) and defining ranges and params in the `prerun` section of the config.
  * Set the range using `prerun.start` and `prerun.stop`. Use `prerun.ranges` if prerun on more than one range is required.

* NOTE: Currently, `params.includeTD` must be set to / passed as `true`.
## Monitoring

* Enable metrics using the config parameters `prom.metrics` and `prom.http`.
* `eth-statediff-service` exposes the following prometheus metrics at the `/metrics` endpoint:
  * `ranges_queued`: Number of range requests currently queued.
  * `loaded_height`: The last block that was loaded for processing.
  * `processed_height`: The last block that was processed.
  * `stats.t_block_load`: Block loading time.
  * `stats.t_block_processing`: Block (header, uncles, txs, rcts, tx trie, rct trie) processing time.
  * `stats.t_state_processing`: State (state trie, storage tries, and code) processing time.
  * `stats.t_postgres_tx_commit`: Postgres tx commit time.
  * `http.count`: HTTP request count.
  * `http.duration`: HTTP request duration.
  * `ipc.count`: Unix socket connection count.
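With `prom.httpAddr` and `prom.httpPort` set as in the example config, the endpoint serves plain Prometheus text, so the counters above can be eyeballed with curl (the exact exposed names may carry a namespace prefix; adjust the grep as needed):

```bash
curl -s http://localhost:8889/metrics | grep -E 'ranges_queued|loaded_height|processed_height'
```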
## Tests

* Run unit tests:

  ```bash
  make test
  ```
## Import output data in file mode into a database

* When `eth-statediff-service` is run in file mode (`database.type`), the output is in the form of a SQL file or multiple CSV files.

### SQL

* Assuming the output files are located in the host's `./output_dir` directory:

* Create a directory to store post-processed output:

  ```bash
  mkdir -p output_dir/processed_output
  ```

* (Optional) Get row counts in the output:

  ```bash
  wc -l output_dir/statediff.sql > output_stats.txt
  ```

* De-duplicate data:

  ```bash
  sort -u output_dir/statediff.sql -o output_dir/processed_output/deduped-statediff.sql
  ```

* Copy over the post-processed output files to the DB server (say in `/output_dir`).

* Run the following to import data:

  ```bash
  psql -U <DATABASE_USER> -h <DATABASE_HOSTNAME> -p <DATABASE_PORT> <DATABASE_NAME> --set ON_ERROR_STOP=on -f /output_dir/processed_output/deduped-statediff.sql
  ```
### CSV

* Create an env file with the required variables; refer to [.sample.env](./scripts/.sample.env).

* (Optional) Get row counts in the output:

  ```bash
  ./scripts/count-lines.sh <ENV_FILE_PATH>
  ```

* De-duplicate data:

  ```bash
  ./scripts/dedup.sh <ENV_FILE_PATH>
  ```

* Perform column checks:

  ```bash
  ./scripts/check-columns.sh <ENV_FILE_PATH>
  ```

  Check the output logs for any rows detected with an unexpected number of columns.

  Example:

  ```bash
  # log
  eth.header_cids
  Start: Wednesday 21 September 2022 06:00:38 PM IST
  Time taken: 00:00:05
  End: Wednesday 21 September 2022 06:00:43 PM IST
  Total bad rows: 1 ./check-columns/eth.header_cids.txt

  # bad row output
  # line number, num. of columns, data
  23 17 22,xxxxxx,0x07f5ea5c94aa8dea60b28f6b6315d92f2b6d78ca4b74ea409adeb191b5a114f2,0x5918487321aa57dd0c50977856c6231e7c4ee79e95b694c7c8830227d77a1ecc,bagiacgzaa726uxeuvkg6uyfsr5vwgfozf4vw26gkjn2ouqe232yzdnnbctza,45,geth,0,0xad8fa8df61b98dbda7acd6ca76d5ce4cbba663d5f608cc940957adcdb94cee8d,0xc621412320a20b4aaff5363bdf063b9d13e394ef82e55689ab703aae5db08e26,0x71ec1c7d81269ce115be81c81f13e1cc2601c292a7f20440a77257ecfdc69940,0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347,\x2000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000,1658408419,/blocks/DMQAP5PKLSKKVDPKMCZI623DCXMS6K3NPDFEW5HKICNN5MMRWWQRJ4Q,1,0x0000000000000000000000000000000000000000
  ```

* Import data using `timescaledb-parallel-copy` (requires a [`timescaledb-parallel-copy`](https://github.com/timescale/timescaledb-parallel-copy) installation; it comes readily with the TimescaleDB docker image):

  ```bash
  ./scripts/timescaledb-import.sh <ENV_FILE_PATH>
  ```
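For reference, a hypothetical direct invocation of the kind that script wraps (connection string, schema, table, and file name are all illustrative; the real flags live in `scripts/timescaledb-import.sh`):

```bash
timescaledb-parallel-copy \
  --connection "host=localhost user=vulcanize dbname=vulcanize_test sslmode=disable" \
  --schema eth \
  --table header_cids \
  --file output_dir/eth.header_cids.csv \
  --workers 4
```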
* NOTE: The `COPY` command on CSVs inserts empty strings as `NULL` in the DB. Passing `FORCE_NOT_NULL <COLUMN_NAME>` forces it to insert empty strings instead. This is required to maintain compatibility of the imported statediff data with the data generated in `postgres` mode. Reference: https://www.postgresql.org/docs/14/sql-copy.html

### Stats

The binary includes a `stats` command which reports stats for the offline or remote levelDB.

At this time, the only supported stat is the latest/highest block height and hash found in the levelDB; this is useful for determining the upper limit for a standalone statediffing process on a given levelDB.

`./eth-statediff-service stats --config={path to toml config file}`
cmd/env.go: 10 lines changed

```diff
@@ -50,6 +50,7 @@ const (
 	PROM_DB_STATS = "PROM_DB_STATS"
 
 	PRERUN_ONLY = "PRERUN_ONLY"
+	PRERUN_PARALLEL = "PRERUN_PARALLEL"
 	PRERUN_RANGE_START = "PRERUN_RANGE_START"
 	PRERUN_RANGE_STOP = "PRERUN_RANGE_STOP"
 	PRERUN_INTERMEDIATE_STATE_NODES = "PRERUN_INTERMEDIATE_STATE_NODES"
@@ -72,6 +73,8 @@ const (
 	DATABASE_DRIVER_TYPE = "DATABASE_DRIVER_TYPE"
 	DATABASE_DUMP_DST = "DATABASE_DUMP_DST"
 	DATABASE_FILE_PATH = "DATABASE_FILE_PATH"
+	DATABASE_FILE_MODE = "DATABASE_FILE_MODE"
+	DATABASE_FILE_CSV_DIR = "DATABASE_FILE_CSV_DIR"
 
 	DATABASE_MAX_IDLE_CONNECTIONS = "DATABASE_MAX_IDLE_CONNECTIONS"
 	DATABASE_MAX_OPEN_CONNECTIONS = "DATABASE_MAX_OPEN_CONNECTIONS"
@@ -79,6 +82,8 @@ const (
 	DATABASE_MAX_CONN_LIFETIME = "DATABASE_MAX_CONN_LIFETIME"
 	DATABASE_CONN_TIMEOUT = "DATABSE_CONN_TIMEOUT"
 	DATABASE_MAX_CONN_IDLE_TIME = "DATABASE_MAX_CONN_IDLE_TIME"
+
+	DEBUG_PPROF = "DEBUG_PPROF"
 )
 
 // Bind env vars for eth node and DB configuration
@@ -109,7 +114,9 @@ func init() {
 	viper.BindEnv("database.type", DATABASE_TYPE)
 	viper.BindEnv("database.driver", DATABASE_DRIVER_TYPE)
 	viper.BindEnv("database.dumpDestination", DATABASE_DUMP_DST)
+	viper.BindEnv("database.fileMode", DATABASE_FILE_MODE)
 	viper.BindEnv("database.filePath", DATABASE_FILE_PATH)
+	viper.BindEnv("database.fileCsvDir", DATABASE_FILE_CSV_DIR)
 
 	viper.BindEnv("cache.database", DB_CACHE_SIZE_MB)
 	viper.BindEnv("cache.trie", TRIE_CACHE_SIZE_MB)
@@ -131,6 +138,7 @@ func init() {
 
 	viper.BindEnv("statediff.prerun", STATEDIFF_PRERUN)
 	viper.BindEnv("prerun.only", PRERUN_ONLY)
+	viper.BindEnv("prerun.parallel", PRERUN_PARALLEL)
 	viper.BindEnv("prerun.start", PRERUN_RANGE_START)
 	viper.BindEnv("prerun.stop", PRERUN_RANGE_STOP)
 	viper.BindEnv("prerun.params.intermediateStateNodes", PRERUN_INTERMEDIATE_STATE_NODES)
@@ -142,4 +150,6 @@ func init() {
 
 	viper.BindEnv("log.level", LOG_LEVEL)
 	viper.BindEnv("log.file", LOG_FILE_PATH)
+
+	viper.BindEnv("debug.pprof", DEBUG_PPROF)
 }
```
cmd/root.go: 44 lines changed

```diff
@@ -34,7 +34,7 @@ import (
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 
-	"github.com/vulcanize/eth-statediff-service/pkg/prom"
+	"github.com/cerc-io/eth-statediff-service/pkg/prom"
 )
 
 var (
@@ -140,7 +140,9 @@ func init() {
 	rootCmd.PersistentFlags().String("database-type", "postgres", "database type (currently supported: postgres, dump)")
 	rootCmd.PersistentFlags().String("database-driver", "sqlx", "database driver type (currently supported: sqlx, pgx)")
 	rootCmd.PersistentFlags().String("database-dump-dst", "stdout", "dump destination (for database-type=dump; options: stdout, stderr, discard)")
-	rootCmd.PersistentFlags().String("database-file-path", "", "full file path (for database-type=file)")
+	rootCmd.PersistentFlags().String("database-file-mode", "csv", "mode for writing file (for database-type=file; options: csv, sql)")
+	rootCmd.PersistentFlags().String("database-file-csv-dir", "", "full directory path (for database-file-mode=csv)")
+	rootCmd.PersistentFlags().String("database-file-path", "", "full file path (for database-file-mode=sql)")
 
 	rootCmd.PersistentFlags().String("eth-node-id", "", "eth node id")
 	rootCmd.PersistentFlags().String("eth-client-name", "eth-statediff-service", "eth client name")
@@ -198,6 +200,8 @@ func init() {
 	viper.BindPFlag("database.type", rootCmd.PersistentFlags().Lookup("database-type"))
 	viper.BindPFlag("database.driver", rootCmd.PersistentFlags().Lookup("database-driver"))
 	viper.BindPFlag("database.dumpDestination", rootCmd.PersistentFlags().Lookup("database-dump-dst"))
+	viper.BindPFlag("database.fileMode", rootCmd.PersistentFlags().Lookup("database-file-mode"))
+	viper.BindPFlag("database.fileCsvDir", rootCmd.PersistentFlags().Lookup("database-file-csv-dir"))
 	viper.BindPFlag("database.filePath", rootCmd.PersistentFlags().Lookup("database-file-path"))
 
 	viper.BindPFlag("ethereum.nodeID", rootCmd.PersistentFlags().Lookup("eth-node-id"))
@@ -217,6 +221,7 @@ func init() {
 	viper.BindPFlag("prom.metrics", rootCmd.PersistentFlags().Lookup("prom-metrics"))
 
 	viper.BindPFlag("prerun.only", rootCmd.PersistentFlags().Lookup("prerun-only"))
+	viper.BindPFlag("prerun.parallel", rootCmd.PersistentFlags().Lookup("prerun-parallel"))
 	viper.BindPFlag("prerun.start", rootCmd.PersistentFlags().Lookup("prerun-start"))
 	viper.BindPFlag("prerun.stop", rootCmd.PersistentFlags().Lookup("prerun-stop"))
 	viper.BindPFlag("prerun.params.intermediateStateNodes", rootCmd.PersistentFlags().Lookup("prerun-intermediate-state-nodes"))
@@ -226,6 +231,8 @@ func init() {
 	viper.BindPFlag("prerun.params.includeTD", rootCmd.PersistentFlags().Lookup("prerun-include-td"))
 	viper.BindPFlag("prerun.params.includeCode", rootCmd.PersistentFlags().Lookup("prerun-include-code"))
 
+	viper.BindPFlag("debug.pprof", rootCmd.PersistentFlags().Lookup("debug-pprof"))
+
 	rand.Seed(time.Now().UnixNano())
 }
 
@@ -297,18 +304,35 @@ func getConfig(nodeInfo node.Info) (interfaces.Config, error) {
 	if err != nil {
 		return nil, err
 	}
-	logWithCommand.Infof("configuring service for database type: %s", dbType)
+	logWithCommand.Infof("Configuring service for database type: %s", dbType)
 	var indexerConfig interfaces.Config
 	switch dbType {
 	case shared.FILE:
-		logWithCommand.Info("starting in sql file writing mode")
-		filePathStr := viper.GetString("database.filePath")
-		if filePathStr == "" {
-			logWithCommand.Fatal("when operating in sql file writing mode a file path must be provided")
+		logWithCommand.Info("Starting in sql file writing mode")
+		fileModeStr := viper.GetString("database.fileMode")
+		fileMode, err := file.ResolveFileMode(fileModeStr)
+		if err != nil {
+			utils.Fatalf("%v", err)
+		}
+
+		filePathStr := viper.GetString("database.filePath")
+		if fileMode == file.SQL && filePathStr == "" {
+			logWithCommand.Fatal("When operating in sql file writing mode a file path must be provided")
+		}
+
+		fileCsvDirStr := viper.GetString("database.fileCsvDir")
+		if fileMode == file.CSV && fileCsvDirStr == "" {
+			logWithCommand.Fatal("When operating in csv file writing mode a directory path must be provided")
+		}
+
+		indexerConfig = file.Config{
+			Mode:      fileMode,
+			OutputDir: fileCsvDirStr,
+			FilePath:  filePathStr,
 		}
-		indexerConfig = file.Config{FilePath: filePathStr}
 	case shared.DUMP:
-		logWithCommand.Info("starting in data dump mode")
+		logWithCommand.Info("Starting in data dump mode")
 		dumpDstStr := viper.GetString("database.dumpDestination")
 		dumpDst, err := dump.ResolveDumpType(dumpDstStr)
 		if err != nil {
@@ -325,7 +349,7 @@ func getConfig(nodeInfo node.Info) (interfaces.Config, error) {
 			return nil, fmt.Errorf("unrecognized dump destination: %s", dumpDst)
 		}
 	case shared.POSTGRES:
-		logWithCommand.Info("starting in postgres mode")
+		logWithCommand.Info("Starting in postgres mode")
 		driverTypeStr := viper.GetString("database.driver")
 		driverType, err := postgres.ResolveDriverType(driverTypeStr)
 		if err != nil {
```
cmd/serve.go: 52 lines changed

```diff
@@ -16,8 +16,11 @@
 package cmd
 
 import (
+	"net/http"
+	_ "net/http/pprof"
 	"os"
 	"os/signal"
+	"runtime"
 	"sync"
 
 	"github.com/ethereum/go-ethereum/rpc"
@@ -25,8 +28,8 @@ import (
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 
-	sd "github.com/vulcanize/eth-statediff-service/pkg"
-	srpc "github.com/vulcanize/eth-statediff-service/pkg/rpc"
+	sd "github.com/cerc-io/eth-statediff-service/pkg"
+	srpc "github.com/cerc-io/eth-statediff-service/pkg/rpc"
 )
 
 // serveCmd represents the serve command
@@ -47,18 +50,51 @@ func init() {
 	rootCmd.AddCommand(serveCmd)
 }
 
+func maxParallelism() int {
+	maxProcs := runtime.GOMAXPROCS(0)
+	numCPU := runtime.NumCPU()
+	if maxProcs < numCPU {
+		return maxProcs
+	}
+	return numCPU
+}
+
 func serve() {
 	logWithCommand.Info("Running eth-statediff-service serve command")
+	logWithCommand.Infof("Parallelism: %d", maxParallelism())
 
-	statediffService, err := createStateDiffService()
+	reader, chainConf, nodeInfo := instantiateLevelDBReader()
+
+	// report latest block info
+	header, err := reader.GetLatestHeader()
+	if err != nil {
+		logWithCommand.Fatalf("Unable to determine latest header height and hash: %s", err.Error())
+	}
+	if header.Number == nil {
+		logWithCommand.Fatal("Latest header found in levelDB has a nil block height")
+	}
+	logWithCommand.Infof("Latest block found in the levelDB\r\nheight: %s, hash: %s", header.Number.String(), header.Hash().Hex())
+
+	statediffService, err := createStateDiffService(reader, chainConf, nodeInfo)
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
 
+	// Enable the pprof agent if configured
+	if viper.GetBool("debug.pprof") {
+		// See: https://www.farsightsecurity.com/blog/txt-record/go-remote-profiling-20161028/
+		// For security reasons: do not use the default http multiplexor elsewhere in this process.
+		go func() {
+			logWithCommand.Info("Starting pprof listener on port 6060")
+			logWithCommand.Fatal(http.ListenAndServe("localhost:6060", nil))
+		}()
+	}
+
 	// short circuit if we only want to perform prerun
 	if viper.GetBool("prerun.only") {
-		if err := statediffService.Run(nil); err != nil {
-			logWithCommand.Fatal("unable to perform prerun: %v", err)
+		parallel := viper.GetBool("prerun.parallel")
+		if err := statediffService.Run(nil, parallel); err != nil {
+			logWithCommand.Fatal("Unable to perform prerun: %v", err)
 		}
 		return
 	}
@@ -88,17 +124,17 @@ func startServers(serv sd.StateDiffService) error {
 	ipcPath := viper.GetString("server.ipcPath")
 	httpPath := viper.GetString("server.httpPath")
 	if ipcPath == "" && httpPath == "" {
-		logWithCommand.Fatal("need an ipc path and/or an http path")
+		logWithCommand.Fatal("Need an ipc path and/or an http path")
 	}
 	if ipcPath != "" {
-		logWithCommand.Info("starting up IPC server")
+		logWithCommand.Info("Starting up IPC server")
 		_, _, err := srpc.StartIPCEndpoint(ipcPath, serv.APIs())
 		if err != nil {
 			return err
 		}
 	}
 	if httpPath != "" {
-		logWithCommand.Info("starting up HTTP server")
+		logWithCommand.Info("Starting up HTTP server")
 		_, err := srpc.StartHTTPEndpoint(httpPath, serv.APIs(), []string{"statediff"}, nil, []string{"*"}, rpc.HTTPTimeouts{})
 		if err != nil {
 			return err
```
cmd/stats.go: new file (54 lines)

```go
// Copyright © 2022 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
)

// statsCmd represents the serve command
var statsCmd = &cobra.Command{
	Use:   "stats",
	Short: "Report stats for cold levelDB",
	Long: `Usage

./eth-statediff-service stats --config={path to toml config file}`,
	Run: func(cmd *cobra.Command, args []string) {
		subCommand = cmd.CalledAs()
		logWithCommand = *logrus.WithField("SubCommand", subCommand)
		stats()
	},
}

func init() {
	rootCmd.AddCommand(statsCmd)
}

func stats() {
	logWithCommand.Info("Running eth-statediff-service stats command")

	reader, _, _ := instantiateLevelDBReader()

	header, err := reader.GetLatestHeader()
	if err != nil {
		logWithCommand.Fatalf("Unable to determine latest header height and hash: %s", err.Error())
	}
	if header.Number == nil {
		logWithCommand.Fatal("Latest header found in levelDB has a nil block height")
	}
	logWithCommand.Infof("Latest block found in the levelDB\r\nheight: %s, hash: %s", header.Number.String(), header.Hash().Hex())
}
```
130
cmd/util.go
130
cmd/util.go
@ -6,83 +6,35 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/statediff"
|
"github.com/ethereum/go-ethereum/statediff"
|
||||||
gethsd "github.com/ethereum/go-ethereum/statediff"
|
|
||||||
ind "github.com/ethereum/go-ethereum/statediff/indexer"
|
ind "github.com/ethereum/go-ethereum/statediff/indexer"
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/node"
|
||||||
|
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
"github.com/spf13/viper"
|
"github.com/spf13/viper"
|
||||||
|
|
||||||
sd "github.com/vulcanize/eth-statediff-service/pkg"
|
sd "github.com/cerc-io/eth-statediff-service/pkg"
|
||||||
|
"github.com/cerc-io/eth-statediff-service/pkg/prom"
|
||||||
)
|
)
|
||||||
|
|
||||||
type blockRange [2]uint64
|
type blockRange [2]uint64
|
||||||
|
|
||||||
func createStateDiffService() (sd.StateDiffService, error) {
|
func createStateDiffService(lvlDBReader sd.Reader, chainConf *params.ChainConfig, nodeInfo node.Info) (sd.StateDiffService, error) {
|
||||||
// load some necessary params
|
|
||||||
logWithCommand.Info("Loading statediff service parameters")
|
|
||||||
mode := viper.GetString("leveldb.mode")
|
|
||||||
path := viper.GetString("leveldb.path")
|
|
||||||
ancientPath := viper.GetString("leveldb.ancient")
|
|
||||||
url := viper.GetString("leveldb.url")
|
|
||||||
|
|
||||||
if mode == "local" {
|
|
||||||
if path == "" || ancientPath == "" {
|
|
||||||
logWithCommand.Fatal("Require a valid eth LevelDB primary datastore path and ancient datastore path")
|
|
||||||
}
|
|
||||||
} else if mode == "remote" {
|
|
||||||
if url == "" {
|
|
||||||
logWithCommand.Fatal("Require a valid RPC url for accessing LevelDB")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
logWithCommand.Fatal("Invalid mode provided for LevelDB access")
|
|
||||||
}
|
|
||||||
|
|
||||||
nodeInfo := getEthNodeInfo()
|
|
||||||
|
|
||||||
var chainConf *params.ChainConfig
|
|
||||||
var err error
|
|
||||||
chainConfigPath := viper.GetString("ethereum.chainConfig")
|
|
||||||
|
|
||||||
if chainConfigPath != "" {
|
|
||||||
chainConf, err = statediff.LoadConfig(chainConfigPath)
|
|
||||||
} else {
|
|
||||||
chainConf, err = statediff.ChainConfig(nodeInfo.ChainID)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
logWithCommand.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// create LevelDB reader
|
|
||||||
logWithCommand.Info("Creating LevelDB reader")
|
|
||||||
readerConf := sd.LvLDBReaderConfig{
|
|
||||||
TrieConfig: &trie.Config{
|
|
||||||
Cache: viper.GetInt("cache.trie"),
|
|
||||||
Journal: "",
|
|
||||||
Preimages: false,
|
|
||||||
},
|
|
||||||
ChainConfig: chainConf,
|
|
||||||
Mode: mode,
|
|
||||||
Path: path,
|
|
||||||
AncientPath: ancientPath,
|
|
||||||
Url: url,
|
|
||||||
DBCacheSize: viper.GetInt("cache.database"),
|
|
||||||
}
|
|
||||||
lvlDBReader, err := sd.NewLvlDBReader(readerConf)
|
|
||||||
if err != nil {
|
|
||||||
logWithCommand.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// create statediff service
|
// create statediff service
|
||||||
logWithCommand.Info("Setting up database")
|
logWithCommand.Info("Setting up database")
|
||||||
conf, err := getConfig(nodeInfo)
|
conf, err := getConfig(nodeInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logWithCommand.Fatal(err)
|
logWithCommand.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
logWithCommand.Info("Creating statediff indexer")
|
logWithCommand.Info("Creating statediff indexer")
|
||||||
_, indexer, err := ind.NewStateDiffIndexer(context.Background(), chainConf, nodeInfo, conf)
|
db, indexer, err := ind.NewStateDiffIndexer(context.Background(), chainConf, nodeInfo, conf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logWithCommand.Fatal(err)
|
logWithCommand.Fatal(err)
|
||||||
}
|
}
|
||||||
|
if conf.Type() == shared.POSTGRES && viper.GetBool("prom.dbStats") {
|
||||||
|
prom.RegisterDBCollector(viper.GetString("database.name"), db)
|
||||||
|
}
|
||||||
|
|
||||||
logWithCommand.Info("Creating statediff service")
|
logWithCommand.Info("Creating statediff service")
|
||||||
sdConf := sd.Config{
|
sdConf := sd.Config{
|
||||||
ServiceWorkers: viper.GetUint("statediff.serviceWorkers"),
|
ServiceWorkers: viper.GetUint("statediff.serviceWorkers"),
|
||||||
@ -97,7 +49,7 @@ func setupPreRunRanges() []sd.RangeRequest {
|
|||||||
if !viper.GetBool("statediff.prerun") {
|
if !viper.GetBool("statediff.prerun") {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
preRunParams := gethsd.Params{
|
preRunParams := statediff.Params{
|
||||||
IntermediateStateNodes: viper.GetBool("prerun.params.intermediateStateNodes"),
|
IntermediateStateNodes: viper.GetBool("prerun.params.intermediateStateNodes"),
|
||||||
IntermediateStorageNodes: viper.GetBool("prerun.params.intermediateStorageNodes"),
|
IntermediateStorageNodes: viper.GetBool("prerun.params.intermediateStorageNodes"),
|
||||||
IncludeBlock: viper.GetBool("prerun.params.includeBlock"),
|
IncludeBlock: viper.GetBool("prerun.params.includeBlock"),
|
||||||
@@ -134,3 +86,61 @@ func setupPreRunRanges() []sd.RangeRequest {

 	return blockRanges
 }
+
+func instantiateLevelDBReader() (sd.Reader, *params.ChainConfig, node.Info) {
+	// load some necessary params
+	logWithCommand.Info("Loading statediff service parameters")
+	mode := viper.GetString("leveldb.mode")
+	path := viper.GetString("leveldb.path")
+	ancientPath := viper.GetString("leveldb.ancient")
+	url := viper.GetString("leveldb.url")
+
+	if mode == "local" {
+		if path == "" || ancientPath == "" {
+			logWithCommand.Fatal("Require a valid eth LevelDB primary datastore path and ancient datastore path")
+		}
+	} else if mode == "remote" {
+		if url == "" {
+			logWithCommand.Fatal("Require a valid RPC url for accessing LevelDB")
+		}
+	} else {
+		logWithCommand.Fatal("Invalid mode provided for LevelDB access")
+	}
+
+	nodeInfo := getEthNodeInfo()
+
+	var chainConf *params.ChainConfig
+	var err error
+	chainConfigPath := viper.GetString("ethereum.chainConfig")
+
+	if chainConfigPath != "" {
+		chainConf, err = statediff.LoadConfig(chainConfigPath)
+	} else {
+		chainConf, err = statediff.ChainConfig(nodeInfo.ChainID)
+	}
+
+	if err != nil {
+		logWithCommand.Fatalf("Unable to instantiate chain config: %s", err.Error())
+	}
+
+	// create LevelDB reader
+	logWithCommand.Info("Creating LevelDB reader")
+	readerConf := sd.LvLDBReaderConfig{
+		TrieConfig: &trie.Config{
+			Cache:     viper.GetInt("cache.trie"),
+			Journal:   "",
+			Preimages: false,
+		},
+		ChainConfig: chainConf,
+		Mode:        mode,
+		Path:        path,
+		AncientPath: ancientPath,
+		Url:         url,
+		DBCacheSize: viper.GetInt("cache.database"),
+	}
+	reader, err := sd.NewLvlDBReader(readerConf)
+	if err != nil {
+		logWithCommand.Fatalf("Unable to instantiate levelDB reader: %s", err.Error())
+	}
+	return reader, chainConf, nodeInfo
+}
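The new `instantiateLevelDBReader` accepts either a `local` datastore (primary path plus ancient path) or a `remote` RPC endpoint served by leveldb-ethdb-rpc. The three-way mode check could equivalently be written as a switch; a standalone sketch of the same validation:

```go
package leveldb

import "fmt"

// validateMode reproduces the local/remote/invalid check from
// instantiateLevelDBReader as a switch over the configured mode.
func validateMode(mode, path, ancientPath, url string) error {
	switch mode {
	case "local":
		if path == "" || ancientPath == "" {
			return fmt.Errorf("local mode requires leveldb.path and leveldb.ancient")
		}
	case "remote":
		if url == "" {
			return fmt.Errorf("remote mode requires leveldb.url")
		}
	default:
		return fmt.Errorf("invalid leveldb.mode %q", mode)
	}
	return nil
}
```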
@@ -20,7 +20,7 @@ import (
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"

-	v "github.com/vulcanize/eth-statediff-service/version"
+	v "github.com/cerc-io/eth-statediff-service/version"
 )

 // versionCmd represents the version command
@@ -2,7 +2,6 @@
   mode = "local"
   path = "/app/geth-rw/chaindata"
   ancient = "/app/geth-rw/chaindata/ancient"
-  url = "http://127.0.0.1:8082/"

 [server]
   ipcPath = ""
@@ -31,31 +30,29 @@
   level = "info"

 [database]
+  type = "postgres"
   name = ""
   hostname = ""
   port = 5432
   user = ""
   password = ""
-  type = "postgres"
   driver = "sqlx"
-  dumpDestination = ""
-  filePath = ""

 [cache]
   database = 1024
   trie = 4096

 [prom]
-  dbStats = false
   metrics = true
   http = true
   httpAddr = "0.0.0.0"
   httpPort = 9100
+  dbStats = false

 [ethereum]
-  chainConfig = ""
   nodeID = ""
   clientName = "eth-statediff-service"
   genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"
   networkID = 1
   chainID = 1
+  chainConfig = ""
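The moved keys resolve identically regardless of their position inside a TOML table; a small sketch confirming the reordered sections read back as expected with viper (the file path is illustrative):

```go
package config

import "github.com/spf13/viper"

// load reads the example config and touches the relocated keys; viper
// resolves them by table.key name, not by position in the file.
func load() error {
	viper.SetConfigFile("environments/config.toml") // illustrative path
	if err := viper.ReadInConfig(); err != nil {
		return err
	}
	_ = viper.GetString("database.type")        // "postgres"
	_ = viper.GetBool("prom.dbStats")           // false
	_ = viper.GetString("ethereum.chainConfig") // ""
	return nil
}
```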
130 go.mod
@@ -1,35 +1,41 @@
-module github.com/vulcanize/eth-statediff-service
+module github.com/cerc-io/eth-statediff-service

 go 1.18

 require (
-	github.com/ethereum/go-ethereum v1.10.18
-	github.com/jmoiron/sqlx v1.2.0
-	github.com/prometheus/client_golang v1.4.0
-	github.com/sirupsen/logrus v1.7.0
+	github.com/cerc-io/leveldb-ethdb-rpc v1.1.13
+	github.com/ethereum/go-ethereum v1.11.5
+	github.com/jmoiron/sqlx v1.3.5 // indirect
+	github.com/prometheus/client_golang v1.14.0
+	github.com/sirupsen/logrus v1.9.0
 	github.com/spf13/cobra v1.3.0
 	github.com/spf13/viper v1.10.1
-	github.com/vulcanize/go-eth-state-node-iterator v1.0.3
-	github.com/vulcanize/leveldb-ethdb-rpc v0.1.2
 )

 require (
-	github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
+	github.com/DataDog/zstd v1.5.2 // indirect
 	github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
+	github.com/cockroachdb/errors v1.9.1 // indirect
+	github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
+	github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect
+	github.com/cockroachdb/redact v1.1.3 // indirect
+	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/deckarep/golang-set v1.8.0 // indirect
+	github.com/deckarep/golang-set/v2 v2.1.0 // indirect
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
 	github.com/deepmap/oapi-codegen v1.8.2 // indirect
 	github.com/edsrzf/mmap-go v1.0.0 // indirect
 	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
-	github.com/fsnotify/fsnotify v1.5.1 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
-	github.com/georgysavva/scany v0.2.9 // indirect
-	github.com/go-ole/go-ole v1.2.1 // indirect
-	github.com/go-stack/stack v1.8.0 // indirect
+	github.com/georgysavva/scany v1.2.1 // indirect
+	github.com/getsentry/sentry-go v0.18.0 // indirect
+	github.com/go-ole/go-ole v1.2.6 // indirect
+	github.com/go-stack/stack v1.8.1 // indirect
+	github.com/gofrs/flock v0.8.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt/v4 v4.3.0 // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
@@ -48,14 +54,15 @@ require (
 	github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
 	github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
 	github.com/ipfs/bbloom v0.0.4 // indirect
-	github.com/ipfs/go-block-format v0.0.2 // indirect
-	github.com/ipfs/go-cid v0.0.7 // indirect
-	github.com/ipfs/go-datastore v0.4.2 // indirect
-	github.com/ipfs/go-ipfs-blockstore v1.0.1 // indirect
-	github.com/ipfs/go-ipfs-ds-help v1.0.0 // indirect
-	github.com/ipfs/go-ipfs-util v0.0.1 // indirect
-	github.com/ipfs/go-ipld-format v0.2.0 // indirect
-	github.com/ipfs/go-log v0.0.1 // indirect
+	github.com/ipfs/go-block-format v0.0.3 // indirect
+	github.com/ipfs/go-cid v0.2.0 // indirect
+	github.com/ipfs/go-datastore v0.5.1 // indirect
+	github.com/ipfs/go-ipfs-blockstore v1.2.0 // indirect
+	github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect
+	github.com/ipfs/go-ipfs-util v0.0.2 // indirect
+	github.com/ipfs/go-ipld-format v0.4.0 // indirect
+	github.com/ipfs/go-log v1.0.5 // indirect
+	github.com/ipfs/go-log/v2 v2.1.3 // indirect
 	github.com/ipfs/go-metrics-interface v0.0.1 // indirect
 	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
 	github.com/jackc/pgconn v1.10.0 // indirect
@@ -67,65 +74,70 @@ require (
 	github.com/jackc/pgx/v4 v4.13.0 // indirect
 	github.com/jackc/puddle v1.1.3 // indirect
 	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
-	github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 // indirect
-	github.com/lib/pq v1.10.2 // indirect
+	github.com/jbenet/goprocess v0.1.4 // indirect
+	github.com/klauspost/compress v1.15.15 // indirect
+	github.com/klauspost/cpuid/v2 v2.0.9 // indirect
+	github.com/kr/pretty v0.3.1 // indirect
+	github.com/kr/text v0.2.0 // indirect
+	github.com/lib/pq v1.10.7 // indirect
 	github.com/magiconair/properties v1.8.5 // indirect
-	github.com/mattn/go-colorable v0.1.12 // indirect
-	github.com/mattn/go-isatty v0.0.14 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.16 // indirect
 	github.com/mattn/go-runewidth v0.0.9 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
-	github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 // indirect
+	github.com/minio/sha256-simd v1.0.0 // indirect
 	github.com/mitchellh/mapstructure v1.4.3 // indirect
 	github.com/mitchellh/pointerstructure v1.2.0 // indirect
-	github.com/mr-tron/base58 v1.1.3 // indirect
+	github.com/mr-tron/base58 v1.2.0 // indirect
 	github.com/multiformats/go-base32 v0.0.3 // indirect
 	github.com/multiformats/go-base36 v0.1.0 // indirect
 	github.com/multiformats/go-multibase v0.0.3 // indirect
-	github.com/multiformats/go-multihash v0.0.14 // indirect
-	github.com/multiformats/go-varint v0.0.5 // indirect
+	github.com/multiformats/go-multihash v0.1.0 // indirect
+	github.com/multiformats/go-varint v0.0.6 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/pelletier/go-toml v1.9.4 // indirect
 	github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
-	github.com/pganalyze/pg_query_go/v2 v2.1.0 // indirect
+	github.com/pganalyze/pg_query_go/v2 v2.2.0 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.9.1 // indirect
-	github.com/prometheus/procfs v0.0.8 // indirect
-	github.com/prometheus/tsdb v0.7.1 // indirect
-	github.com/rjeczalik/notify v0.9.1 // indirect
+	github.com/prometheus/client_model v0.3.0 // indirect
+	github.com/prometheus/common v0.39.0 // indirect
+	github.com/prometheus/procfs v0.9.0 // indirect
+	github.com/rogpeppe/go-internal v1.9.0 // indirect
 	github.com/rs/cors v1.7.0 // indirect
-	github.com/shirou/gopsutil v3.21.5+incompatible // indirect
+	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/spf13/afero v1.6.0 // indirect
 	github.com/spf13/cast v1.4.1 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
-	github.com/stretchr/objx v0.2.0 // indirect
-	github.com/stretchr/testify v1.7.0 // indirect
+	github.com/status-im/keycard-go v0.2.0 // indirect
 	github.com/subosito/gotenv v1.2.0 // indirect
-	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
-	github.com/thoas/go-funk v0.9.2 // indirect
-	github.com/tklauser/go-sysconf v0.3.6 // indirect
-	github.com/tklauser/numcpus v0.2.2 // indirect
-	github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
-	github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc // indirect
+	github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect
+	github.com/thoas/go-funk v0.9.3 // indirect
+	github.com/tklauser/go-sysconf v0.3.11 // indirect
+	github.com/tklauser/numcpus v0.6.0 // indirect
+	github.com/tyler-smith/go-bip39 v1.1.0 // indirect
+	github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa // indirect
+	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
+	github.com/yusufpapurcu/wmi v1.2.2 // indirect
 	go.uber.org/atomic v1.7.0 // indirect
-	golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
-	golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect
-	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
-	golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
-	golang.org/x/text v0.3.7 // indirect
-	golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
-	google.golang.org/protobuf v1.27.1 // indirect
-	gopkg.in/ini.v1 v1.66.2 // indirect
+	go.uber.org/multierr v1.6.0 // indirect
+	go.uber.org/zap v1.17.0 // indirect
+	golang.org/x/crypto v0.6.0 // indirect
+	golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
+	golang.org/x/net v0.6.0 // indirect
+	golang.org/x/sync v0.1.0 // indirect
+	golang.org/x/sys v0.5.0 // indirect
+	golang.org/x/text v0.7.0 // indirect
+	golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
-	gopkg.in/urfave/cli.v1 v1.20.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
-	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+	lukechampine.com/blake3 v1.1.6 // indirect
 )

-replace github.com/ethereum/go-ethereum v1.10.18 => github.com/vulcanize/go-ethereum v1.10.18-statediff-3.2.1
+replace github.com/ethereum/go-ethereum v1.11.5 => github.com/cerc-io/go-ethereum v1.11.5-statediff-4.3.9-alpha
2
main.go
2
main.go
@ -15,7 +15,7 @@
|
|||||||
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import "github.com/vulcanize/eth-statediff-service/cmd"
|
import "github.com/cerc-io/eth-statediff-service/cmd"
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
cmd.Execute()
|
cmd.Execute()
|
||||||
|
780 pkg/builder.go
@@ -20,93 +20,25 @@
 package statediff

 import (
-	"bytes"
 	"fmt"
 	"math/bits"
 	"sync"

-	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/state"
-	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/rlp"
 	sd "github.com/ethereum/go-ethereum/statediff"
-	sdtrie "github.com/ethereum/go-ethereum/statediff/trie_helpers"
 	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
-	"github.com/ethereum/go-ethereum/trie"
+	iter "github.com/ethereum/go-ethereum/trie/concurrent_iterator"
 	"github.com/sirupsen/logrus"
-
-	iter "github.com/vulcanize/go-eth-state-node-iterator"
 )

-var (
-	nullHashBytes     = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")
-	emptyNode, _      = rlp.EncodeToBytes(&[]byte{})
-	emptyContractRoot = crypto.Keccak256Hash(emptyNode)
-	nullCodeHash      = crypto.Keccak256Hash([]byte{}).Bytes()
-)
-
-// Builder interface exposes the method for building a state diff between two blocks
-type Builder interface {
-	BuildStateDiffObject(args sd.Args, params sd.Params) (sdtypes.StateObject, error)
-	BuildStateTrieObject(current *types.Block) (sdtypes.StateObject, error)
-	WriteStateDiffObject(args sdtypes.StateRoots, params sd.Params, output sdtypes.StateNodeSink, codeOutput sdtypes.CodeSink) error
-}
-
 type builder struct {
-	stateCache state.Database
+	sd.StateDiffBuilder
 	numWorkers uint
 }

-type iterPair struct {
-	older, newer trie.NodeIterator
-}
-
-func resolveNode(it trie.NodeIterator, trieDB *trie.Database) (sdtypes.StateNode, []interface{}, error) {
-	nodePath := make([]byte, len(it.Path()))
-	copy(nodePath, it.Path())
-	node, err := trieDB.Node(it.Hash())
-	if err != nil {
-		return sdtypes.StateNode{}, nil, err
-	}
-	var nodeElements []interface{}
-	if err := rlp.DecodeBytes(node, &nodeElements); err != nil {
-		return sdtypes.StateNode{}, nil, err
-	}
-	ty, err := sdtrie.CheckKeyType(nodeElements)
-	if err != nil {
-		return sdtypes.StateNode{}, nil, err
-	}
-	return sdtypes.StateNode{
-		NodeType:  ty,
-		Path:      nodePath,
-		NodeValue: node,
-	}, nodeElements, nil
-}
-
-// convenience
-func stateNodeAppender(nodes *[]sdtypes.StateNode) sdtypes.StateNodeSink {
-	return func(node sdtypes.StateNode) error {
-		*nodes = append(*nodes, node)
-		return nil
-	}
-}
-func storageNodeAppender(nodes *[]sdtypes.StorageNode) sdtypes.StorageNodeSink {
-	return func(node sdtypes.StorageNode) error {
-		*nodes = append(*nodes, node)
-		return nil
-	}
-}
-func codeMappingAppender(data *[]sdtypes.CodeAndCodeHash) sdtypes.CodeSink {
-	return func(c sdtypes.CodeAndCodeHash) error {
-		*data = append(*data, c)
-		return nil
-	}
-}
-
 // NewBuilder is used to create a statediff builder
-func NewBuilder(stateCache state.Database, workers uint) (Builder, error) {
+func NewBuilder(stateCache state.Database, workers uint) (sd.Builder, error) {
 	if workers == 0 {
 		workers = 1
 	}
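The builder now embeds the upstream `sd.StateDiffBuilder`, so its exported fields and methods (e.g. `StateCache`) are promoted onto `*builder` and the local duplicates above (the `Builder` interface, `resolveNode`, the appender helpers) can be dropped. A toy sketch of the embedding pattern with stand-in types:

```go
package embed

// StateDiffBuilder stands in for the upstream sd.StateDiffBuilder.
type StateDiffBuilder struct {
	StateCache string // stand-in for state.Database
}

func (b *StateDiffBuilder) Open() string { return b.StateCache }

// builder gains StateCache and Open() through the embedded struct,
// mirroring the refactor in this hunk.
type builder struct {
	StateDiffBuilder
	numWorkers uint
}

func newBuilder(cache string, workers uint) *builder {
	return &builder{
		StateDiffBuilder: StateDiffBuilder{StateCache: cache},
		numWorkers:       workers,
	}
}
```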
@@ -114,88 +46,20 @@ func NewBuilder(stateCache state.Database, workers uint) (Builder, error) {
 		return nil, fmt.Errorf("workers must be a power of 2")
 	}
 	return &builder{
-		stateCache: stateCache, // state cache is safe for concurrent reads
+		StateDiffBuilder: sd.StateDiffBuilder{
+			StateCache: stateCache,
+		},
 		numWorkers: workers,
 	}, nil
 }

-// BuildStateTrieObject builds a state trie object from the provided block
-func (sdb *builder) BuildStateTrieObject(current *types.Block) (sdtypes.StateObject, error) {
-	currentTrie, err := sdb.stateCache.OpenTrie(current.Root())
-	if err != nil {
-		return sdtypes.StateObject{}, fmt.Errorf("error creating trie for block %d: %v", current.Number(), err)
-	}
-	it := currentTrie.NodeIterator([]byte{})
-	stateNodes, codeAndCodeHashes, err := sdb.buildStateTrie(it)
-	if err != nil {
-		return sdtypes.StateObject{}, fmt.Errorf("error collecting state nodes for block %d: %v", current.Number(), err)
-	}
-	return sdtypes.StateObject{
-		BlockNumber:       current.Number(),
-		BlockHash:         current.Hash(),
-		Nodes:             stateNodes,
-		CodeAndCodeHashes: codeAndCodeHashes,
-	}, nil
-}
-
-func (sdb *builder) buildStateTrie(it trie.NodeIterator) ([]sdtypes.StateNode, []sdtypes.CodeAndCodeHash, error) {
-	stateNodes := make([]sdtypes.StateNode, 0)
-	codeAndCodeHashes := make([]sdtypes.CodeAndCodeHash, 0)
-	for it.Next(true) {
-		// skip value nodes
-		if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
-			continue
-		}
-		node, nodeElements, err := resolveNode(it, sdb.stateCache.TrieDB())
-		if err != nil {
-			return nil, nil, err
-		}
-		switch node.NodeType {
-		case sdtypes.Leaf:
-			var account types.StateAccount
-			if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
-				return nil, nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", node.Path, err)
-			}
-			partialPath := trie.CompactToHex(nodeElements[0].([]byte))
-			valueNodePath := append(node.Path, partialPath...)
-			encodedPath := trie.HexToCompact(valueNodePath)
-			leafKey := encodedPath[1:]
-			node.LeafKey = leafKey
-			if !bytes.Equal(account.CodeHash, nullCodeHash) {
-				var storageNodes []sdtypes.StorageNode
-				err := sdb.buildStorageNodesEventual(account.Root, true, storageNodeAppender(&storageNodes))
-				if err != nil {
-					return nil, nil, fmt.Errorf("failed building eventual storage diffs for account %+v\r\nerror: %v", account, err)
-				}
-				node.StorageNodes = storageNodes
-				// emit codehash => code mappings for code
-				codeHash := common.BytesToHash(account.CodeHash)
-				code, err := sdb.stateCache.ContractCode(common.Hash{}, codeHash)
-				if err != nil {
-					return nil, nil, fmt.Errorf("failed to retrieve code for codehash %s\r\n error: %v", codeHash.String(), err)
-				}
-				codeAndCodeHashes = append(codeAndCodeHashes, sdtypes.CodeAndCodeHash{
-					Hash: codeHash,
-					Code: code,
-				})
-			}
-			stateNodes = append(stateNodes, node)
-		case sdtypes.Extension, sdtypes.Branch:
-			stateNodes = append(stateNodes, node)
-		default:
-			return nil, nil, fmt.Errorf("unexpected node type %s", node.NodeType)
-		}
-	}
-	return stateNodes, codeAndCodeHashes, it.Error()
-}
-
 // BuildStateDiffObject builds a statediff object from two blocks and the provided parameters
 func (sdb *builder) BuildStateDiffObject(args sd.Args, params sd.Params) (sdtypes.StateObject, error) {
 	var stateNodes []sdtypes.StateNode
 	var codeAndCodeHashes []sdtypes.CodeAndCodeHash
 	err := sdb.WriteStateDiffObject(
-		sdtypes.StateRoots{OldStateRoot: args.OldStateRoot, NewStateRoot: args.NewStateRoot},
-		params, stateNodeAppender(&stateNodes), codeMappingAppender(&codeAndCodeHashes))
+		args,
+		params, sd.StateNodeAppender(&stateNodes), sd.CodeMappingAppender(&codeAndCodeHashes))
 	if err != nil {
 		return sdtypes.StateObject{}, err
 	}
@@ -208,19 +72,13 @@ func (sdb *builder) BuildStateDiffObject(args sd.Args, params sd.Params) (sdtype
 }

 // WriteStateDiffObject writes a statediff object to output callback
-func (sdb *builder) WriteStateDiffObject(args sdtypes.StateRoots, params sd.Params, output sdtypes.StateNodeSink, codeOutput sdtypes.CodeSink) error {
-	if len(params.WatchedAddresses) > 0 {
-		// if we are watching only specific accounts then we are only diffing leaf nodes
-		log.Info("Ignoring intermediate state nodes because WatchedAddresses was passed")
-		params.IntermediateStateNodes = false
-	}
-
+func (sdb *builder) WriteStateDiffObject(args sd.Args, params sd.Params, output sdtypes.StateNodeSink, codeOutput sdtypes.CodeSink) error {
 	// Load tries for old and new states
-	oldTrie, err := sdb.stateCache.OpenTrie(args.OldStateRoot)
+	oldTrie, err := sdb.StateCache.OpenTrie(args.OldStateRoot)
 	if err != nil {
 		return fmt.Errorf("error creating trie for oldStateRoot: %v", err)
 	}
-	newTrie, err := sdb.stateCache.OpenTrie(args.NewStateRoot)
+	newTrie, err := sdb.StateCache.OpenTrie(args.NewStateRoot)
 	if err != nil {
 		return fmt.Errorf("error creating trie for newStateRoot: %v", err)
 	}
@@ -234,11 +92,11 @@ func (sdb *builder) WriteStateDiffObject(args sdtypes.StateRoots, params sd.Para
 	// Create iterators ahead of time to avoid race condition in state.Trie access
 	// We do two state iterations per subtrie: one for new/updated nodes,
 	// one for deleted/updated nodes; prepare 2 iterator instances for each task
-	var iterPairs [][]iterPair
+	var iterPairs [][]sd.IterPair
 	for i := uint(0); i < sdb.numWorkers; i++ {
-		iterPairs = append(iterPairs, []iterPair{
-			{older: oldIters1[i], newer: newIters1[i]},
-			{older: oldIters2[i], newer: newIters2[i]},
+		iterPairs = append(iterPairs, []sd.IterPair{
+			{Older: oldIters1[i], Newer: newIters1[i]},
+			{Older: oldIters2[i], Newer: newIters2[i]},
 		})
 	}

@@ -255,8 +113,16 @@ func (sdb *builder) WriteStateDiffObject(args sdtypes.StateRoots, params sd.Para
 		wg.Add(1)
 		go func(worker uint) {
 			defer wg.Done()
-			if err := sdb.buildStateDiff(iterPairs[worker], params, nodeSender, codeSender); err != nil {
-				logrus.Errorf("buildStateDiff error for worker %d, pparams %+v", worker, params)
+			var err error
+			logger := log.New("hash", args.BlockHash.Hex(), "number", args.BlockNumber)
+			if !params.IntermediateStateNodes {
+				err = sdb.BuildStateDiffWithoutIntermediateStateNodes(iterPairs[worker], params, nodeSender, codeSender, logger)
+			} else {
+				err = sdb.BuildStateDiffWithIntermediateStateNodes(iterPairs[worker], params, nodeSender, codeSender, logger)
+			}
+
+			if err != nil {
+				logrus.Errorf("buildStateDiff error for worker %d, params %+v", worker, params)
 			}
 		}(w)
 	}
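A standalone sketch of the fan-out this function relies on: a power-of-two worker count (validated with `math/bits`, matching the "workers must be a power of 2" check above) lets the account trie be split into equal subtrie ranges, one iterator pair per worker, joined with a `WaitGroup`:

```go
package fanout

import (
	"fmt"
	"math/bits"
	"sync"
)

// run validates the worker count and dispatches one goroutine per worker,
// mirroring the wg.Add / go func / defer wg.Done pattern in the hunk.
func run(workers uint, work func(worker uint)) error {
	if workers == 0 {
		workers = 1
	}
	// exactly one set bit <=> power of two
	if bits.OnesCount(workers) != 1 {
		return fmt.Errorf("workers must be a power of 2")
	}
	var wg sync.WaitGroup
	for w := uint(0); w < workers; w++ {
		wg.Add(1)
		go func(worker uint) {
			defer wg.Done()
			work(worker)
		}(w)
	}
	wg.Wait()
	return nil
}
```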
@ -288,599 +154,3 @@ func (sdb *builder) WriteStateDiffObject(args sdtypes.StateRoots, params sd.Para
|
|||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sdb *builder) buildStateDiff(args []iterPair, params sd.Params, output sdtypes.StateNodeSink, codeOutput sdtypes.CodeSink) error {
|
|
||||||
// collect a slice of all the intermediate nodes that were touched and exist at B
|
|
||||||
// a map of their leafkey to all the accounts that were touched and exist at B
|
|
||||||
// and a slice of all the paths for the nodes in both of the above sets
|
|
||||||
var diffAccountsAtB AccountMap
|
|
||||||
var diffPathsAtB map[string]bool
|
|
||||||
var err error
|
|
||||||
if params.IntermediateStateNodes {
|
|
||||||
diffAccountsAtB, diffPathsAtB, err = sdb.createdAndUpdatedStateWithIntermediateNodes(args[0], output)
|
|
||||||
} else {
|
|
||||||
diffAccountsAtB, diffPathsAtB, err = sdb.createdAndUpdatedState(args[0], params.WatchedAddressesLeafKeys())
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error collecting createdAndUpdatedNodes: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// collect a slice of all the nodes that existed at a path in A that doesn't exist in B
|
|
||||||
// a map of their leafkey to all the accounts that were touched and exist at A
|
|
||||||
diffAccountsAtA, err := sdb.deletedOrUpdatedState(args[1], diffAccountsAtB, diffPathsAtB, params.WatchedAddressesLeafKeys(), params.IntermediateStorageNodes, output)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error collecting deletedOrUpdatedNodes: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// collect and sort the leafkeys for both account mappings into a slice
|
|
||||||
createKeys := sortKeys(diffAccountsAtB)
|
|
||||||
deleteKeys := sortKeys(diffAccountsAtA)
|
|
||||||
|
|
||||||
// and then find the intersection of these keys
|
|
||||||
// these are the leafkeys for the accounts which exist at both A and B but are different
|
|
||||||
// this also mutates the passed in createKeys and deleteKeys, removing the intersection keys
|
|
||||||
// and leaving the truly created or deleted keys in place
|
|
||||||
updatedKeys := findIntersection(createKeys, deleteKeys)
|
|
||||||
|
|
||||||
// build the diff nodes for the updated accounts using the mappings at both A and B as directed by the keys found as the intersection of the two
|
|
||||||
err = sdb.buildAccountUpdates(
|
|
||||||
diffAccountsAtB, diffAccountsAtA, updatedKeys, params.IntermediateStorageNodes, output)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error building diff for updated accounts: %v", err)
|
|
||||||
}
|
|
||||||
// build the diff nodes for created accounts
|
|
||||||
err = sdb.buildAccountCreations(diffAccountsAtB, params.IntermediateStorageNodes, output, codeOutput)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("error building diff for created accounts: %v", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// createdAndUpdatedState returns
|
|
||||||
// a mapping of their leafkeys to all the accounts that exist in a different state at B than A
|
|
||||||
// and a slice of the paths for all of the nodes included in both
|
|
||||||
func (sdb *builder) createdAndUpdatedState(iters iterPair, watchedAddressesLeafKeys map[common.Hash]struct{}) (AccountMap, map[string]bool, error) {
|
|
||||||
diffPathsAtB := make(map[string]bool)
|
|
||||||
diffAcountsAtB := make(AccountMap)
|
|
||||||
it, _ := trie.NewDifferenceIterator(iters.older, iters.newer)
|
|
||||||
for it.Next(true) {
|
|
||||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
switch node.NodeType {
|
|
||||||
case sdtypes.Leaf:
|
|
||||||
// created vs updated is important for leaf nodes since we need to diff their storage
|
|
||||||
// so we need to map all changed accounts at B to their leafkey, since account can change pathes but not leafkey
|
|
||||||
var account types.StateAccount
|
|
||||||
if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", node.Path, err)
|
|
||||||
}
|
|
||||||
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
|
|
||||||
valueNodePath := append(node.Path, partialPath...)
|
|
||||||
encodedPath := trie.HexToCompact(valueNodePath)
|
|
||||||
leafKey := encodedPath[1:]
|
|
||||||
if isWatchedAddress(watchedAddressesLeafKeys, leafKey) {
|
|
||||||
diffAcountsAtB[common.Bytes2Hex(leafKey)] = accountWrapper{
|
|
||||||
NodeType: node.NodeType,
|
|
||||||
Path: node.Path,
|
|
||||||
NodeValue: node.NodeValue,
|
|
||||||
LeafKey: leafKey,
|
|
||||||
Account: &account,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// add both intermediate and leaf node paths to the list of diffPathsAtB
|
|
||||||
diffPathsAtB[common.Bytes2Hex(node.Path)] = true
|
|
||||||
}
|
|
||||||
return diffAcountsAtB, diffPathsAtB, it.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
// createdAndUpdatedStateWithIntermediateNodes returns
|
|
||||||
// a slice of all the intermediate nodes that exist in a different state at B than A
|
|
||||||
// a mapping of their leafkeys to all the accounts that exist in a different state at B than A
|
|
||||||
// and a slice of the paths for all of the nodes included in both
|
|
||||||
func (sdb *builder) createdAndUpdatedStateWithIntermediateNodes(iters iterPair, output sdtypes.StateNodeSink) (AccountMap, map[string]bool, error) {
|
|
||||||
diffPathsAtB := make(map[string]bool)
|
|
||||||
diffAcountsAtB := make(AccountMap)
|
|
||||||
it, _ := trie.NewDifferenceIterator(iters.older, iters.newer)
|
|
||||||
for it.Next(true) {
|
|
||||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
node, nodeElements, err := resolveNode(it, sdb.stateCache.TrieDB())
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
switch node.NodeType {
|
|
||||||
case sdtypes.Leaf:
|
|
||||||
// created vs updated is important for leaf nodes since we need to diff their storage
|
|
||||||
// so we need to map all changed accounts at B to their leafkey, since account can change paths but not leafkey
|
|
||||||
var account types.StateAccount
|
|
||||||
if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", node.Path, err)
|
|
||||||
}
|
|
||||||
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
|
|
||||||
valueNodePath := append(node.Path, partialPath...)
|
|
||||||
encodedPath := trie.HexToCompact(valueNodePath)
|
|
||||||
leafKey := encodedPath[1:]
|
|
||||||
diffAcountsAtB[common.Bytes2Hex(leafKey)] = accountWrapper{
|
|
||||||
NodeType: node.NodeType,
|
|
||||||
Path: node.Path,
|
|
||||||
NodeValue: node.NodeValue,
|
|
||||||
LeafKey: leafKey,
|
|
||||||
Account: &account,
|
|
||||||
}
|
|
||||||
case sdtypes.Extension, sdtypes.Branch:
|
|
||||||
// create a diff for any intermediate node that has changed at b
|
|
||||||
// created vs updated makes no difference for intermediate nodes since we do not need to diff storage
|
|
||||||
if err := output(sdtypes.StateNode{
|
|
||||||
NodeType: node.NodeType,
|
|
||||||
Path: node.Path,
|
|
||||||
NodeValue: node.NodeValue,
|
|
||||||
}); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return nil, nil, fmt.Errorf("unexpected node type %s", node.NodeType)
|
|
||||||
}
|
|
||||||
// add both intermediate and leaf node paths to the list of diffPathsAtB
|
|
||||||
diffPathsAtB[common.Bytes2Hex(node.Path)] = true
|
|
||||||
}
|
|
||||||
return diffAcountsAtB, diffPathsAtB, it.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
// deletedOrUpdatedState returns a slice of all the paths that are emptied at B
|
|
||||||
// and a mapping of their leafkeys to all the accounts that exist in a different state at A than B
|
|
||||||
func (sdb *builder) deletedOrUpdatedState(iters iterPair, diffAccountsAtB AccountMap, diffPathsAtB map[string]bool, watchedAddressesLeafKeys map[common.Hash]struct{}, intermediateStorageNodes bool, output sdtypes.StateNodeSink) (AccountMap, error) {
|
|
||||||
diffAccountAtA := make(AccountMap)
|
|
||||||
it, _ := trie.NewDifferenceIterator(iters.newer, iters.older)
|
|
||||||
for it.Next(true) {
|
|
||||||
// skip value nodes
|
|
||||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
switch node.NodeType {
|
|
||||||
case sdtypes.Leaf:
|
|
||||||
// map all different accounts at A to their leafkey
|
|
||||||
var account types.StateAccount
|
|
||||||
if err := rlp.DecodeBytes(nodeElements[1].([]byte), &account); err != nil {
|
|
||||||
return nil, fmt.Errorf("error decoding account for leaf node at path %x nerror: %v", node.Path, err)
|
|
||||||
}
|
|
||||||
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
|
|
||||||
valueNodePath := append(node.Path, partialPath...)
|
|
||||||
encodedPath := trie.HexToCompact(valueNodePath)
|
|
||||||
leafKey := encodedPath[1:]
|
|
||||||
if isWatchedAddress(watchedAddressesLeafKeys, leafKey) {
|
|
||||||
diffAccountAtA[common.Bytes2Hex(leafKey)] = accountWrapper{
|
|
||||||
NodeType: node.NodeType,
|
|
||||||
Path: node.Path,
|
|
||||||
NodeValue: node.NodeValue,
|
|
||||||
LeafKey: leafKey,
|
|
||||||
Account: &account,
|
|
||||||
}
|
|
||||||
// if this node's path did not show up in diffPathsAtB
|
|
||||||
// that means the node at this path was deleted (or moved) in B
|
|
||||||
if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok {
|
|
||||||
var diff sdtypes.StateNode
|
|
||||||
// if this node's leaf key also did not show up in diffAccountsAtB
|
|
||||||
// that means the node was deleted
|
|
||||||
// in that case, emit an empty "removed" diff state node
|
|
||||||
// include empty "removed" diff storage nodes for all the storage slots
|
|
||||||
if _, ok := diffAccountsAtB[common.Bytes2Hex(leafKey)]; !ok {
|
|
||||||
diff = sdtypes.StateNode{
|
|
||||||
NodeType: sdtypes.Removed,
|
|
||||||
Path: node.Path,
|
|
||||||
LeafKey: leafKey,
|
|
||||||
NodeValue: []byte{},
|
|
||||||
}
|
|
||||||
|
|
||||||
var storageDiffs []sdtypes.StorageNode
|
|
||||||
err := sdb.buildRemovedAccountStorageNodes(account.Root, intermediateStorageNodes, storageNodeAppender(&storageDiffs))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed building storage diffs for removed node %x\r\nerror: %v", node.Path, err)
|
|
||||||
}
|
|
||||||
diff.StorageNodes = storageDiffs
|
|
||||||
} else {
|
|
||||||
// emit an empty "removed" diff with empty leaf key if the account was moved
|
|
||||||
diff = sdtypes.StateNode{
|
|
||||||
NodeType: sdtypes.Removed,
|
|
||||||
Path: node.Path,
|
|
||||||
NodeValue: []byte{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := output(diff); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case sdtypes.Extension, sdtypes.Branch:
|
|
||||||
// if this node's path did not show up in diffPathsAtB
|
|
||||||
// that means the node at this path was deleted (or moved) in B
|
|
||||||
// emit an empty "removed" diff to signify as such
|
|
||||||
if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok {
|
|
||||||
if err := output(sdtypes.StateNode{
|
|
||||||
Path: node.Path,
|
|
||||||
NodeValue: []byte{},
|
|
||||||
NodeType: sdtypes.Removed,
|
|
||||||
}); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// fall through, we did everything we need to do with these node types
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("unexpected node type %s", node.NodeType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return diffAccountAtA, it.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildAccountUpdates uses the account diffs maps for A => B and B => A and the known intersection of their leafkeys
|
|
||||||
// to generate the statediff node objects for all of the accounts that existed at both A and B but in different states
|
|
||||||
// needs to be called before building account creations and deletions as this mutates
|
|
||||||
// those account maps to remove the accounts which were updated
|
|
||||||
func (sdb *builder) buildAccountUpdates(creations, deletions AccountMap, updatedKeys []string, intermediateStorageNodes bool, output sdtypes.StateNodeSink) error {
|
|
||||||
var err error
|
|
||||||
for _, key := range updatedKeys {
|
|
||||||
createdAcc := creations[key]
|
|
||||||
deletedAcc := deletions[key]
|
|
||||||
var storageDiffs []sdtypes.StorageNode
|
|
||||||
if deletedAcc.Account != nil && createdAcc.Account != nil {
|
|
||||||
oldSR := deletedAcc.Account.Root
|
|
||||||
newSR := createdAcc.Account.Root
|
|
||||||
err = sdb.buildStorageNodesIncremental(oldSR, newSR, intermediateStorageNodes, storageNodeAppender(&storageDiffs))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed building incremental storage diffs for account with leafkey %s\r\nerror: %v", key, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err = output(sdtypes.StateNode{
|
|
||||||
NodeType: createdAcc.NodeType,
|
|
||||||
Path: createdAcc.Path,
|
|
||||||
NodeValue: createdAcc.NodeValue,
|
|
||||||
LeafKey: createdAcc.LeafKey,
|
|
||||||
StorageNodes: storageDiffs,
|
|
||||||
}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
delete(creations, key)
|
|
||||||
delete(deletions, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildAccountCreations returns the statediff node objects for all the accounts that exist at B but not at A
|
|
||||||
// it also returns the code and codehash for created contract accounts
|
|
||||||
func (sdb *builder) buildAccountCreations(accounts AccountMap, intermediateStorageNodes bool, output sdtypes.StateNodeSink, codeOutput sdtypes.CodeSink) error {
|
|
||||||
for _, val := range accounts {
|
|
||||||
diff := sdtypes.StateNode{
|
|
||||||
NodeType: val.NodeType,
|
|
||||||
Path: val.Path,
|
|
||||||
LeafKey: val.LeafKey,
|
|
||||||
NodeValue: val.NodeValue,
|
|
||||||
}
|
|
||||||
if !bytes.Equal(val.Account.CodeHash, nullCodeHash) {
|
|
||||||
// For contract creations, any storage node contained is a diff
|
|
||||||
var storageDiffs []sdtypes.StorageNode
|
|
||||||
err := sdb.buildStorageNodesEventual(val.Account.Root, intermediateStorageNodes, storageNodeAppender(&storageDiffs))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed building eventual storage diffs for node %x\r\nerror: %v", val.Path, err)
|
|
||||||
}
|
|
||||||
diff.StorageNodes = storageDiffs
|
|
||||||
// emit codehash => code mappings for code
|
|
||||||
codeHash := common.BytesToHash(val.Account.CodeHash)
|
|
||||||
code, err := sdb.stateCache.ContractCode(common.Hash{}, codeHash)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to retrieve code for codehash %s\r\n error: %v", codeHash.String(), err)
|
|
||||||
}
|
|
||||||
if err := codeOutput(sdtypes.CodeAndCodeHash{
|
|
||||||
Hash: codeHash,
|
|
||||||
Code: code,
|
|
||||||
}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := output(diff); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildStorageNodesEventual builds the storage diff node objects for a created account
|
|
||||||
// i.e. it returns all the storage nodes at this state, since there is no previous state
|
|
||||||
func (sdb *builder) buildStorageNodesEventual(sr common.Hash, intermediateNodes bool, output sdtypes.StorageNodeSink) error {
|
|
||||||
if bytes.Equal(sr.Bytes(), emptyContractRoot.Bytes()) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
log.Debug("Storage Root For Eventual Diff", "root", sr.Hex())
|
|
||||||
sTrie, err := sdb.stateCache.OpenTrie(sr)
|
|
||||||
if err != nil {
|
|
||||||
log.Info("error in build storage diff eventual", "error", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
it := sTrie.NodeIterator(make([]byte, 0))
|
|
||||||
err = sdb.buildStorageNodesFromTrie(it, intermediateNodes, output)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildStorageNodesFromTrie returns all the storage diff node objects in the provided node iterator
|
|
||||||
// if any storage keys are provided it will only return those leaf nodes
|
|
||||||
// including intermediate nodes can be turned on or off
|
|
||||||
func (sdb *builder) buildStorageNodesFromTrie(it trie.NodeIterator, intermediateNodes bool, output sdtypes.StorageNodeSink) error {
|
|
||||||
for it.Next(true) {
|
|
||||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
node, nodeElements, err := resolveNode(it, sdb.stateCache.TrieDB())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
switch node.NodeType {
|
|
||||||
case sdtypes.Leaf:
|
|
||||||
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
|
|
||||||
valueNodePath := append(node.Path, partialPath...)
|
|
||||||
encodedPath := trie.HexToCompact(valueNodePath)
|
|
||||||
leafKey := encodedPath[1:]
|
|
||||||
if err := output(sdtypes.StorageNode{
|
|
||||||
NodeType: node.NodeType,
|
|
||||||
Path: node.Path,
|
|
||||||
NodeValue: node.NodeValue,
|
|
||||||
LeafKey: leafKey,
|
|
||||||
}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case sdtypes.Extension, sdtypes.Branch:
|
|
||||||
if intermediateNodes {
|
|
||||||
if err := output(sdtypes.StorageNode{
|
|
||||||
NodeType: node.NodeType,
|
|
||||||
Path: node.Path,
|
|
||||||
NodeValue: node.NodeValue,
|
|
||||||
}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("unexpected node type %s", node.NodeType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return it.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildRemovedAccountStorageNodes builds the "removed" diffs for all the storage nodes for a destroyed account
|
|
||||||
func (sdb *builder) buildRemovedAccountStorageNodes(sr common.Hash, intermediateNodes bool, output sdtypes.StorageNodeSink) error {
|
|
||||||
if bytes.Equal(sr.Bytes(), emptyContractRoot.Bytes()) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
log.Debug("Storage Root For Removed Diffs", "root", sr.Hex())
|
|
||||||
sTrie, err := sdb.stateCache.OpenTrie(sr)
|
|
||||||
if err != nil {
|
|
||||||
log.Info("error in build removed account storage diffs", "error", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
it := sTrie.NodeIterator(make([]byte, 0))
|
|
||||||
err = sdb.buildRemovedStorageNodesFromTrie(it, intermediateNodes, output)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildRemovedStorageNodesFromTrie returns diffs for all the storage nodes in the provided node interator
|
|
||||||
// including intermediate nodes can be turned on or off
|
|
||||||
func (sdb *builder) buildRemovedStorageNodesFromTrie(it trie.NodeIterator, intermediateNodes bool, output sdtypes.StorageNodeSink) error {
|
|
||||||
for it.Next(true) {
|
|
||||||
// skip value nodes
|
|
||||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
node, nodeElements, err := resolveNode(it, sdb.stateCache.TrieDB())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
switch node.NodeType {
|
|
||||||
case sdtypes.Leaf:
|
|
||||||
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
|
|
||||||
valueNodePath := append(node.Path, partialPath...)
|
|
||||||
encodedPath := trie.HexToCompact(valueNodePath)
|
|
||||||
leafKey := encodedPath[1:]
|
|
||||||
if err := output(sdtypes.StorageNode{
|
|
||||||
NodeType: sdtypes.Removed,
|
|
||||||
Path: node.Path,
|
|
||||||
NodeValue: []byte{},
|
|
||||||
LeafKey: leafKey,
|
|
||||||
}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case sdtypes.Extension, sdtypes.Branch:
|
|
||||||
if intermediateNodes {
|
|
||||||
if err := output(sdtypes.StorageNode{
|
|
||||||
NodeType: sdtypes.Removed,
|
|
||||||
Path: node.Path,
|
|
||||||
NodeValue: []byte{},
|
|
||||||
}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("unexpected node type %s", node.NodeType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return it.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildStorageNodesIncremental builds the storage diff node objects for all nodes that exist in a different state at B than A
|
|
||||||
func (sdb *builder) buildStorageNodesIncremental(oldSR common.Hash, newSR common.Hash, intermediateNodes bool, output sdtypes.StorageNodeSink) error {
|
|
||||||
if bytes.Equal(newSR.Bytes(), oldSR.Bytes()) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
log.Debug("Storage Roots for Incremental Diff", "old", oldSR.Hex(), "new", newSR.Hex())
|
|
||||||
oldTrie, err := sdb.stateCache.OpenTrie(oldSR)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
newTrie, err := sdb.stateCache.OpenTrie(newSR)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
diffSlotsAtB, diffPathsAtB, err := sdb.createdAndUpdatedStorage(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}), intermediateNodes, output)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = sdb.deletedOrUpdatedStorage(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}), diffSlotsAtB, diffPathsAtB, intermediateNodes, output)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sdb *builder) createdAndUpdatedStorage(a, b trie.NodeIterator, intermediateNodes bool, output sdtypes.StorageNodeSink) (map[string]bool, map[string]bool, error) {
|
|
||||||
diffPathsAtB := make(map[string]bool)
|
|
||||||
diffSlotsAtB := make(map[string]bool)
|
|
||||||
it, _ := trie.NewDifferenceIterator(a, b)
|
|
||||||
for it.Next(true) {
|
|
||||||
if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
node, nodeElements, err := resolveNode(it, sdb.stateCache.TrieDB())
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
switch node.NodeType {
|
|
||||||
case sdtypes.Leaf:
|
|
||||||
partialPath := trie.CompactToHex(nodeElements[0].([]byte))
|
|
||||||
valueNodePath := append(node.Path, partialPath...)
|
|
||||||
encodedPath := trie.HexToCompact(valueNodePath)
|
|
||||||
leafKey := encodedPath[1:]
|
|
||||||
diffSlotsAtB[common.Bytes2Hex(leafKey)] = true
|
|
||||||
if err := output(sdtypes.StorageNode{
|
|
||||||
NodeType: node.NodeType,
|
|
||||||
Path: node.Path,
|
|
||||||
NodeValue: node.NodeValue,
|
|
||||||
LeafKey: leafKey,
|
|
||||||
}); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
case sdtypes.Extension, sdtypes.Branch:
|
|
||||||
if intermediateNodes {
|
|
||||||
if err := output(sdtypes.StorageNode{
|
|
||||||
NodeType: node.NodeType,
|
|
||||||
Path: node.Path,
|
|
||||||
NodeValue: node.NodeValue,
|
|
||||||
}); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return nil, nil, fmt.Errorf("unexpected node type %s", node.NodeType)
|
|
||||||
}
|
|
||||||
diffPathsAtB[common.Bytes2Hex(node.Path)] = true
|
|
||||||
}
|
|
||||||
return diffSlotsAtB, diffPathsAtB, it.Error()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sdb *builder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffSlotsAtB, diffPathsAtB map[string]bool, intermediateNodes bool, output sdtypes.StorageNodeSink) error {
	it, _ := trie.NewDifferenceIterator(b, a)
	for it.Next(true) {
		// skip value nodes
		if it.Leaf() || bytes.Equal(nullHashBytes, it.Hash().Bytes()) {
			continue
		}
		node, nodeElements, err := sdtrie.ResolveNode(it, sdb.stateCache.TrieDB())
		if err != nil {
			return err
		}

		switch node.NodeType {
		case sdtypes.Leaf:
			partialPath := trie.CompactToHex(nodeElements[0].([]byte))
			valueNodePath := append(node.Path, partialPath...)
			encodedPath := trie.HexToCompact(valueNodePath)
			leafKey := encodedPath[1:]

			// if this node's path did not show up in diffPathsAtB
			// that means the node at this path was deleted (or moved) in B
			if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok {
				// if this node's leaf key also did not show up in diffSlotsAtB
				// that means the node was deleted
				// in that case, emit an empty "removed" diff storage node
				if _, ok := diffSlotsAtB[common.Bytes2Hex(leafKey)]; !ok {
					if err := output(sdtypes.StorageNode{
						NodeType:  sdtypes.Removed,
						Path:      node.Path,
						NodeValue: []byte{},
						LeafKey:   leafKey,
					}); err != nil {
						return err
					}
				} else {
					// emit an empty "removed" diff with empty leaf key if the account was moved
					if err := output(sdtypes.StorageNode{
						NodeType:  sdtypes.Removed,
						Path:      node.Path,
						NodeValue: []byte{},
					}); err != nil {
						return err
					}
				}
			}
		case sdtypes.Extension, sdtypes.Branch:
			// if this node's path did not show up in diffPathsAtB
			// that means the node at this path was deleted in B
			// in that case, emit an empty "removed" diff storage node
			if _, ok := diffPathsAtB[common.Bytes2Hex(node.Path)]; !ok {
				if intermediateNodes {
					if err := output(sdtypes.StorageNode{
						NodeType:  sdtypes.Removed,
						Path:      node.Path,
						NodeValue: []byte{},
					}); err != nil {
						return err
					}
				}
			}
		default:
			return fmt.Errorf("unexpected node type %s", node.NodeType)
		}
	}
	return it.Error()
}

// isWatchedAddress is used to check if a state account corresponds to one of the addresses the builder is configured to watch
func isWatchedAddress(watchedAddressesLeafKeys map[common.Hash]struct{}, stateLeafKey []byte) bool {
	// If we aren't watching any specific addresses, we are watching everything
	if len(watchedAddressesLeafKeys) == 0 {
		return true
	}

	_, ok := watchedAddressesLeafKeys[common.BytesToHash(stateLeafKey)]
	return ok
}

// isWatchedStorageKey is used to check if a storage leaf corresponds to one of the storage slots the builder is configured to watch
func isWatchedStorageKey(watchedKeys []common.Hash, storageLeafKey []byte) bool {
	// If we aren't watching any specific storage slots, we are watching everything
	if len(watchedKeys) == 0 {
		return true
	}
	for _, hashKey := range watchedKeys {
		if bytes.Equal(hashKey.Bytes(), storageLeafKey) {
			return true
		}
	}
	return false
}
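Note: the two watch helpers above are the filter hooks for the builder. As a minimal illustration only (the wrapper below is hypothetical; only isWatchedAddress comes from the code above), this is the shape of their use when walking state leaf keys:

    // Hypothetical sketch: keep only the state leaf keys that belong to watched addresses.
    // watchedLeafKeys would be precomputed from the watched addresses in the params.
    func filterWatchedLeafKeys(watchedLeafKeys map[common.Hash]struct{}, leafKeys [][]byte) [][]byte {
    	kept := make([][]byte, 0, len(leafKeys))
    	for _, key := range leafKeys {
    		if isWatchedAddress(watchedLeafKeys, key) {
    			kept = append(kept, key)
    		}
    	}
    	return kept
    }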
@@ -26,20 +26,19 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/rlp"
-	"github.com/ethereum/go-ethereum/statediff"
+	sd "github.com/ethereum/go-ethereum/statediff"
 	"github.com/ethereum/go-ethereum/statediff/test_helpers"
 	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
 
-	pkg "github.com/vulcanize/eth-statediff-service/pkg"
+	pkg "github.com/cerc-io/eth-statediff-service/pkg"
 )
 
-// TODO: add test that filters on address
 var (
 	contractLeafKey []byte
 	emptyDiffs      = make([]sdtypes.StateNode, 0)
 	emptyStorage    = make([]sdtypes.StorageNode, 0)
 	block0, block1, block2, block3, block4, block5, block6 *types.Block
-	builder      pkg.Builder
+	builder      sd.Builder
 	miningReward = int64(2000000000000000000)
 	minerAddress = common.HexToAddress("0x0")
 	minerLeafKey = test_helpers.AddressToLeafKey(minerAddress)
@@ -487,16 +486,16 @@ func TestBuilder(t *testing.T) {
 	block1 = blocks[0]
 	block2 = blocks[1]
 	block3 = blocks[2]
-	params := statediff.Params{}
+	params := sd.Params{}
 
 	var tests = []struct {
 		name              string
-		startingArguments statediff.Args
+		startingArguments sd.Args
 		expected          *sdtypes.StateObject
 	}{
 		{
 			"testEmptyDiff",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block0.Root(),
 				NewStateRoot: block0.Root(),
 				BlockNumber:  block0.Number(),
@@ -511,7 +510,7 @@ func TestBuilder(t *testing.T) {
 		{
 			"testBlock0",
 			//10000 transferred from testBankAddress to account1Addr
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: test_helpers.NullHash,
 				NewStateRoot: block0.Root(),
 				BlockNumber:  block0.Number(),
@@ -534,7 +533,7 @@ func TestBuilder(t *testing.T) {
 		{
 			"testBlock1",
 			//10000 transferred from testBankAddress to account1Addr
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block0.Root(),
 				NewStateRoot: block1.Root(),
 				BlockNumber:  block1.Number(),
@@ -573,7 +572,7 @@ func TestBuilder(t *testing.T) {
 			// 1000 transferred from testBankAddress to account1Addr
 			// 1000 transferred from account1Addr to account2Addr
 			// account1addr creates a new contract
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block1.Root(),
 				NewStateRoot: block2.Root(),
 				BlockNumber:  block2.Number(),
@@ -644,7 +643,7 @@ func TestBuilder(t *testing.T) {
 			"testBlock3",
 			//the contract's storage is changed
 			//and the block is mined by account 2
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block2.Root(),
 				NewStateRoot: block3.Root(),
 				BlockNumber:  block3.Number(),
@@ -721,19 +720,19 @@ func TestBuilderWithIntermediateNodes(t *testing.T) {
 	block2 = blocks[1]
 	block3 = blocks[2]
 	blocks = append([]*types.Block{block0}, blocks...)
-	params := statediff.Params{
+	params := sd.Params{
 		IntermediateStateNodes:   true,
 		IntermediateStorageNodes: true,
 	}
 
 	var tests = []struct {
 		name              string
-		startingArguments statediff.Args
+		startingArguments sd.Args
 		expected          *sdtypes.StateObject
 	}{
 		{
 			"testEmptyDiff",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block0.Root(),
 				NewStateRoot: block0.Root(),
 				BlockNumber:  block0.Number(),
@@ -748,7 +747,7 @@ func TestBuilderWithIntermediateNodes(t *testing.T) {
 		{
 			"testBlock0",
 			//10000 transferred from testBankAddress to account1Addr
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: test_helpers.NullHash,
 				NewStateRoot: block0.Root(),
 				BlockNumber:  block0.Number(),
@@ -771,7 +770,7 @@ func TestBuilderWithIntermediateNodes(t *testing.T) {
 		{
 			"testBlock1",
 			//10000 transferred from testBankAddress to account1Addr
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block0.Root(),
 				NewStateRoot: block1.Root(),
 				BlockNumber:  block1.Number(),
@@ -816,7 +815,7 @@ func TestBuilderWithIntermediateNodes(t *testing.T) {
 			// 1000 transferred from testBankAddress to account1Addr
 			// 1000 transferred from account1Addr to account2Addr
 			// account1addr creates a new contract
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block1.Root(),
 				NewStateRoot: block2.Root(),
 				BlockNumber:  block2.Number(),
@@ -898,7 +897,7 @@ func TestBuilderWithIntermediateNodes(t *testing.T) {
 			"testBlock3",
 			//the contract's storage is changed
 			//and the block is mined by account 2
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block2.Root(),
 				NewStateRoot: block3.Root(),
 				BlockNumber:  block3.Number(),
@@ -999,19 +998,21 @@ func TestBuilderWithWatchedAddressList(t *testing.T) {
 	block1 = blocks[0]
 	block2 = blocks[1]
 	block3 = blocks[2]
-	params := statediff.Params{
+	params := sd.Params{
+		IntermediateStateNodes:   true,
+		IntermediateStorageNodes: true,
 		WatchedAddresses: []common.Address{test_helpers.Account1Addr, test_helpers.ContractAddr},
 	}
-	params.ComputeWatchedAddressesLeafKeys()
+	params.ComputeWatchedAddressesLeafPaths()
 
 	var tests = []struct {
 		name              string
-		startingArguments statediff.Args
+		startingArguments sd.Args
 		expected          *sdtypes.StateObject
 	}{
 		{
 			"testEmptyDiff",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block0.Root(),
 				NewStateRoot: block0.Root(),
 				BlockNumber:  block0.Number(),
@@ -1026,7 +1027,7 @@ func TestBuilderWithWatchedAddressList(t *testing.T) {
 		{
 			"testBlock0",
 			//10000 transferred from testBankAddress to account1Addr
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: test_helpers.NullHash,
 				NewStateRoot: block0.Root(),
 				BlockNumber:  block0.Number(),
@@ -1041,7 +1042,7 @@ func TestBuilderWithWatchedAddressList(t *testing.T) {
 		{
 			"testBlock1",
 			//10000 transferred from testBankAddress to account1Addr
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block0.Root(),
 				NewStateRoot: block1.Root(),
 				BlockNumber:  block1.Number(),
@@ -1051,6 +1052,12 @@ func TestBuilderWithWatchedAddressList(t *testing.T) {
 					BlockNumber: block1.Number(),
 					BlockHash:   block1.Hash(),
 					Nodes: []sdtypes.StateNode{
+						{
+							Path:         []byte{},
+							NodeType:     sdtypes.Branch,
+							NodeValue:    block1BranchRootNode,
+							StorageNodes: emptyStorage,
+						},
 						{
 							Path:     []byte{'\x0e'},
 							NodeType: sdtypes.Leaf,
@@ -1065,7 +1072,7 @@ func TestBuilderWithWatchedAddressList(t *testing.T) {
 			"testBlock2",
 			//1000 transferred from testBankAddress to account1Addr
 			//1000 transferred from account1Addr to account2Addr
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block1.Root(),
 				NewStateRoot: block2.Root(),
 				BlockNumber:  block2.Number(),
@@ -1075,12 +1082,23 @@ func TestBuilderWithWatchedAddressList(t *testing.T) {
 					BlockNumber: block2.Number(),
 					BlockHash:   block2.Hash(),
 					Nodes: []sdtypes.StateNode{
+						{
+							Path:         []byte{},
+							NodeType:     sdtypes.Branch,
+							NodeValue:    block2BranchRootNode,
+							StorageNodes: emptyStorage,
+						},
 						{
 							Path:      []byte{'\x06'},
 							NodeType:  sdtypes.Leaf,
 							LeafKey:   contractLeafKey,
 							NodeValue: contractAccountAtBlock2LeafNode,
 							StorageNodes: []sdtypes.StorageNode{
+								{
+									Path:      []byte{},
+									NodeType:  sdtypes.Branch,
+									NodeValue: block2StorageBranchRootNode,
+								},
 								{
 									Path:     []byte{'\x02'},
 									NodeType: sdtypes.Leaf,
@@ -1115,7 +1133,7 @@ func TestBuilderWithWatchedAddressList(t *testing.T) {
 			"testBlock3",
 			//the contract's storage is changed
 			//and the block is mined by account 2
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block2.Root(),
 				NewStateRoot: block3.Root(),
 				BlockNumber:  block3.Number(),
@@ -1125,12 +1143,23 @@ func TestBuilderWithWatchedAddressList(t *testing.T) {
 					BlockNumber: block3.Number(),
 					BlockHash:   block3.Hash(),
 					Nodes: []sdtypes.StateNode{
+						{
+							Path:         []byte{},
+							NodeType:     sdtypes.Branch,
+							NodeValue:    block3BranchRootNode,
+							StorageNodes: emptyStorage,
+						},
 						{
 							Path:      []byte{'\x06'},
 							NodeType:  sdtypes.Leaf,
 							LeafKey:   contractLeafKey,
 							NodeValue: contractAccountAtBlock3LeafNode,
 							StorageNodes: []sdtypes.StorageNode{
+								{
+									Path:      []byte{},
+									NodeType:  sdtypes.Branch,
+									NodeValue: block3StorageBranchRootNode,
+								},
 								{
 									Path:     []byte{'\x0c'},
 									NodeType: sdtypes.Leaf,
@@ -1177,20 +1206,20 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) {
 	block4 = blocks[3]
 	block5 = blocks[4]
 	block6 = blocks[5]
-	params := statediff.Params{
+	params := sd.Params{
 		IntermediateStateNodes:   true,
 		IntermediateStorageNodes: true,
 	}
 
 	var tests = []struct {
 		name              string
-		startingArguments statediff.Args
+		startingArguments sd.Args
 		expected          *sdtypes.StateObject
 	}{
 		// blocks 0-3 are the same as in TestBuilderWithIntermediateNodes
 		{
 			"testBlock4",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block3.Root(),
 				NewStateRoot: block4.Root(),
 				BlockNumber:  block4.Number(),
@@ -1256,7 +1285,7 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) {
 		},
 		{
 			"testBlock5",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block4.Root(),
 				NewStateRoot: block5.Root(),
 				BlockNumber:  block5.Number(),
@@ -1316,7 +1345,7 @@ func TestBuilderWithRemovedAccountAndStorage(t *testing.T) {
 		},
 		{
 			"testBlock6",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block5.Root(),
 				NewStateRoot: block6.Root(),
 				BlockNumber:  block6.Number(),
@@ -1412,20 +1441,20 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing.T) {
 	block4 = blocks[3]
 	block5 = blocks[4]
 	block6 = blocks[5]
-	params := statediff.Params{
+	params := sd.Params{
 		IntermediateStateNodes:   false,
 		IntermediateStorageNodes: false,
 	}
 
 	var tests = []struct {
 		name              string
-		startingArguments statediff.Args
+		startingArguments sd.Args
 		expected          *sdtypes.StateObject
 	}{
 		// blocks 0-3 are the same as in TestBuilderWithIntermediateNodes
 		{
 			"testBlock4",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block3.Root(),
 				NewStateRoot: block4.Root(),
 				BlockNumber:  block4.Number(),
@@ -1480,7 +1509,7 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing.T) {
 		},
 		{
 			"testBlock5",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block4.Root(),
 				NewStateRoot: block5.Root(),
 				BlockNumber:  block5.Number(),
@@ -1529,7 +1558,7 @@ func TestBuilderWithRemovedAccountAndStorageWithoutIntermediateNodes(t *testing.T) {
 		},
 		{
 			"testBlock6",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block5.Root(),
 				NewStateRoot: block6.Root(),
 				BlockNumber:  block6.Number(),
@@ -1611,19 +1640,21 @@ func TestBuilderWithRemovedNonWatchedAccount(t *testing.T) {
 	block4 = blocks[3]
 	block5 = blocks[4]
 	block6 = blocks[5]
-	params := statediff.Params{
+	params := sd.Params{
+		IntermediateStateNodes:   true,
+		IntermediateStorageNodes: true,
 		WatchedAddresses: []common.Address{test_helpers.Account1Addr, test_helpers.Account2Addr},
 	}
-	params.ComputeWatchedAddressesLeafKeys()
+	params.ComputeWatchedAddressesLeafPaths()
 
 	var tests = []struct {
 		name              string
-		startingArguments statediff.Args
+		startingArguments sd.Args
 		expected          *sdtypes.StateObject
 	}{
 		{
 			"testBlock4",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block3.Root(),
 				NewStateRoot: block4.Root(),
 				BlockNumber:  block4.Number(),
@@ -1633,6 +1664,12 @@ func TestBuilderWithRemovedNonWatchedAccount(t *testing.T) {
 					BlockNumber: block4.Number(),
 					BlockHash:   block4.Hash(),
 					Nodes: []sdtypes.StateNode{
+						{
+							Path:         []byte{},
+							NodeType:     sdtypes.Branch,
+							NodeValue:    block4BranchRootNode,
+							StorageNodes: emptyStorage,
+						},
 						{
 							Path:     []byte{'\x0c'},
 							NodeType: sdtypes.Leaf,
@@ -1645,7 +1682,7 @@ func TestBuilderWithRemovedNonWatchedAccount(t *testing.T) {
 		},
 		{
 			"testBlock5",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block4.Root(),
 				NewStateRoot: block5.Root(),
 				BlockNumber:  block5.Number(),
@@ -1655,6 +1692,12 @@ func TestBuilderWithRemovedNonWatchedAccount(t *testing.T) {
 					BlockNumber: block5.Number(),
 					BlockHash:   block5.Hash(),
 					Nodes: []sdtypes.StateNode{
+						{
+							Path:         []byte{},
+							NodeType:     sdtypes.Branch,
+							NodeValue:    block5BranchRootNode,
+							StorageNodes: emptyStorage,
+						},
 						{
 							Path:     []byte{'\x0e'},
 							NodeType: sdtypes.Leaf,
@@ -1667,7 +1710,7 @@ func TestBuilderWithRemovedNonWatchedAccount(t *testing.T) {
 		},
 		{
 			"testBlock6",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block5.Root(),
 				NewStateRoot: block6.Root(),
 				BlockNumber:  block6.Number(),
@@ -1677,6 +1720,12 @@ func TestBuilderWithRemovedNonWatchedAccount(t *testing.T) {
 					BlockNumber: block6.Number(),
 					BlockHash:   block6.Hash(),
 					Nodes: []sdtypes.StateNode{
+						{
+							Path:         []byte{},
+							NodeType:     sdtypes.Branch,
+							NodeValue:    block6BranchRootNode,
+							StorageNodes: emptyStorage,
+						},
 						{
 							Path:     []byte{'\x0c'},
 							NodeType: sdtypes.Leaf,
@@ -1729,19 +1778,21 @@ func TestBuilderWithRemovedWatchedAccount(t *testing.T) {
 	block4 = blocks[3]
 	block5 = blocks[4]
 	block6 = blocks[5]
-	params := statediff.Params{
+	params := sd.Params{
+		IntermediateStateNodes:   true,
+		IntermediateStorageNodes: true,
 		WatchedAddresses: []common.Address{test_helpers.Account1Addr, test_helpers.ContractAddr},
 	}
-	params.ComputeWatchedAddressesLeafKeys()
+	params.ComputeWatchedAddressesLeafPaths()
 
 	var tests = []struct {
 		name              string
-		startingArguments statediff.Args
+		startingArguments sd.Args
 		expected          *sdtypes.StateObject
 	}{
 		{
 			"testBlock4",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block3.Root(),
 				NewStateRoot: block4.Root(),
 				BlockNumber:  block4.Number(),
@@ -1751,12 +1802,23 @@ func TestBuilderWithRemovedWatchedAccount(t *testing.T) {
 					BlockNumber: block4.Number(),
 					BlockHash:   block4.Hash(),
 					Nodes: []sdtypes.StateNode{
+						{
+							Path:         []byte{},
+							NodeType:     sdtypes.Branch,
+							NodeValue:    block4BranchRootNode,
+							StorageNodes: emptyStorage,
+						},
 						{
 							Path:      []byte{'\x06'},
 							NodeType:  sdtypes.Leaf,
 							LeafKey:   contractLeafKey,
 							NodeValue: contractAccountAtBlock4LeafNode,
 							StorageNodes: []sdtypes.StorageNode{
+								{
+									Path:      []byte{},
+									NodeType:  sdtypes.Branch,
+									NodeValue: block4StorageBranchRootNode,
+								},
 								{
 									Path:     []byte{'\x04'},
 									NodeType: sdtypes.Leaf,
@@ -1782,7 +1844,7 @@ func TestBuilderWithRemovedWatchedAccount(t *testing.T) {
 		},
 		{
 			"testBlock5",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block4.Root(),
 				NewStateRoot: block5.Root(),
 				BlockNumber:  block5.Number(),
@@ -1792,12 +1854,23 @@ func TestBuilderWithRemovedWatchedAccount(t *testing.T) {
 					BlockNumber: block5.Number(),
 					BlockHash:   block5.Hash(),
 					Nodes: []sdtypes.StateNode{
+						{
+							Path:         []byte{},
+							NodeType:     sdtypes.Branch,
+							NodeValue:    block5BranchRootNode,
+							StorageNodes: emptyStorage,
+						},
 						{
 							Path:      []byte{'\x06'},
 							NodeType:  sdtypes.Leaf,
 							LeafKey:   contractLeafKey,
 							NodeValue: contractAccountAtBlock5LeafNode,
 							StorageNodes: []sdtypes.StorageNode{
+								{
+									Path:      []byte{},
+									NodeType:  sdtypes.Branch,
+									NodeValue: block5StorageBranchRootNode,
+								},
 								{
 									Path:     []byte{'\x0c'},
 									NodeType: sdtypes.Leaf,
@@ -1824,7 +1897,7 @@ func TestBuilderWithRemovedWatchedAccount(t *testing.T) {
 		},
 		{
 			"testBlock6",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block5.Root(),
 				NewStateRoot: block6.Root(),
 				BlockNumber:  block6.Number(),
@@ -1834,12 +1907,23 @@ func TestBuilderWithRemovedWatchedAccount(t *testing.T) {
 					BlockNumber: block6.Number(),
 					BlockHash:   block6.Hash(),
 					Nodes: []sdtypes.StateNode{
+						{
+							Path:         []byte{},
+							NodeType:     sdtypes.Branch,
+							NodeValue:    block6BranchRootNode,
+							StorageNodes: emptyStorage,
+						},
 						{
 							Path:      []byte{'\x06'},
 							NodeType:  sdtypes.Removed,
 							LeafKey:   contractLeafKey,
 							NodeValue: []byte{},
 							StorageNodes: []sdtypes.StorageNode{
+								{
+									Path:      []byte{},
+									NodeType:  sdtypes.Removed,
+									NodeValue: []byte{},
+								},
 								{
 									Path:     []byte{'\x02'},
 									NodeType: sdtypes.Removed,
@@ -1979,20 +2063,19 @@ func TestBuilderWithMovedAccount(t *testing.T) {
 	block0 = test_helpers.Genesis
 	block1 = blocks[0]
 	block2 = blocks[1]
-	params := statediff.Params{
+	params := sd.Params{
 		IntermediateStateNodes:   true,
 		IntermediateStorageNodes: true,
 	}
 
 	var tests = []struct {
 		name              string
-		startingArguments statediff.Args
+		startingArguments sd.Args
 		expected          *sdtypes.StateObject
 	}{
 		{
 			"testBlock1",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block0.Root(),
 				NewStateRoot: block1.Root(),
 				BlockNumber:  block1.Number(),
@@ -2050,7 +2134,7 @@ func TestBuilderWithMovedAccount(t *testing.T) {
 		},
 		{
 			"testBlock2",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block1.Root(),
 				NewStateRoot: block2.Root(),
 				BlockNumber:  block2.Number(),
@@ -2131,19 +2215,19 @@ func TestBuilderWithMovedAccountOnlyLeafs(t *testing.T) {
 	block0 = test_helpers.Genesis
 	block1 = blocks[0]
 	block2 = blocks[1]
-	params := statediff.Params{
+	params := sd.Params{
 		IntermediateStateNodes:   false,
 		IntermediateStorageNodes: false,
 	}
 
 	var tests = []struct {
 		name              string
-		startingArguments statediff.Args
+		startingArguments sd.Args
 		expected          *sdtypes.StateObject
 	}{
 		{
 			"testBlock1",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block0.Root(),
 				NewStateRoot: block1.Root(),
 				BlockNumber:  block1.Number(),
@@ -2191,7 +2275,7 @@ func TestBuilderWithMovedAccountOnlyLeafs(t *testing.T) {
 		},
 		{
 			"testBlock2",
-			statediff.Args{
+			sd.Args{
 				OldStateRoot: block1.Root(),
 				NewStateRoot: block2.Root(),
 				BlockNumber:  block2.Number(),
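Note: the test changes above track two API updates visible in the hunks: ComputeWatchedAddressesLeafKeys is renamed to ComputeWatchedAddressesLeafPaths, and the expected output now includes the state root branch node (and the storage branch root) whenever intermediate nodes are requested, even under an address filter. A minimal configuration sketch using only names from the diffs (the address value is a placeholder):

    params := sd.Params{
    	IntermediateStateNodes:   true,
    	IntermediateStorageNodes: true,
    	WatchedAddresses:         []common.Address{common.HexToAddress("0x1")}, // placeholder
    }
    // precompute the trie leaf paths of the watched addresses before diffing
    params.ComputeWatchedAddressesLeafPaths()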
@@ -17,9 +17,9 @@
 package prom
 
 import (
-	"database/sql"
-
 	"github.com/prometheus/client_golang/prometheus"
+
+	dbmetrics "github.com/ethereum/go-ethereum/statediff/indexer/database/metrics"
 )
 
 const (
@@ -29,7 +29,7 @@ const (
 
 // DBStatsGetter is an interface that gets sql.DBStats.
 type DBStatsGetter interface {
-	Stats() sql.DBStats
+	Stats() dbmetrics.DbStats
 }
 
 // DBStatsCollector implements the prometheus.Collector interface.
@@ -122,41 +122,41 @@ func (c DBStatsCollector) Collect(ch chan<- prometheus.Metric) {
 	ch <- prometheus.MustNewConstMetric(
 		c.maxOpenDesc,
 		prometheus.GaugeValue,
-		float64(stats.MaxOpenConnections),
+		float64(stats.MaxOpen()),
 	)
 	ch <- prometheus.MustNewConstMetric(
 		c.openDesc,
 		prometheus.GaugeValue,
-		float64(stats.OpenConnections),
+		float64(stats.Open()),
 	)
 	ch <- prometheus.MustNewConstMetric(
 		c.inUseDesc,
 		prometheus.GaugeValue,
-		float64(stats.InUse),
+		float64(stats.InUse()),
 	)
 	ch <- prometheus.MustNewConstMetric(
 		c.idleDesc,
 		prometheus.GaugeValue,
-		float64(stats.Idle),
+		float64(stats.Idle()),
 	)
 	ch <- prometheus.MustNewConstMetric(
 		c.waitedForDesc,
 		prometheus.CounterValue,
-		float64(stats.WaitCount),
+		float64(stats.WaitCount()),
 	)
 	ch <- prometheus.MustNewConstMetric(
 		c.blockedSecondsDesc,
 		prometheus.CounterValue,
-		stats.WaitDuration.Seconds(),
+		stats.WaitDuration().Seconds(),
 	)
 	ch <- prometheus.MustNewConstMetric(
 		c.closedMaxIdleDesc,
 		prometheus.CounterValue,
-		float64(stats.MaxIdleClosed),
+		float64(stats.MaxIdleClosed()),
 	)
 	ch <- prometheus.MustNewConstMetric(
 		c.closedMaxLifetimeDesc,
 		prometheus.CounterValue,
-		float64(stats.MaxLifetimeClosed),
+		float64(stats.MaxLifetimeClosed()),
 	)
 }
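Note: the collector now reads pool statistics through accessor methods (stats.MaxOpen(), stats.Open(), and so on) instead of the sql.DBStats struct fields, which decouples it from database/sql. As an illustration only (not code from this diff, and the accessor return types are assumptions), an accessor-style wrapper over sql.DBStats would look like:

    // Hypothetical wrapper showing the accessor style the collector now expects.
    type sqlDBStats struct{ s sql.DBStats }

    func (w sqlDBStats) MaxOpen() int64              { return int64(w.s.MaxOpenConnections) }
    func (w sqlDBStats) Open() int64                 { return int64(w.s.OpenConnections) }
    func (w sqlDBStats) InUse() int64                { return int64(w.s.InUse) }
    func (w sqlDBStats) Idle() int64                 { return int64(w.s.Idle) }
    func (w sqlDBStats) WaitCount() int64            { return w.s.WaitCount }
    func (w sqlDBStats) WaitDuration() time.Duration { return w.s.WaitDuration }
    func (w sqlDBStats) MaxIdleClosed() int64        { return w.s.MaxIdleClosed }
    func (w sqlDBStats) MaxLifetimeClosed() int64    { return w.s.MaxLifetimeClosed }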
@@ -19,7 +19,6 @@ package prom
 import (
 	"time"
 
-	"github.com/jmoiron/sqlx"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 )
@@ -123,7 +122,7 @@ func Init() {
 }
 
 // RegisterDBCollector create metric collector for given connection
-func RegisterDBCollector(name string, db *sqlx.DB) {
+func RegisterDBCollector(name string, db DBStatsGetter) {
 	if metrics {
 		prometheus.Register(NewDBStatsCollector(name, db))
 	}
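Note: with DBStatsGetter in the signature, any stats source can be registered, not just a *sqlx.DB. A hypothetical call site (db here is any value whose Stats() returns dbmetrics.DbStats):

    prom.RegisterDBCollector("statediff-db", db)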
@@ -16,9 +16,11 @@
 package statediff
 
 import (
+	"errors"
 	"fmt"
 	"math/big"
 
+	"github.com/cerc-io/leveldb-ethdb-rpc/pkg/client"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
@@ -26,7 +28,6 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/trie"
-	"github.com/vulcanize/leveldb-ethdb-rpc/pkg/client"
 )
 
 // Reader interface required by the statediffing service
@@ -36,6 +37,7 @@ type Reader interface {
 	GetReceiptsByHash(hash common.Hash) (types.Receipts, error)
 	GetTdByHash(hash common.Hash) (*big.Int, error)
 	StateDB() state.Database
+	GetLatestHeader() (*types.Header, error)
 }
 
 // LvlDBReader exposes the necessary Reader methods on lvldb
@@ -60,7 +62,8 @@ func NewLvlDBReader(conf LvLDBReaderConfig) (*LvlDBReader, error) {
 	var err error
 
 	if conf.Mode == "local" {
-		edb, err = rawdb.NewLevelDBDatabaseWithFreezer(conf.Path, conf.DBCacheSize, 256, conf.AncientPath, "eth-statediff-service", true)
+		kvdb, _ := rawdb.NewLevelDBDatabase(conf.Path, conf.DBCacheSize, 256, "eth-statediff-service", true)
+		edb, err = rawdb.NewDatabaseWithFreezer(kvdb, conf.AncientPath, "eth-statediff-service", true)
 	}
 
 	if conf.Mode == "remote" {
@@ -129,3 +132,12 @@ func (ldr *LvlDBReader) GetTdByHash(hash common.Hash) (*big.Int, error) {
 func (ldr *LvlDBReader) StateDB() state.Database {
 	return ldr.stateDB
 }
+
+// GetLatestHeader gets the latest header from the levelDB
+func (ldr *LvlDBReader) GetLatestHeader() (*types.Header, error) {
+	header := rawdb.ReadHeadHeader(ldr.ethDB)
+	if header == nil {
+		return nil, errors.New("unable to read head header")
+	}
+	return header, nil
+}
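Note: GetLatestHeader gives the service a way to discover the chain head straight from LevelDB, e.g. to bound a processing range. A hypothetical caller (the reader variable is illustrative):

    header, err := reader.GetLatestHeader()
    if err != nil {
    	logrus.Fatalf("cannot determine chain head: %v", err)
    }
    logrus.Infof("latest header height: %d", header.Number.Uint64())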
@@ -24,14 +24,14 @@ import (
 	"github.com/ethereum/go-ethereum/rpc"
 	log "github.com/sirupsen/logrus"
 
-	"github.com/vulcanize/eth-statediff-service/pkg/prom"
+	"github.com/cerc-io/eth-statediff-service/pkg/prom"
 )
 
 // StartHTTPEndpoint starts the HTTP RPC endpoint, configured with cors/vhosts/modules.
 func StartHTTPEndpoint(endpoint string, apis []rpc.API, modules []string, cors []string, vhosts []string, timeouts rpc.HTTPTimeouts) (*rpc.Server, error) {
 
 	srv := rpc.NewServer()
-	err := node.RegisterApis(apis, modules, srv, false)
+	err := node.RegisterApis(apis, modules, srv)
 	if err != nil {
 		utils.Fatalf("Could not register HTTP API: %w", err)
 	}
@@ -26,7 +26,7 @@ import (
 	"github.com/ethereum/go-ethereum/rpc"
 	log "github.com/sirupsen/logrus"
 
-	"github.com/vulcanize/eth-statediff-service/pkg/prom"
+	"github.com/cerc-io/eth-statediff-service/pkg/prom"
 )
 
 var (
@@ -33,7 +33,7 @@ import (
 	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
 	"github.com/sirupsen/logrus"
 
-	"github.com/vulcanize/eth-statediff-service/pkg/prom"
+	"github.com/cerc-io/eth-statediff-service/pkg/prom"
 )
 
 const defaultQueueSize = 1024
@@ -48,7 +48,7 @@ type StateDiffService interface {
 	// Loop is the main event loop for processing state diffs
 	Loop(wg *sync.WaitGroup) error
 	// Run is a one-off command to run on a predefined set of ranges
-	Run(ranges []RangeRequest) error
+	Run(ranges []RangeRequest, parallel bool) error
 	// StateDiffAt method to get state diff object at specific block
 	StateDiffAt(blockNumber uint64, params sd.Params) (*sd.Payload, error)
 	// StateDiffFor method to get state diff object at specific block
@@ -66,7 +66,7 @@ type StateDiffService interface {
 // Service is the underlying struct for the state diffing service
 type Service struct {
 	// Used to build the state diff objects
-	Builder Builder
+	Builder sd.Builder
 	// Used to read data from LevelDB
 	lvlDBReader Reader
 	// Used to signal shutdown of the service
@@ -117,19 +117,82 @@ func (sds *Service) APIs() []rpc.API {
 	}
 }
 
+func segmentRange(workers, start, stop uint64, params sd.Params) []RangeRequest {
+	segmentSize := ((stop - start) + 1) / workers
+	remainder := ((stop - start) + 1) % workers
+	numOfSegments := workers
+	if remainder > 0 {
+		numOfSegments++
+	}
+	segments := make([]RangeRequest, numOfSegments)
+	for i := range segments {
+		end := start + segmentSize - 1
+		if end > stop {
+			end = stop
+		}
+		segments[i] = RangeRequest{start, end, params}
+		start = end + 1
+	}
+	return segments
+}
+
 // Run does a one-off processing run on the provided RangeRequests + any pre-runs, exiting afterwards
-func (sds *Service) Run(rngs []RangeRequest) error {
+func (sds *Service) Run(rngs []RangeRequest, parallel bool) error {
 	for _, preRun := range sds.preruns {
-		logrus.Infof("processing prerun range (%d, %d)", preRun.Start, preRun.Stop)
+		// if the rangeSize is smaller than the number of workers
+		// make sure we do synchronous processing to avoid quantization issues
+		rangeSize := (preRun.Stop - preRun.Start) + 1
+		numWorkers := uint64(sds.workers)
+		if rangeSize < numWorkers {
+			parallel = false
+		}
+		if parallel {
+			logrus.Infof("parallel processing prerun range (%d, %d) (%d blocks) divided into %d sized chunks with %d workers", preRun.Start, preRun.Stop,
+				rangeSize, rangeSize/numWorkers, numWorkers)
+			workChan := make(chan RangeRequest)
+			quitChan := make(chan struct{})
+			// spin up numWorkers number of worker goroutines
+			wg := new(sync.WaitGroup)
+			for i := 0; i < int(numWorkers); i++ {
+				wg.Add(1)
+				go func(id int) {
+					defer wg.Done()
+					for {
+						select {
+						case workerSegment := <-workChan:
+							for j := workerSegment.Start; j <= workerSegment.Stop; j++ {
+								if err := sds.WriteStateDiffAt(j, workerSegment.Params); err != nil {
+									logrus.Errorf("error writing statediff at height %d in range (%d, %d) : %v", j, workerSegment.Start, workerSegment.Stop, err)
+								}
+							}
+							logrus.Infof("prerun worker %d finished processing range (%d, %d)", id, workerSegment.Start, workerSegment.Stop)
+						case <-quitChan:
+							return
+						}
+					}
+				}(i)
+			}
+			// break range up into segments
+			segments := segmentRange(numWorkers, preRun.Start, preRun.Stop, preRun.Params)
+			// send the segments to the work channel
+			for _, segment := range segments {
+				workChan <- segment
+			}
+			close(quitChan)
+			wg.Wait()
+		} else {
+			logrus.Infof("sequential processing prerun range (%d, %d)", preRun.Start, preRun.Stop)
 			for i := preRun.Start; i <= preRun.Stop; i++ {
 				if err := sds.WriteStateDiffAt(i, preRun.Params); err != nil {
 					return fmt.Errorf("error writing statediff at height %d in range (%d, %d) : %v", i, preRun.Start, preRun.Stop, err)
 				}
 			}
+		}
 	}
 	sds.preruns = nil
+	// At present this code is never called so we have not written the parallel version:
 	for _, rng := range rngs {
-		logrus.Infof("processing prerun range (%d, %d)", rng.Start, rng.Stop)
+		logrus.Infof("processing requested range (%d, %d)", rng.Start, rng.Stop)
 		for i := rng.Start; i <= rng.Stop; i++ {
 			if err := sds.WriteStateDiffAt(i, rng.Params); err != nil {
 				return fmt.Errorf("error writing statediff at height %d in range (%d, %d) : %v", i, rng.Start, rng.Stop, err)
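Note: a worked example of the segmentation above, assuming 4 workers over blocks 0 through 9: segmentSize = 10/4 = 2 with remainder 2, so numOfSegments = 5 and the range splits into (0,1), (2,3), (4,5), (6,7), (8,9). A quick check:

    segments := segmentRange(4, 0, 9, sd.Params{})
    for _, s := range segments {
    	fmt.Printf("(%d, %d) ", s.Start, s.Stop) // prints: (0, 1) (2, 3) (4, 5) (6, 7) (8, 9)
    }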
@@ -195,8 +258,8 @@ func (sds *Service) StateDiffAt(blockNumber uint64, params sd.Params) (*sd.Payload, error) {
 	}
 	logrus.Infof("sending state diff at block %d", blockNumber)
 
-	// compute leaf keys of watched addresses in the params
-	params.ComputeWatchedAddressesLeafKeys()
+	// compute leaf paths of watched addresses in the params
+	params.ComputeWatchedAddressesLeafPaths()
 
 	if blockNumber == 0 {
 		return sds.processStateDiff(currentBlock, common.Hash{}, params)
@@ -217,8 +280,8 @@ func (sds *Service) StateDiffFor(blockHash common.Hash, params sd.Params) (*sd.Payload, error) {
 	}
 	logrus.Infof("sending state diff at block %s", blockHash.Hex())
 
-	// compute leaf keys of watched addresses in the params
-	params.ComputeWatchedAddressesLeafKeys()
+	// compute leaf paths of watched addresses in the params
+	params.ComputeWatchedAddressesLeafPaths()
 
 	if currentBlock.NumberU64() == 0 {
 		return sds.processStateDiff(currentBlock, common.Hash{}, params)
@@ -290,8 +353,8 @@ func (sds *Service) StateTrieAt(blockNumber uint64, params sd.Params) (*sd.Payload, error) {
 	}
 	logrus.Infof("sending state trie at block %d", blockNumber)
 
-	// compute leaf keys of watched addresses in the params
-	params.ComputeWatchedAddressesLeafKeys()
+	// compute leaf paths of watched addresses in the params
+	params.ComputeWatchedAddressesLeafPaths()
 
 	return sds.processStateTrie(currentBlock, params)
 }
@@ -333,8 +396,8 @@ func (sds *Service) WriteStateDiffAt(blockNumber uint64, params sd.Params) error {
 		return err
 	}
 
-	// compute leaf keys of watched addresses in the params
-	params.ComputeWatchedAddressesLeafKeys()
+	// compute leaf paths of watched addresses in the params
+	params.ComputeWatchedAddressesLeafPaths()
 
 	parentRoot := common.Hash{}
 	if blockNumber != 0 {
@@ -358,8 +421,8 @@ func (sds *Service) WriteStateDiffFor(blockHash common.Hash, params sd.Params) error {
 		return err
 	}
 
-	// compute leaf keys of watched addresses in the params
-	params.ComputeWatchedAddressesLeafKeys()
+	// compute leaf paths of watched addresses in the params
+	params.ComputeWatchedAddressesLeafPaths()
 
 	parentRoot := common.Hash{}
 	if currentBlock.NumberU64() != 0 {
@@ -406,7 +469,7 @@ func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, params sd.Params) error {
 	}
 	prom.SetTimeMetric(prom.T_BLOCK_PROCESSING, time.Now().Sub(t))
 	t = time.Now()
-	err = sds.Builder.WriteStateDiffObject(sdtypes.StateRoots{
+	err = sds.Builder.WriteStateDiffObject(sd.Args{
 		NewStateRoot: block.Root(),
 		OldStateRoot: parentRoot,
 	}, params, output, codeOutput)
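Note: with the new signature the parallel flag governs how the configured preruns are processed; explicitly passed ranges are still handled sequentially, as the comment in the diff states. A hypothetical invocation (params is a placeholder):

    rng := RangeRequest{Start: 1000000, Stop: 1000100, Params: params}
    if err := sds.Run([]RangeRequest{rng}, true); err != nil {
    	logrus.Fatal(err)
    }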
28	scripts/.env.example	Normal file
@@ -0,0 +1,28 @@
+# Used by the script to count rows (count-lines.sh)
+COUNT_LINES_LOG=./count-lines.log
+COUNT_LINES_INPUT_DIR=~/eth-statediff-service/output_dir
+COUNT_LINES_OUTPUT_FILE=./output-stats.txt
+
+# Used by the script to dedup output files (dedup.sh)
+DEDUP_LOG=./dedup.log
+DEDUP_INPUT_DIR=~/eth-statediff-service/output_dir
+DEDUP_OUTPUT_DIR=~/eth-statediff-service/dedup_dir
+DEDUP_SORT_DIR=./.sort
+
+# Used by the script to perform column checks (check-columns.sh)
+CHECK_COLUMNS_LOG=./check-columns.log
+CHECK_COLUMNS_INPUT_DIR=~/eth-statediff-service/output_dir
+CHECK_COLUMNS_INPUT_DEDUP_DIR=~/eth-statediff-service/dedup_dir
+CHECK_COLUMNS_OUTPUT_DIR=./check-columns
+
+# Used by the script to import data (timescaledb-import.sh)
+IMPORT_LOG=./tsdb-import.log
+IMPORT_INPUT_DIR=~/eth-statediff-service/output_dir
+IMPORT_INPUT_DEDUP_DIR=~/eth-statediff-service/dedup_dir
+TIMESCALEDB_WORKERS=8
+
+DATABASE_USER=vdbm
+DATABASE_HOSTNAME=localhost
+DATABASE_PORT=8077
+DATABASE_NAME=vulcanize_testing
+DATABASE_PASSWORD=password
58	scripts/check-columns.sh	Executable file
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+# Requires:
+# CHECK_COLUMNS_LOG
+# CHECK_COLUMNS_INPUT_DIR
+# CHECK_COLUMNS_INPUT_DEDUP_DIR
+# CHECK_COLUMNS_OUTPUT_DIR
+
+# env file arg
+ENV=$1
+echo "Using env file: ${ENV}"
+
+# read env file
+export $(grep -v '^#' ${ENV} | xargs)
+
+# redirect stdout/stderr to a file
+exec >"${CHECK_COLUMNS_LOG}" 2>&1
+
+# create output dir if not exists
+mkdir -p "${CHECK_COLUMNS_OUTPUT_DIR}"
+
+start_timestamp=$(date +%s)
+
+declare -A expected_columns
+expected_columns=(
+  ["public.nodes"]="5"
+  ["public.blocks"]="3"
+  # ["eth.access_list_elements"]="?" # skipping as values include ','
+  ["eth.log_cids"]="12"
+  ["eth.state_accounts"]="7"
+  ["eth.storage_cids"]="9"
+  ["eth.uncle_cids"]="7"
+  ["eth.header_cids"]="16"
+  ["eth.receipt_cids"]="10"
+  ["eth.state_cids"]="8"
+  ["eth.transaction_cids"]="11"
+)
+
+for table_name in "${!expected_columns[@]}";
+do
+  if [ "${table_name}" = "public.blocks" ];
+  then
+    command="$(dirname "$0")/find-bad-rows.sh -i ${CHECK_COLUMNS_INPUT_DEDUP_DIR}/deduped-${table_name}.csv -c ${expected_columns[${table_name}]} -d true -o ${CHECK_COLUMNS_OUTPUT_DIR}/${table_name}.txt"
+  else
+    command="$(dirname "$0")/find-bad-rows.sh -i ${CHECK_COLUMNS_INPUT_DIR}/${table_name}.csv -c ${expected_columns[${table_name}]} -d true -o ${CHECK_COLUMNS_OUTPUT_DIR}/${table_name}.txt"
+  fi
+
+  echo "${table_name}"
+  echo Start: "$(date)"
+  eval "${command}"
+  echo End: "$(date)"
+  echo Total bad rows: $(wc -l ${CHECK_COLUMNS_OUTPUT_DIR}/${table_name}.txt)
+  echo
+done
+
+difference=$(($(date +%s)-start_timestamp))
+echo Time taken: $((difference/86400)):$(date -d@${difference} -u +%H:%M:%S)
+echo
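Note: each of these scripts takes the env file as its only argument, e.g. ./scripts/check-columns.sh .env with .env copied from scripts/.env.example; because of the exec redirection near the top, progress is written to the configured log file rather than the terminal.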
46	scripts/count-lines.sh	Executable file
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# Requires:
+# COUNT_LINES_LOG
+# COUNT_LINES_INPUT_DIR
+# COUNT_LINES_OUTPUT_FILE
+
+# env file arg
+ENV=$1
+echo "Using env file: ${ENV}"
+
+# read env file
+export $(grep -v '^#' ${ENV} | xargs)
+
+# redirect stdout/stderr to a file
+exec >"${COUNT_LINES_LOG}" 2>&1
+
+start_timestamp=$(date +%s)
+
+table_names=(
+  "public.nodes"
+  "public.blocks"
+  "eth.access_list_elements"
+  "eth.log_cids"
+  "eth.state_accounts"
+  "eth.storage_cids"
+  "eth.uncle_cids"
+  "eth.header_cids"
+  "eth.receipt_cids"
+  "eth.state_cids"
+  "eth.transaction_cids"
+)
+
+echo "Row counts:" > "${COUNT_LINES_OUTPUT_FILE}"
+
+for table_name in "${table_names[@]}";
+do
+  echo "${table_name}";
+  echo Start: "$(date)"
+  wc -l "${COUNT_LINES_INPUT_DIR}"/"${table_name}.csv" >> "${COUNT_LINES_OUTPUT_FILE}"
+  echo End: "$(date)"
+  echo
+done
+
+difference=$(($(date +%s)-start_timestamp))
+echo Time taken: $((difference/86400)):$(date -d@${difference} -u +%H:%M:%S)
35	scripts/dedup.sh	Executable file
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Requires:
+# DEDUP_LOG
+# DEDUP_INPUT_DIR
+# DEDUP_OUTPUT_DIR
+# DEDUP_SORT_DIR
+
+# env file arg
+ENV=$1
+echo "Using env file: ${ENV}"
+
+# read env file
+export $(grep -v '^#' ${ENV} | xargs)
+
+# redirect stdout/stderr to a file
+exec >"${DEDUP_LOG}" 2>&1
+
+# create output dir if not exists
+mkdir -p "${DEDUP_OUTPUT_DIR}"
+
+start_timestamp=$(date +%s)
+
+echo "public.blocks"
+echo Start: "$(date)"
+sort -T "${DEDUP_SORT_DIR}" -u "${DEDUP_INPUT_DIR}"/public.blocks.csv -o "${DEDUP_OUTPUT_DIR}"/deduped-public.blocks.csv
+echo End: "$(date)"
+echo Total deduped rows: $(wc -l ${DEDUP_OUTPUT_DIR}/deduped-public.blocks.csv)
+echo
+
+difference=$(($(date +%s)-start_timestamp))
+echo Time taken: $((difference/86400)):$(date -d@${difference} -u +%H:%M:%S)
+
+# NOTE: This script currently only dedups public.blocks output file.
+# If the output contains blocks that were statediffed more than once, output files for other tables will have to be deduped as well.
43
scripts/find-bad-rows.sh
Executable file
43
scripts/find-bad-rows.sh
Executable file
@ -0,0 +1,43 @@
#!/bin/bash

# flags
# -i <input-file>: Input data file path
# -c <expected-columns>: Expected number of columns in each row of the input file
# -o [output-file]: Output destination file path (default: STDOUT)
# -d [include-data]: Whether to include the data row in output (true | false) (default: false)

# eg: ./scripts/find-bad-rows.sh -i eth.state_cids.csv -c 8 -o res.txt -d true
# output: 1 9 1500000,xxxxxxxx,0x83952d392f9b0059eea94b10d1a095eefb1943ea91595a16c6698757127d4e1c,,
# baglacgzasvqcntdahkxhufdnkm7a22s2eetj6mx6nzkarwxtkvy4x3bubdgq,\x0f,0,f,/blocks/,
# DMQJKYBGZRQDVLT2CRWVGPQNNJNCCJU7GL7G4VAI3LZVK4OL5Q2ARTI

while getopts i:c:o:d: OPTION
do
  case "${OPTION}" in
    i) inputFile=${OPTARG};;
    c) expectedColumns=${OPTARG};;
    o) outputFile=${OPTARG};;
    d) data=${OPTARG};;
  esac
done

timestamp=$(date +%s)

# if data requested, dump row number, number of columns and the row
if [ "${data}" = true ] ; then
  if [ -z "${outputFile}" ]; then
    awk -F"," "NF!=${expectedColumns} {print NR, NF, \$0}" < ${inputFile}
  else
    awk -F"," "NF!=${expectedColumns} {print NR, NF, \$0}" < ${inputFile} > ${outputFile}
  fi
# else, dump only row number, number of columns
else
  if [ -z "${outputFile}" ]; then
    awk -F"," "NF!=${expectedColumns} {print NR, NF}" < ${inputFile}
  else
    awk -F"," "NF!=${expectedColumns} {print NR, NF}" < ${inputFile} > ${outputFile}
  fi
fi

difference=$(($(date +%s)-timestamp))
echo Time taken: $(date -d@${difference} -u +%H:%M:%S)
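One caveat worth knowing: the check is a plain comma count, so awk -F"," also splits on commas inside quoted CSV fields and can flag rows that are actually well-formed. A minimal illustration (not part of the script):

# a 3-column row whose middle field contains a quoted comma
printf '%s\n' 'a,"b,c",d' | awk -F"," 'NF!=3 {print NR, NF}'
# prints: 1 4

If the dumped CSVs never emit quoted fields containing commas this is a non-issue; otherwise rows reported by this script deserve a second look before being treated as corrupt.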
75
scripts/timescaledb-import.sh
Executable file
@ -0,0 +1,75 @@
#!/bin/bash

# Requires:
# IMPORT_LOG
# IMPORT_INPUT_DIR
# IMPORT_INPUT_DEDUP_DIR
# TIMESCALEDB_WORKERS
# DATABASE_USER
# DATABASE_HOSTNAME
# DATABASE_PORT
# DATABASE_NAME
# DATABASE_PASSWORD

DEFAULT_TIMESCALEDB_WORKERS=8

# env file arg
ENV=$1
echo "Using env file: ${ENV}"

# read env file
export $(grep -v '^#' ${ENV} | xargs)

if [ "$TIMESCALEDB_WORKERS" = "" ]; then
  TIMESCALEDB_WORKERS=$DEFAULT_TIMESCALEDB_WORKERS
fi

# redirect stdout/stderr to a file
exec >"${IMPORT_LOG}" 2>&1

start_timestamp=$(date +%s)

declare -a tables
# schema-table-copyOptions
tables=(
  "public-nodes"
  "public-blocks"
  "eth-access_list_elements"
  "eth-log_cids-FORCE NOT NULL topic0, topic1, topic2, topic3 CSV"
  "eth-state_accounts"
  "eth-storage_cids-FORCE NOT NULL storage_leaf_key CSV"
  "eth-uncle_cids"
  "eth-header_cids"
  "eth-receipt_cids-FORCE NOT NULL post_state, contract, contract_hash CSV"
  "eth-state_cids-FORCE NOT NULL state_leaf_key CSV"
  "eth-transaction_cids-FORCE NOT NULL dst CSV"
)

for elem in "${tables[@]}";
do
  IFS='-' read -a arr <<< "${elem}"

  # public.blocks is imported from the deduped file; all other tables from the raw dumps
  if [ "${arr[0]}.${arr[1]}" = "public.blocks" ];
  then
    copy_command="timescaledb-parallel-copy --connection \"host=${DATABASE_HOSTNAME} port=${DATABASE_PORT} user=${DATABASE_USER} password=${DATABASE_PASSWORD} sslmode=disable\" --db-name ${DATABASE_NAME} --schema ${arr[0]} --table ${arr[1]} --file ${IMPORT_INPUT_DEDUP_DIR}/deduped-${arr[0]}.${arr[1]}.csv --workers ${TIMESCALEDB_WORKERS} --reporting-period 300s"
  else
    copy_command="timescaledb-parallel-copy --connection \"host=${DATABASE_HOSTNAME} port=${DATABASE_PORT} user=${DATABASE_USER} password=${DATABASE_PASSWORD} sslmode=disable\" --db-name ${DATABASE_NAME} --schema ${arr[0]} --table ${arr[1]} --file ${IMPORT_INPUT_DIR}/${arr[0]}.${arr[1]}.csv --workers ${TIMESCALEDB_WORKERS} --reporting-period 300s"
  fi

  if [ "${arr[2]}" != "" ];
  then
    copy_with_options="${copy_command} --copy-options \"${arr[2]}\""
  else
    copy_with_options=${copy_command}
  fi

  echo "${arr[0]}.${arr[1]}"
  echo Start: "$(date)"
  eval "${copy_with_options}"
  echo End: "$(date)"
  echo
done

difference=$(($(date +%s)-start_timestamp))
echo Time taken: $((difference/86400)):$(date -d@${difference} -u +%H:%M:%S)
echo
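Each tables entry is split on '-' into up to three parts: schema, table, and optional COPY options. For example, "eth-log_cids-FORCE NOT NULL topic0, topic1, topic2, topic3 CSV" becomes schema eth, table log_cids, with --copy-options "FORCE NOT NULL topic0, topic1, topic2, topic3 CSV". A minimal sketch of an env file for this script (all values are hypothetical examples; only the variable names come from the script):

# import.env -- example values only
IMPORT_LOG=/var/log/timescaledb-import.log
IMPORT_INPUT_DIR=/data/statediff-output
IMPORT_INPUT_DEDUP_DIR=/data/statediff-deduped
TIMESCALEDB_WORKERS=16        # optional; defaults to 8 when unset
DATABASE_USER=postgres
DATABASE_HOSTNAME=localhost
DATABASE_PORT=5432
DATABASE_NAME=vulcanize_public
DATABASE_PASSWORD=changeme    # example only

# ./scripts/timescaledb-import.sh import.env

Note that the connection string, password included, is passed on the timescaledb-parallel-copy command line, so it is briefly visible to other local users via ps; the env file should be treated as sensitive.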