Compare commits

...

56 Commits

Author SHA1 Message Date
9e483fc9f7 Updates for Cancun fork (#10)
- Bumps Geth to v1.14
- Adds support for pebbledb

Reviewed-on: #10
2024-08-05 13:17:00 +00:00
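In practice, pebbledb support means the snapshot source chaindata can use either backend; a minimal sketch of opening it read-only via go-ethereum's rawdb (OpenOptions fields per recent geth releases — verify against the pinned version; paths are illustrative):
```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// Open an existing geth chaindata directory. Type selects the backend;
	// leaving it empty lets rawdb detect a preexisting database.
	db, err := rawdb.Open(rawdb.OpenOptions{
		Type:              "pebble", // or "leveldb"
		Directory:         "/data/geth/chaindata",
		AncientsDirectory: "/data/geth/chaindata/ancient",
		Namespace:         "eth/db/chaindata",
		Cache:             512,
		Handles:           256,
		ReadOnly:          true, // a snapshot only reads the source db
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```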
434c9f6b48 Fix checksum build error. 2023-10-27 17:06:47 -05:00
bb49906860 Fix state/storage counter metrics. (#8)
The counters for state, storage, and code nodes no longer work after 00141776bf.

The structure is sufficiently different that I did not see a simple way to restore the code counters, but I did restore the state and storage counters.

Reviewed-on: #8
2023-10-09 21:17:20 +00:00
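For background, these are plain Prometheus counters bumped once per processed node; a minimal sketch with the standard Go client (metric names follow the README's Monitoring section; the wiring here is hypothetical, not the repo's actual code):
```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

// Counters named after the metrics documented in the README's Monitoring
// section; the service increments them as each node is processed.
var (
	stateNodeCount = promauto.NewCounter(prometheus.CounterOpts{
		Name: "state_node_count",
		Help: "Number of state nodes processed",
	})
	storageNodeCount = promauto.NewCounter(prometheus.CounterOpts{
		Name: "storage_node_count",
		Help: "Number of storage nodes processed",
	})
)

func main() {
	// Simulate a traversal visiting two state nodes and one storage node.
	stateNodeCount.Inc()
	stateNodeCount.Inc()
	storageNodeCount.Inc()
	fmt.Println(testutil.ToFloat64(stateNodeCount))   // 2
	fmt.Println(testutil.ToFloat64(storageNodeCount)) // 1
}
```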
0c323433af Missing ENV bindings. (#9)
In passing, I noticed that these options were no longer being bound to their defined ENV variables.

Reviewed-on: #9
Co-authored-by: Thomas E Lackey <telackey@bozemanpass.com>
Co-committed-by: Thomas E Lackey <telackey@bozemanpass.com>
2023-10-09 20:23:53 +00:00
b4367dff3b 6: Restore previous scheme for estimating progress. (#7)
```
ipld_eth_state_snapshot_stats_tracked_iterator_1 37.5
ipld_eth_state_snapshot_stats_tracked_iterator_10 37.5
ipld_eth_state_snapshot_stats_tracked_iterator_11 37.5
ipld_eth_state_snapshot_stats_tracked_iterator_12 37.5
ipld_eth_state_snapshot_stats_tracked_iterator_13 37.5
ipld_eth_state_snapshot_stats_tracked_iterator_14 37.5
ipld_eth_state_snapshot_stats_tracked_iterator_15 37.5
ipld_eth_state_snapshot_stats_tracked_iterator_16 0
ipld_eth_state_snapshot_stats_tracked_iterator_2 37.5
ipld_eth_state_snapshot_stats_tracked_iterator_3 37.5
ipld_eth_state_snapshot_stats_tracked_iterator_4 37.5
ipld_eth_state_snapshot_stats_tracked_iterator_5 37.5
ipld_eth_state_snapshot_stats_tracked_iterator_6 37.5
ipld_eth_state_snapshot_stats_tracked_iterator_7 37.5
ipld_eth_state_snapshot_stats_tracked_iterator_8 37.5
ipld_eth_state_snapshot_stats_tracked_iterator_9 37.5
```

Reviewed-on: #7
Co-authored-by: Thomas E Lackey <telackey@bozemanpass.com>
Co-committed-by: Thomas E Lackey <telackey@bozemanpass.com>
2023-10-06 23:20:05 +00:00
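These percentages follow from treating each bounded iterator's current path as a base-16 fraction of its keyspace slice; a toy illustration (not the repository's actual code):
```go
package main

import "fmt"

// pathProgress interprets a trie path (one nibble per byte) as a base-16
// fraction of the keyspace and returns it as a percentage.
func pathProgress(path []byte) float64 {
	progress := 0.0
	scale := 1.0
	for _, nibble := range path {
		scale /= 16
		progress += float64(nibble) * scale
	}
	return progress * 100
}

func main() {
	// An iterator positioned at path [6] has covered 6/16 of its range,
	// matching the 37.5 values in the sample metrics above.
	fmt.Println(pathProgress([]byte{6})) // 37.5
}
```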
00141776bf Refactor to use statediff plugin (#1)
* Refactors to replace most of the code with the statediff plugin.
* Adds basic CI test workflows for Gitea
* Refactors fixtures to use https://git.vdb.to/cerc-io/eth-testing
* Renames env vars for consistency with flags and other services:
  - LOGRUS_{LEVEL,FILE} => LOG_LEVEL, etc.
  - LVL_DB_PATH => LEVELDB_PATH
  - ANCIENT_DB_PATH => LEVELDB_ANCIENT
  - These will need to be updated wherever they are used (a migration sketch follows this entry)

Reviewed-on: #1
2023-09-29 18:43:26 +00:00
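For deployments migrating across the rename, reading the new names with a fallback to the legacy ones is one option; a hypothetical helper, not part of the repo:
```go
package main

import (
	"fmt"
	"os"
)

// getenvWithFallback prefers the renamed variable and falls back to the
// legacy name so old deployment env files keep working during migration.
func getenvWithFallback(newName, legacyName string) string {
	if v, ok := os.LookupEnv(newName); ok {
		return v
	}
	return os.Getenv(legacyName)
}

func main() {
	dbPath := getenvWithFallback("ETHDB_PATH", "LVL_DB_PATH")
	ancient := getenvWithFallback("ETHDB_ANCIENT", "ANCIENT_DB_PATH")
	logLevel := getenvWithFallback("LOG_LEVEL", "LOGRUS_LEVEL")
	fmt.Println(dbPath, ancient, logLevel)
}
```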
4e0b481ea5
Update to geth v1.11.6-statediff-5.0.8 (#79)
* Update to geth v1.11.6-statediff-5.0.8
2023-07-21 19:05:01 -05:00
Ian Norden
da02e5ac12
Merge pull request #77 from cerc-io/ian/v5_dev
fix: err overshadowing in defers
2023-06-01 08:23:02 -05:00
i-norden
fe88e90181 avoid overshadowing err in defers 2023-05-31 18:08:02 -05:00
768357293c
Add progress counter to prometheus output (#76)
* Add a progress counter by checking the distance already traversed from the startPath to endPath in a bounded iterator vs the estimated number of iterations.
2023-05-23 11:23:58 -05:00
Ian Norden
dd86f02997
Merge pull request #75 from cerc-io/ian/v5_dev
fix overshadowing of snap.Tx
2023-05-18 09:37:26 -05:00
i-norden
891fca89bd fix overshadowing of snap.Tx 2023-05-17 08:16:50 -05:00
Ian Norden
0cab03b98e
Merge pull request #74 from cerc-io/ian/v5_dev
update geth and ipld-eth-db deps
2023-05-15 11:38:24 -05:00
i-norden
81e286399b update geth and ipld-eth-db deps 2023-05-15 11:32:29 -05:00
Ian Norden
7e426b2ca5
Merge pull request #71 from cerc-io/ian/v5_dev
v5 upgrade
2023-05-12 11:29:14 -05:00
i-norden
98015c4c87 fix NUMERIC formatting error 2023-05-12 11:11:00 -05:00
i-norden
6de5b9e96c update misc 2023-05-12 10:41:18 -05:00
i-norden
2dd9221467 update go mod 2023-05-12 10:27:20 -05:00
i-norden
ea4c1042c4 update service_test with new fixtures 2023-05-12 10:07:28 -05:00
i-norden
7af17b1851 update test helpers 2023-05-12 10:07:12 -05:00
i-norden
4081787b03 fix service.go: prevent panics on subtrie iterators with odd-length prefix-paths 2023-05-12 10:06:54 -05:00
i-norden
3d8064ccbb finish updating test fixtures 2023-05-12 09:58:46 -05:00
i-norden
fdb105b769 update compose 2023-05-10 13:11:42 -05:00
i-norden
32b637671d use v5.0.2 statediffing geth 2023-05-10 13:11:23 -05:00
i-norden
4ee75a3371 update publisher interface 2023-05-10 13:11:12 -05:00
82176ea41c
Add Docker files (#73) 2023-05-09 10:33:32 -05:00
i-norden
0a04baab17 bump db version in docker compose 2023-04-12 13:13:04 -05:00
i-norden
4245b80a4a go mod tidy 2023-04-12 13:13:04 -05:00
i-norden
22ecd4065a update service 2023-04-12 13:13:04 -05:00
i-norden
382ad92701 remove in-place snapshot stuff 2023-04-12 13:13:04 -05:00
i-norden
14b1180161 update Block1 and Chain2_Block32 test fixtures 2023-04-12 13:13:04 -05:00
i-norden
ead007f159 update csv file publisher 2023-04-12 13:13:04 -05:00
i-norden
f1a980f37c update direct pg publisher 2023-04-12 13:13:04 -05:00
i-norden
f83ab82424 use types/models exported from vdb geth 2023-04-12 13:13:04 -05:00
Ian Norden
fb5a95d874
Merge pull request #72 from cerc-io/ian/v5_rebase
Rebase
2023-04-12 13:12:35 -05:00
97ee99a449 Update schema (#61)
Updates table to add header_cids.uncles_hash
2023-04-12 13:09:58 -05:00
Ian Norden
b6d7695536
Merge pull request #70 from cerc-io/ian/v4_dev
update db version
2023-04-11 09:06:03 -05:00
i-norden
3f3e77cbac update db version 2023-04-11 09:01:50 -05:00
Ian Norden
e63ffbf0ad
Merge pull request #69 from cerc-io/ian/v4_dev
Update to use v4 vdb geth v1.11.5
2023-03-31 12:50:23 -05:00
i-norden
6ede522ae0 missing .meta files in fixtures 2023-03-31 12:36:30 -05:00
i-norden
5c13d59515 refactor to work with v4 vdb geth v1.11.5 2023-03-31 10:39:27 -05:00
i-norden
6fe54aa8b4 use go1.19 2023-03-31 10:39:13 -05:00
i-norden
c9dd85488b bump vdb v4 geth to v1.11.5 2023-03-31 10:39:00 -05:00
Michael
ac85fe29eb
dependency updates for geth 1.10.26 (#67) 2022-11-08 14:11:11 -05:00
c270f39de9
Update schema (#61)
Updates table to add header_cids.uncles_hash
2022-10-13 13:19:47 -05:00
Michael
9ea78c4ecf
dependency updates for geth 1.10.25 (#65) 2022-09-26 09:14:15 -04:00
Michael
989bd1c0f1
Cerc refactor (#64)
* cerc refactor waiting on unpublished dependencies

* more cerc_refactor with published ipld-eth-db unstable

* TearDownDB refactor

* missed second publisher_test TearDownDB
2022-09-20 13:47:34 -04:00
Michael
3f93a989dc
Merge pull request #63 from vulcanize/rebase-1.10.23-wip
updates for geth rebase on 1.10.23
2022-09-02 15:39:23 -04:00
Michael Shaw
321e37584d updates for geth rebase on 1.10.23 2022-09-02 15:21:29 -04:00
prathamesh0
be544a3424
Add helper scripts for data dump correction (#57)
* Add a script to find bad data in CSV file dumps

* Add a script to delete bad rows from CSV file dumps

* Add instructions to run the scripts

* Reorganize instructions
2022-08-17 15:14:14 +05:30
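The idea behind `find-bad-rows.sh` is simply to flag rows whose column count deviates from the expected schema width; a rough Go equivalent (hypothetical port; naive comma splitting, so quoted fields would need extra care; filename and column count are illustrative):
```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	const expectedColumns = 8 // illustrative; use the target table's width
	f, err := os.Open("eth.state_cids.csv")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	// Dump rows can be long (hex-encoded node values), so raise the limit.
	scanner.Buffer(make([]byte, 0, 1024*1024), 16*1024*1024)
	for lineNo := 1; scanner.Scan(); lineNo++ {
		if n := len(strings.Split(scanner.Text(), ",")); n != expectedColumns {
			fmt.Printf("line %d: %d columns\n", lineNo, n)
		}
	}
}
```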
Ian Norden
05aeeab581
Account selective snapshot (#46)
* snapshotter ignores nodes not along paths derived from a list of account addresses, if one is provided

* config and env updates

* cmd update

* Encode watched address path bytes to hex for comparison

* actually ignore the subtries that are not along the paths of interest

* Fixes for account selective snapshot

* Use non-concurrent iterator when having a single worker

* Only index root node when starting path of an iterator is nil

* Upgrade deps

* Avoid tracking iterators and skip recovery test

* Fix recovery mechanism, use sync Map instead of buffered channels

* Add test for account selective snapshot

* Continue traversal with concurrent iterators with starting path nil

* Use errgroup to simplify error handling with concurrent iterators

* Check if all the nodes are indexed in the recovery test

* Use concurrency safe sync Map in account selective snapshot test

* Only track concurrent iterators and refactor code

* Fix node and recovered path comparison

* Revert back to using buffered channels for tracking iterators

* Add a metric to monitor number of active iterators

* Update docs

* Update seeked path after node is processed

* Return error on context cancellation from subtrie iteration

* Add tests for account selective snapshot recovery

* Explicitly enforce concurrent iterator bounds to avoid duplicate nodes

* Update full snapshot test to check nodes being indexed

* Refactor code to simplify snapshot logic

* Remove unnecessary function argument

* Use ctx cancellation for handling signals

* Add descriptive comments

Co-authored-by: prathamesh0 <prathamesh.musale0@gmail.com>
2022-08-03 17:05:04 +05:30
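For context on the path filtering: each watched address hashes to a 32-byte leaf key whose nibbles form the trie path of interest; a simplified sketch using go-ethereum primitives (illustrative only — geth's real keybytesToHex also appends a terminator nibble):
```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// keybytesToHex expands each byte of the hashed key into two nibbles,
// the form in which trie paths are compared during traversal.
func keybytesToHex(key []byte) []byte {
	nibbles := make([]byte, len(key)*2)
	for i, b := range key {
		nibbles[i*2] = b / 16
		nibbles[i*2+1] = b % 16
	}
	return nibbles
}

func main() {
	addr := common.HexToAddress("0x825a6eec09e44Cb0fa19b84353ad0f7858d7F61a")
	leafKey := crypto.Keccak256(addr.Bytes())
	// Subtries not along this nibble path can be skipped entirely.
	fmt.Printf("watched path: %x\n", keybytesToHex(leafKey))
}
```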
prathamesh0
5002c82038
Upgrade dependencies (#56) 2022-08-03 16:11:38 +05:30
Michael
289fb63568
Merge pull request #54 from vulcanize/release-v4.1.1-alpha
update for go-ethereum 1.10.20
2022-07-20 17:18:58 -04:00
Michael Shaw
257fac1a0c update for go-ethereum 1.10.20 2022-07-19 15:12:44 -04:00
prathamesh0
c0a7fdf9e8
Fix typo in config format in README (#53) 2022-07-18 16:48:58 +05:30
prathamesh0
2af26ad583
Add instructions to import snapshot data into database (#52)
* Add instructions to import snapshot data into database

* Add monitoring and update data processing in README

* Update instructions to import snapshot
2022-07-18 15:30:23 +05:30
73 changed files with 3335 additions and 3668 deletions

143
.gitea/workflows/test.yml Normal file

@ -0,0 +1,143 @@
name: Test
on:
pull_request:
branches: '*'
push:
branches:
- main
env:
CANONICAL_VERSION: v5.0.4-alpha
ETH_TESTING_REF: v0.5.1
jobs:
build:
name: Build Docker image
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Build docker image
run: docker build .
unit-test:
name: Run unit tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v3
with:
go-version-file: go.mod
check-latest: true
- name: Install test fixtures
uses: actions/checkout@v3
with:
repository: cerc-io/eth-testing
path: ./fixtures
ref: ${{ env.ETH_TESTING_REF }}
- name: Run unit tests
run: make test
integration-test:
name: Run integration tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v3
with:
go-version-file: go.mod
check-latest: true
- name: Install test fixtures
uses: actions/checkout@v3
with:
repository: cerc-io/eth-testing
path: ./fixtures
ref: ${{ env.ETH_TESTING_REF }}
- name: Build package
run: go build .
- name: Run DB container
run: docker compose -f test/compose.yml up --wait
# Run a sanity test against the fixture data
# Complete integration tests are TODO
- name: Run basic integration test
env:
SNAPSHOT_MODE: postgres
ETHDB_PATH: ./fixtures/chains/data/postmerge1/geth/chaindata
ETH_GENESIS_BLOCK: 0x66ef6002e201cfdb23bd3f615fcf41e59d8382055e5a836f8d4c2af0d484647c
SNAPSHOT_BLOCK_HEIGHT: 170
run: |
until
ready_query='select max(version_id) from goose_db_version;'
version=$(docker exec -e PGPASSWORD=password test-ipld-eth-db-1 \
psql -tA cerc_testing -U vdbm -c "$ready_query")
[[ "$version" -ge 21 ]]
do
echo "Waiting for ipld-eth-db..."
sleep 3
done
./ipld-eth-state-snapshot --config test/ci-config.toml stateSnapshot
count_results() {
query="select count(*) from $1;"
docker exec -e PGPASSWORD=password test-ipld-eth-db-1 \
psql -tA cerc_testing -U vdbm -c "$query"
}
set -x
[[ "$(count_results eth.header_cids)" = 1 ]]
[[ "$(count_results eth.state_cids)" = 264 ]]
[[ "$(count_results eth.storage_cids)" = 371 ]]
compliance-test:
name: Run compliance tests (disabled)
# Schema has been updated, so compliance tests are disabled until we have a meaningful way to
# compare to previous results.
if: false
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
path: ./ipld-eth-state-snapshot
- uses: actions/setup-go@v3
with:
go-version-file: ./ipld-eth-state-snapshot/go.mod
check-latest: true
- name: Install test fixtures
uses: actions/checkout@v3
with:
repository: cerc-io/eth-testing
path: ./fixtures
ref: ${{ env.ETH_TESTING_REF }}
- name: Build current version
working-directory: ./ipld-eth-state-snapshot
run: go build -o ../snapshot-current .
- name: Checkout canonical version
uses: actions/checkout@v3
with:
path: ./ipld-eth-state-snapshot-canonical
ref: ${{ env.CANONICAL_VERSION }}
- name: Build canonical version
working-directory: ./ipld-eth-state-snapshot-canonical
run: go build -o ../snapshot-canonical .
- name: Run DB container
working-directory: ./ipld-eth-state-snapshot
run: docker compose -f test/compose.yml up --wait
- name: Compare snapshot output
env:
SNAPSHOT_BLOCK_HEIGHT: 200
ETHDB_PATH: ./fixtures/chains/data/premerge2/geth/chaindata
ETHDB_ANCIENT: ./fixtures/chains/data/premerge2/geth/chaindata/ancient
ETH_GENESIS_BLOCK: "0x8a3c7cddacbd1ab4ec1b03805fa2a287f3a75e43d87f4f987fcc399f5c042614"
run: |
until
ready_query='select max(version_id) from goose_db_version;'
version=$(docker exec -e PGPASSWORD=password test-ipld-eth-db-1 \
psql -tA cerc_testing -U vdbm -c "$ready_query")
[[ "$version" -ge 21 ]]
do sleep 1; done
./ipld-eth-state-snapshot/scripts/compare-snapshots.sh \
./snapshot-canonical ./snapshot-current


@ -1,30 +0,0 @@
name: Docker Build
on: [pull_request]
jobs:
test:
name: Run unit tests
runs-on: ubuntu-latest
env:
GOPATH: /tmp/go
GO111MODULE: on
steps:
- name: Create GOPATH
run: mkdir -p /tmp/go
- uses: actions/setup-go@v3
with:
go-version: ">=1.18.0"
check-latest: true
- name: Checkout code
uses: actions/checkout@v2
- name: Run database
run: docker-compose up -d
- name: Run unit tests
run: |
sleep 45
make dbtest

5
.gitignore vendored

@ -1,5 +1,6 @@
 .idea/
 .vscode/
 ipld-eth-state-snapshot
-mocks/
-.vscode
+output_dir*/
+log_file
+recovery_file

31
Dockerfile Normal file

@ -0,0 +1,31 @@
FROM golang:1.21-alpine AS builder
RUN apk add --no-cache git gcc musl-dev binutils-gold
# DEBUG
RUN apk add busybox-extras
WORKDIR /ipld-eth-state-snapshot
ARG GIT_VDBTO_TOKEN
COPY go.mod go.sum ./
RUN if [ -n "$GIT_VDBTO_TOKEN" ]; then git config --global url."https://$GIT_VDBTO_TOKEN:@git.vdb.to/".insteadOf "https://git.vdb.to/"; fi && \
go mod download && \
rm -f ~/.gitconfig
COPY . .
RUN go build -ldflags '-extldflags "-static"' -o ipld-eth-state-snapshot .
FROM alpine
RUN apk --no-cache add su-exec bash
WORKDIR /app
COPY --from=builder /ipld-eth-state-snapshot/startup_script.sh .
COPY --from=builder /ipld-eth-state-snapshot/environments environments
# keep binaries immutable
COPY --from=builder /ipld-eth-state-snapshot/ipld-eth-state-snapshot ipld-eth-state-snapshot
ENTRYPOINT ["/app/startup_script.sh"]


@ -1,28 +1,13 @@
-BIN = $(GOPATH)/bin
-
-## Mockgen tool
-MOCKGEN = $(BIN)/mockgen
-$(BIN)/mockgen:
-	go install github.com/golang/mock/mockgen@v1.6.0
-
-MOCKS_DIR = $(CURDIR)/mocks
-
-.PHONY: mocks test
-
-mocks: $(MOCKGEN) mocks/snapshot/publisher.go
-
-mocks/snapshot/publisher.go: pkg/types/publisher.go
-	$(MOCKGEN) -package snapshot_mock -destination $@ -source $< Publisher Tx
-
-clean:
-	rm -f mocks/snapshot/publisher.go
-
-build:
-	go fmt ./...
-	go build
+MOCKGEN ?= mockgen
+MOCKS_DIR := $(CURDIR)/internal/mocks
+
+mocks: $(MOCKS_DIR)/gen_indexer.go
+.PHONY: mocks
+
+$(MOCKS_DIR)/gen_indexer.go:
+	$(MOCKGEN) --package mocks --destination $@ \
+		--mock_names Indexer=MockgenIndexer \
+		github.com/cerc-io/plugeth-statediff/indexer Indexer
 
 test: mocks
 	go clean -testcache && go test -p 1 -v ./...
-
-dbtest: mocks
-	go clean -testcache && TEST_WITH_DB=true go test -p 1 -v ./...

220
README.md

@ -1,67 +1,209 @@
 # ipld-eth-state-snapshot
-> Tool for extracting the entire Ethereum state at a particular block height from leveldb into Postgres-backed IPFS
+> Tool for extracting the entire Ethereum state at a particular block height from a cold database into Postgres-backed IPFS
 
 [![Go Report Card](https://goreportcard.com/badge/github.com/vulcanize/ipld-eth-state-snapshot)](https://goreportcard.com/report/github.com/vulcanize/ipld-eth-state-snapshot)
 
-## Usage
-For state snapshot from LevelDB
-```bash
-./ipld-eth-state-snapshot stateSnapshot --config={path to toml config file}
-```
-For in-place snapshot in database
-```bash
-./ipld-eth-state-snapshot inPlaceStateSnapshot --config={path to toml config file}
-```
-### Config
+## Setup
+* Build the binary:
+```bash
+make build
+```
+## Configuration
 Config format:
 ```toml
 [snapshot]
-mode = "file" # indicates output mode ("postgres" or "file")
-workers = 4 # degree of concurrency, the state trie is subdivided into sectiosn that are traversed and processed concurrently
-blockHeight = -1 # blockheight to perform the snapshot at (-1 indicates to use the latest blockheight found in leveldb)
+mode = "file" # indicates output mode <postgres | file>
+workers = 4 # degree of concurrency: the state trie is subdivided into sections that are traversed and processed concurrently
+blockHeight = -1 # blockheight to perform the snapshot at (-1 indicates to use the latest blockheight found in ethdb)
 recoveryFile = "recovery_file" # specifies a file to output recovery information on error or premature closure
-accounts = [] # list of accounts (addresses) to take the snapshot for
+accounts = [] # SNAPSHOT_ACCOUNTS
 
-[leveldb]
-path = "/Users/user/Library/Ethereum/geth/chaindata" # path to geth leveldb
-ancient = "/Users/user/Library/Ethereum/geth/chaindata/ancient" # path to geth ancient database
+[ethdb]
+# path to geth ethdb
+path = "/Users/user/Library/Ethereum/geth/chaindata" # ETHDB_PATH
+# path to geth ancient database
+ancient = "/Users/user/Library/Ethereum/geth/chaindata/ancient" # ETHDB_ANCIENT
 
 [database]
-name = "vulcanize_public" # postgres database name
-hostname = "localhost" # postgres host
-port = 5432 # postgres port
-user = "postgres" # postgres user
-password = "" # postgres password
+# when operating in 'postgres' output mode
+# db credentials
+name = "vulcanize_public" # DATABASE_NAME
+hostname = "localhost" # DATABASE_HOSTNAME
+port = 5432 # DATABASE_PORT
+user = "postgres" # DATABASE_USER
+password = "" # DATABASE_PASSWORD
 
 [file]
-outputDir = "output_dir/" # when operating in 'file' output mode, this is the directory the files are written to
+# when operating in 'file' output mode
+# directory the CSV files are written to
+outputDir = "output_dir/" # FILE_OUTPUT_DIR
 
 [log]
 level = "info" # log level (trace, debug, info, warn, error, fatal, panic) (default: info)
-file = "log_file" # file path for logging
+file = "log_file" # file path for logging, leave unset to log to stdout
 
 [prom]
-metrics = true # enable prometheus metrics (default: false)
-http = true # enable prometheus http service (default: false)
-httpAddr = "0.0.0.0" # prometheus http host (default: 127.0.0.1)
-httpPort = 9101 # prometheus http port (default: 8086)
-dbStats = true # enable prometheus db stats (default: false)
+# prometheus metrics
+metrics = true # enable prometheus metrics (default: false)
+http = true # enable prometheus http service (default: false)
+httpAddr = "0.0.0.0" # prometheus http host (default: 127.0.0.1)
+httpPort = 9101 # prometheus http port (default: 8086)
+dbStats = true # enable prometheus db stats (default: false)
 
+# node info
 [ethereum]
-clientName = "Geth" # $ETH_CLIENT_NAME
-nodeID = "arch1" # $ETH_NODE_ID
-networkID = "1" # $ETH_NETWORK_ID
-chainID = "1" # $ETH_CHAIN_ID
-genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" # $ETH_GENESIS_BLOCK
+# node info
+clientName = "Geth" # ETH_CLIENT_NAME
+nodeID = "arch1" # ETH_NODE_ID
+networkID = "1" # ETH_NETWORK_ID
+chainID = "1" # ETH_CHAIN_ID
+genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" # ETH_GENESIS_BLOCK
 ```
+
+> **Note:** previous versions of this service used different variable names. To update, change the following:
+> * `LVL_DB_PATH`, `LEVELDB_PATH` => `ETHDB_PATH`
+> * `ANCIENT_DB_PATH`, `LEVELDB_ANCIENT` => `ETHDB_ANCIENT`
+> * `LOGRUS_LEVEL`, `LOGRUS_FILE` => `LOG_LEVEL`, `LOG_FILE`, etc.
+
+## Usage
+* For state snapshot from EthDB:
+```bash
+./ipld-eth-state-snapshot stateSnapshot --config={path to toml config file}
+```
+* Account selective snapshot: To restrict the snapshot to a list of accounts (addresses), provide the addresses in config parameter `snapshot.accounts` or env variable `SNAPSHOT_ACCOUNTS`. Only nodes related to provided addresses will be indexed.
+
+Example:
+```toml
+[snapshot]
+accounts = [
+  "0x825a6eec09e44Cb0fa19b84353ad0f7858d7F61a"
+]
+```
+
+## Monitoring
+* Enable metrics using config parameters `prom.metrics` and `prom.http`.
+* `ipld-eth-state-snapshot` exposes following prometheus metrics at `/metrics` endpoint:
+  * `state_node_count`: Number of state nodes processed.
+  * `storage_node_count`: Number of storage nodes processed.
+  * `code_node_count`: Number of code nodes processed.
+  * DB stats if operating in `postgres` mode.
+
 ## Tests
-* Install [mockgen](https://github.com/golang/mock#installation)
-```bash
-# setup db
-docker-compose up -d
-# run tests after db migrations are run
-make dbtest
-# tear down db
-docker-compose down -v --remove-orphans
-```
+* Run unit tests:
+* `make test`
+
+## Import output data in file mode into a database
+* When `ipld-eth-state-snapshot stateSnapshot` is run in file mode (`database.type`), the output is in form of CSV files.
+* Assuming the output files are located in host's `./output_dir` directory.
+* Data post-processing:
+  * Create a directory to store post-processed output:
+    ```bash
+    mkdir -p output_dir/processed_output
+    ```
+  * Combine output from multiple workers and copy to post-processed output directory:
+    ```bash
+    # ipld.blocks
+    cat {output_dir,output_dir/*}/ipld.blocks.csv > output_dir/processed_output/combined-ipld.blocks.csv
+    # eth.state_cids
+    cat output_dir/*/eth.state_cids.csv > output_dir/processed_output/combined-eth.state_cids.csv
+    # eth.storage_cids
+    cat output_dir/*/eth.storage_cids.csv > output_dir/processed_output/combined-eth.storage_cids.csv
+    # public.nodes
+    cp output_dir/public.nodes.csv output_dir/processed_output/public.nodes.csv
+    # eth.header_cids
+    cp output_dir/eth.header_cids.csv output_dir/processed_output/eth.header_cids.csv
+    ```
+  * De-duplicate data:
+    ```bash
+    # ipld.blocks
+    sort -u output_dir/processed_output/combined-ipld.blocks.csv -o output_dir/processed_output/deduped-combined-ipld.blocks.csv
+    # eth.header_cids
+    sort -u output_dir/processed_output/eth.header_cids.csv -o output_dir/processed_output/deduped-eth.header_cids.csv
+    # eth.state_cids
+    sort -u output_dir/processed_output/combined-eth.state_cids.csv -o output_dir/processed_output/deduped-combined-eth.state_cids.csv
+    # eth.storage_cids
+    sort -u output_dir/processed_output/combined-eth.storage_cids.csv -o output_dir/processed_output/deduped-combined-eth.storage_cids.csv
+    ```
+* Copy over the post-processed output files to the DB server (say in `/output_dir`).
+* Start `psql` to run the import commands:
+    ```bash
+    psql -U <DATABASE_USER> -h <DATABASE_HOSTNAME> -p <DATABASE_PORT> <DATABASE_NAME>
+    ```
+* Run the following to import data:
+    ```bash
+    # public.nodes
+    COPY public.nodes FROM '/output_dir/processed_output/public.nodes.csv' CSV;
+    # ipld.blocks
+    COPY ipld.blocks FROM '/output_dir/processed_output/deduped-combined-ipld.blocks.csv' CSV;
+    # eth.header_cids
+    COPY eth.header_cids FROM '/output_dir/processed_output/deduped-eth.header_cids.csv' CSV;
+    # eth.state_cids
+    COPY eth.state_cids FROM '/output_dir/processed_output/deduped-combined-eth.state_cids.csv' CSV FORCE NOT NULL state_leaf_key;
+    # eth.storage_cids
+    COPY eth.storage_cids FROM '/output_dir/processed_output/deduped-combined-eth.storage_cids.csv' CSV FORCE NOT NULL storage_leaf_key;
+    ```
+* NOTE: `COPY` command on CSVs inserts empty strings as `NULL` in the DB. Passing `FORCE_NOT_NULL <COLUMN_NAME>` forces it to insert empty strings instead. This is required to maintain compatibility of the imported snapshot data with the data generated by statediffing. Reference: https://www.postgresql.org/docs/14/sql-copy.html
+
+### Troubleshooting
+* Run the following command to find any rows (in data dumps in `file` mode) having unexpected number of columns:
+    ```bash
+    ./scripts/find-bad-rows.sh -i <input-file> -c <expected-columns> -o [output-file] -d true
+    ```
+* Run the following command to select rows (from data dumps in `file` mode) other than the ones having unexpected number of columns:
+    ```bash
+    ./scripts/filter-bad-rows.sh -i <input-file> -c <expected-columns> -o <output-file>
+    ```
+* See [scripts](./scripts) for more details.


@ -1,63 +0,0 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/vulcanize/ipld-eth-state-snapshot/pkg/snapshot"
)
// inPlaceStateSnapshotCmd represents the inPlaceStateSnapshot command
var inPlaceStateSnapshotCmd = &cobra.Command{
Use: "inPlaceStateSnapshot",
Short: "Take an in-place state snapshot in the database",
Long: `Usage:
./ipld-eth-state-snapshot inPlaceStateSnapshot --config={path to toml config file}`,
Run: func(cmd *cobra.Command, args []string) {
subCommand = cmd.CalledAs()
logWithCommand = *logrus.WithField("SubCommand", subCommand)
inPlaceStateSnapshot()
},
}
func inPlaceStateSnapshot() {
config := snapshot.NewInPlaceSnapshotConfig()
startHeight := viper.GetUint64(snapshot.SNAPSHOT_START_HEIGHT_TOML)
endHeight := viper.GetUint64(snapshot.SNAPSHOT_END_HEIGHT_TOML)
params := snapshot.InPlaceSnapshotParams{StartHeight: uint64(startHeight), EndHeight: uint64(endHeight)}
if err := snapshot.CreateInPlaceSnapshot(config, params); err != nil {
logWithCommand.Fatal(err)
}
logWithCommand.Infof("snapshot taken at height %d starting from height %d", endHeight, startHeight)
}
func init() {
rootCmd.AddCommand(inPlaceStateSnapshotCmd)
inPlaceStateSnapshotCmd.PersistentFlags().String(snapshot.SNAPSHOT_START_HEIGHT_CLI, "", "start block height for in-place snapshot")
inPlaceStateSnapshotCmd.PersistentFlags().String(snapshot.SNAPSHOT_END_HEIGHT_CLI, "", "end block height for in-place snapshot")
viper.BindPFlag(snapshot.SNAPSHOT_START_HEIGHT_TOML, inPlaceStateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_START_HEIGHT_CLI))
viper.BindPFlag(snapshot.SNAPSHOT_END_HEIGHT_TOML, inPlaceStateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_END_HEIGHT_CLI))
}


@ -25,8 +25,8 @@ import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
"github.com/vulcanize/ipld-eth-state-snapshot/pkg/prom" "github.com/cerc-io/ipld-eth-state-snapshot/pkg/prom"
"github.com/vulcanize/ipld-eth-state-snapshot/pkg/snapshot" "github.com/cerc-io/ipld-eth-state-snapshot/pkg/snapshot"
) )
var ( var (
@ -42,14 +42,13 @@ var rootCmd = &cobra.Command{
// Execute executes root Command. // Execute executes root Command.
func Execute() { func Execute() {
log.Info("----- Starting vDB -----")
if err := rootCmd.Execute(); err != nil { if err := rootCmd.Execute(); err != nil {
log.Fatal(err) log.Fatal(err)
} }
} }
func initFuncs(cmd *cobra.Command, args []string) { func initFuncs(cmd *cobra.Command, args []string) {
logfile := viper.GetString(snapshot.LOGRUS_FILE_TOML) logfile := viper.GetString(snapshot.LOG_FILE_TOML)
if logfile != "" { if logfile != "" {
file, err := os.OpenFile(logfile, file, err := os.OpenFile(logfile,
os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
@ -68,7 +67,7 @@ func initFuncs(cmd *cobra.Command, args []string) {
} }
if viper.GetBool(snapshot.PROM_METRICS_TOML) { if viper.GetBool(snapshot.PROM_METRICS_TOML) {
log.Info("initializing prometheus metrics") log.Info("Initializing prometheus metrics")
prom.Init() prom.Init()
} }
@ -84,7 +83,7 @@ func initFuncs(cmd *cobra.Command, args []string) {
} }
func logLevel() error { func logLevel() error {
lvl, err := log.ParseLevel(viper.GetString(snapshot.LOGRUS_LEVEL_TOML)) lvl, err := log.ParseLevel(viper.GetString(snapshot.LOG_LEVEL_TOML))
if err != nil { if err != nil {
return err return err
} }
@ -103,13 +102,13 @@ func init() {
viper.AutomaticEnv() viper.AutomaticEnv()
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file location") rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file location")
rootCmd.PersistentFlags().String(snapshot.LOGRUS_FILE_CLI, "", "file path for logging") rootCmd.PersistentFlags().String(snapshot.LOG_FILE_CLI, "", "file path for logging")
rootCmd.PersistentFlags().String(snapshot.DATABASE_NAME_CLI, "vulcanize_public", "database name") rootCmd.PersistentFlags().String(snapshot.DATABASE_NAME_CLI, "vulcanize_public", "database name")
rootCmd.PersistentFlags().Int(snapshot.DATABASE_PORT_CLI, 5432, "database port") rootCmd.PersistentFlags().Int(snapshot.DATABASE_PORT_CLI, 5432, "database port")
rootCmd.PersistentFlags().String(snapshot.DATABASE_HOSTNAME_CLI, "localhost", "database hostname") rootCmd.PersistentFlags().String(snapshot.DATABASE_HOSTNAME_CLI, "localhost", "database hostname")
rootCmd.PersistentFlags().String(snapshot.DATABASE_USER_CLI, "", "database user") rootCmd.PersistentFlags().String(snapshot.DATABASE_USER_CLI, "", "database user")
rootCmd.PersistentFlags().String(snapshot.DATABASE_PASSWORD_CLI, "", "database password") rootCmd.PersistentFlags().String(snapshot.DATABASE_PASSWORD_CLI, "", "database password")
rootCmd.PersistentFlags().String(snapshot.LOGRUS_LEVEL_CLI, log.InfoLevel.String(), "log level (trace, debug, info, warn, error, fatal, panic)") rootCmd.PersistentFlags().String(snapshot.LOG_LEVEL_CLI, log.InfoLevel.String(), "log level (trace, debug, info, warn, error, fatal, panic)")
rootCmd.PersistentFlags().Bool(snapshot.PROM_METRICS_CLI, false, "enable prometheus metrics") rootCmd.PersistentFlags().Bool(snapshot.PROM_METRICS_CLI, false, "enable prometheus metrics")
rootCmd.PersistentFlags().Bool(snapshot.PROM_HTTP_CLI, false, "enable prometheus http service") rootCmd.PersistentFlags().Bool(snapshot.PROM_HTTP_CLI, false, "enable prometheus http service")
@ -117,13 +116,13 @@ func init() {
rootCmd.PersistentFlags().String(snapshot.PROM_HTTP_PORT_CLI, "8086", "prometheus http port") rootCmd.PersistentFlags().String(snapshot.PROM_HTTP_PORT_CLI, "8086", "prometheus http port")
rootCmd.PersistentFlags().Bool(snapshot.PROM_DB_STATS_CLI, false, "enables prometheus db stats") rootCmd.PersistentFlags().Bool(snapshot.PROM_DB_STATS_CLI, false, "enables prometheus db stats")
viper.BindPFlag(snapshot.LOGRUS_FILE_TOML, rootCmd.PersistentFlags().Lookup(snapshot.LOGRUS_FILE_CLI)) viper.BindPFlag(snapshot.LOG_FILE_TOML, rootCmd.PersistentFlags().Lookup(snapshot.LOG_FILE_CLI))
viper.BindPFlag(snapshot.DATABASE_NAME_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_NAME_CLI)) viper.BindPFlag(snapshot.DATABASE_NAME_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_NAME_CLI))
viper.BindPFlag(snapshot.DATABASE_PORT_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_PORT_CLI)) viper.BindPFlag(snapshot.DATABASE_PORT_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_PORT_CLI))
viper.BindPFlag(snapshot.DATABASE_HOSTNAME_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_HOSTNAME_CLI)) viper.BindPFlag(snapshot.DATABASE_HOSTNAME_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_HOSTNAME_CLI))
viper.BindPFlag(snapshot.DATABASE_USER_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_USER_CLI)) viper.BindPFlag(snapshot.DATABASE_USER_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_USER_CLI))
viper.BindPFlag(snapshot.DATABASE_PASSWORD_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_PASSWORD_CLI)) viper.BindPFlag(snapshot.DATABASE_PASSWORD_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_PASSWORD_CLI))
viper.BindPFlag(snapshot.LOGRUS_LEVEL_TOML, rootCmd.PersistentFlags().Lookup(snapshot.LOGRUS_LEVEL_CLI)) viper.BindPFlag(snapshot.LOG_LEVEL_TOML, rootCmd.PersistentFlags().Lookup(snapshot.LOG_LEVEL_CLI))
viper.BindPFlag(snapshot.PROM_METRICS_TOML, rootCmd.PersistentFlags().Lookup(snapshot.PROM_METRICS_CLI)) viper.BindPFlag(snapshot.PROM_METRICS_TOML, rootCmd.PersistentFlags().Lookup(snapshot.PROM_METRICS_CLI))
viper.BindPFlag(snapshot.PROM_HTTP_TOML, rootCmd.PersistentFlags().Lookup(snapshot.PROM_HTTP_CLI)) viper.BindPFlag(snapshot.PROM_HTTP_TOML, rootCmd.PersistentFlags().Lookup(snapshot.PROM_HTTP_CLI))
@ -138,7 +137,7 @@ func initConfig() {
if err := viper.ReadInConfig(); err == nil { if err := viper.ReadInConfig(); err == nil {
log.Printf("Using config file: %s", viper.ConfigFileUsed()) log.Printf("Using config file: %s", viper.ConfigFileUsed())
} else { } else {
log.Fatal(fmt.Sprintf("Couldn't read config file: %s", err.Error())) log.Fatalf("Couldn't read config file: %s", err)
} }
} else { } else {
log.Warn("No config file passed with --config flag") log.Warn("No config file passed with --config flag")


@ -16,19 +16,21 @@
 package cmd
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 
-	"github.com/vulcanize/ipld-eth-state-snapshot/pkg/snapshot"
+	"github.com/cerc-io/ipld-eth-state-snapshot/pkg/snapshot"
+	"github.com/cerc-io/plugeth-statediff/indexer"
 )
 
 // stateSnapshotCmd represents the stateSnapshot command
 var stateSnapshotCmd = &cobra.Command{
 	Use:   "stateSnapshot",
-	Short: "Extract the entire Ethereum state from leveldb and publish into PG-IPFS",
+	Short: "Extract the entire Ethereum state from Ethdb and publish into PG-IPFS",
 	Long: `Usage
 ./ipld-eth-state-snapshot stateSnapshot --config={path to toml config file}`,
@ -40,15 +42,14 @@ var stateSnapshotCmd = &cobra.Command{
 }
 
 func stateSnapshot() {
-	modeStr := viper.GetString(snapshot.SNAPSHOT_MODE_TOML)
-	mode := snapshot.SnapshotMode(modeStr)
+	mode := snapshot.SnapshotMode(viper.GetString(snapshot.SNAPSHOT_MODE_TOML))
 	config, err := snapshot.NewConfig(mode)
 	if err != nil {
 		logWithCommand.Fatalf("unable to initialize config: %v", err)
 	}
-	logWithCommand.Infof("opening levelDB and ancient data at %s and %s",
-		config.Eth.LevelDBPath, config.Eth.AncientDBPath)
-	edb, err := snapshot.NewLevelDB(config.Eth)
+	logWithCommand.Infof("opening ethdb and ancient data at %s and %s",
+		config.Eth.DBPath, config.Eth.AncientDBPath)
+	edb, err := snapshot.NewEthDB(config.Eth)
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
@ -59,46 +60,60 @@ func stateSnapshot() {
 		logWithCommand.Infof("no recovery file set, using default: %s", recoveryFile)
 	}
 
-	pub, err := snapshot.NewPublisher(mode, config)
+	var idxconfig indexer.Config
+	switch mode {
+	case snapshot.PgSnapshot:
+		idxconfig = *config.DB
+	case snapshot.FileSnapshot:
+		idxconfig = *config.File
+	}
+	_, indexer, err := indexer.NewStateDiffIndexer(
+		context.Background(),
+		nil, // ChainConfig is only used in PushBlock, which we don't call
+		config.Eth.NodeInfo,
+		idxconfig,
+		false,
+	)
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
 
-	snapshotService, err := snapshot.NewSnapshotService(edb, pub, recoveryFile)
+	snapshotService, err := snapshot.NewSnapshotService(edb, indexer, recoveryFile)
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
 
 	workers := viper.GetUint(snapshot.SNAPSHOT_WORKERS_TOML)
 	if height < 0 {
-		if err := snapshotService.CreateLatestSnapshot(workers); err != nil {
+		if err := snapshotService.CreateLatestSnapshot(workers, config.Service.AllowedAccounts); err != nil {
 			logWithCommand.Fatal(err)
 		}
 	} else {
-		params := snapshot.SnapshotParams{Workers: workers, Height: uint64(height)}
+		params := snapshot.SnapshotParams{Workers: workers, Height: uint64(height), WatchedAddresses: config.Service.AllowedAccounts}
 		if err := snapshotService.CreateSnapshot(params); err != nil {
 			logWithCommand.Fatal(err)
 		}
 	}
-	logWithCommand.Infof("state snapshot at height %d is complete", height)
+	logWithCommand.Infof("State snapshot at height %d is complete", height)
 }
 
 func init() {
 	rootCmd.AddCommand(stateSnapshotCmd)
-	stateSnapshotCmd.PersistentFlags().String(snapshot.LVL_DB_PATH_CLI, "", "path to primary datastore")
-	stateSnapshotCmd.PersistentFlags().String(snapshot.ANCIENT_DB_PATH_CLI, "", "path to ancient datastore")
+	stateSnapshotCmd.PersistentFlags().String(snapshot.ETHDB_PATH_CLI, "", "path to primary datastore")
+	stateSnapshotCmd.PersistentFlags().String(snapshot.ETHDB_ANCIENT_CLI, "", "path to ancient datastore")
 	stateSnapshotCmd.PersistentFlags().String(snapshot.SNAPSHOT_BLOCK_HEIGHT_CLI, "", "block height to extract state at")
 	stateSnapshotCmd.PersistentFlags().Int(snapshot.SNAPSHOT_WORKERS_CLI, 1, "number of concurrent workers to use")
 	stateSnapshotCmd.PersistentFlags().String(snapshot.SNAPSHOT_RECOVERY_FILE_CLI, "", "file to recover from a previous iteration")
 	stateSnapshotCmd.PersistentFlags().String(snapshot.SNAPSHOT_MODE_CLI, "postgres", "output mode for snapshot ('file' or 'postgres')")
 	stateSnapshotCmd.PersistentFlags().String(snapshot.FILE_OUTPUT_DIR_CLI, "", "directory for writing ouput to while operating in 'file' mode")
+	stateSnapshotCmd.PersistentFlags().StringArray(snapshot.SNAPSHOT_ACCOUNTS_CLI, nil, "list of account addresses to limit snapshot to")
 
-	viper.BindPFlag(snapshot.LVL_DB_PATH_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.LVL_DB_PATH_CLI))
-	viper.BindPFlag(snapshot.ANCIENT_DB_PATH_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.ANCIENT_DB_PATH_CLI))
+	viper.BindPFlag(snapshot.ETHDB_PATH_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.ETHDB_PATH_CLI))
+	viper.BindPFlag(snapshot.ETHDB_ANCIENT_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.ETHDB_ANCIENT_CLI))
 	viper.BindPFlag(snapshot.SNAPSHOT_BLOCK_HEIGHT_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_BLOCK_HEIGHT_CLI))
 	viper.BindPFlag(snapshot.SNAPSHOT_WORKERS_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_WORKERS_CLI))
 	viper.BindPFlag(snapshot.SNAPSHOT_RECOVERY_FILE_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_RECOVERY_FILE_CLI))
 	viper.BindPFlag(snapshot.SNAPSHOT_MODE_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_MODE_CLI))
 	viper.BindPFlag(snapshot.FILE_OUTPUT_DIR_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.FILE_OUTPUT_DIR_CLI))
+	viper.BindPFlag(snapshot.SNAPSHOT_ACCOUNTS_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_ACCOUNTS_CLI))
 }


@ -1,8 +0,0 @@
-- +goose Up
CREATE TABLE IF NOT EXISTS public.blocks (
key TEXT UNIQUE NOT NULL,
data BYTEA NOT NULL
);
-- +goose Down
DROP TABLE public.blocks;


@ -1,12 +0,0 @@
-- +goose Up
CREATE TABLE nodes (
id SERIAL PRIMARY KEY,
client_name VARCHAR,
genesis_block VARCHAR(66),
network_id VARCHAR,
node_id VARCHAR(128),
CONSTRAINT node_uc UNIQUE (genesis_block, network_id, node_id)
);
-- +goose Down
DROP TABLE nodes;


@ -1,5 +0,0 @@
-- +goose Up
CREATE SCHEMA eth;
-- +goose Down
DROP SCHEMA eth;


@ -1,23 +0,0 @@
-- +goose Up
CREATE TABLE eth.header_cids (
id SERIAL PRIMARY KEY,
block_number BIGINT NOT NULL,
block_hash VARCHAR(66) NOT NULL,
parent_hash VARCHAR(66) NOT NULL,
cid TEXT NOT NULL,
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
td NUMERIC NOT NULL,
node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
reward NUMERIC NOT NULL,
state_root VARCHAR(66) NOT NULL,
tx_root VARCHAR(66) NOT NULL,
receipt_root VARCHAR(66) NOT NULL,
uncle_root VARCHAR(66) NOT NULL,
bloom BYTEA NOT NULL,
timestamp NUMERIC NOT NULL,
times_validated INTEGER NOT NULL DEFAULT 1,
UNIQUE (block_number, block_hash)
);
-- +goose Down
DROP TABLE eth.header_cids;


@ -1,14 +0,0 @@
-- +goose Up
CREATE TABLE eth.uncle_cids (
id SERIAL PRIMARY KEY,
header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
block_hash VARCHAR(66) NOT NULL,
parent_hash VARCHAR(66) NOT NULL,
cid TEXT NOT NULL,
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
reward NUMERIC NOT NULL,
UNIQUE (header_id, block_hash)
);
-- +goose Down
DROP TABLE eth.uncle_cids;


@ -1,15 +0,0 @@
-- +goose Up
CREATE TABLE eth.transaction_cids (
id SERIAL PRIMARY KEY,
header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
tx_hash VARCHAR(66) NOT NULL,
index INTEGER NOT NULL,
cid TEXT NOT NULL,
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
dst VARCHAR(66) NOT NULL,
src VARCHAR(66) NOT NULL,
UNIQUE (header_id, tx_hash)
);
-- +goose Down
DROP TABLE eth.transaction_cids;


@ -1,18 +0,0 @@
-- +goose Up
CREATE TABLE eth.receipt_cids (
id SERIAL PRIMARY KEY,
tx_id INTEGER NOT NULL REFERENCES eth.transaction_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
cid TEXT NOT NULL,
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
contract VARCHAR(66),
contract_hash VARCHAR(66),
topic0s VARCHAR(66)[],
topic1s VARCHAR(66)[],
topic2s VARCHAR(66)[],
topic3s VARCHAR(66)[],
log_contracts VARCHAR(66)[],
UNIQUE (tx_id)
);
-- +goose Down
DROP TABLE eth.receipt_cids;


@ -1,15 +0,0 @@
-- +goose Up
CREATE TABLE eth.state_cids (
id SERIAL PRIMARY KEY,
header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
state_leaf_key VARCHAR(66),
cid TEXT NOT NULL,
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
state_path BYTEA,
node_type INTEGER,
diff BOOLEAN NOT NULL DEFAULT FALSE,
UNIQUE (header_id, state_path)
);
-- +goose Down
DROP TABLE eth.state_cids;


@ -1,15 +0,0 @@
-- +goose Up
CREATE TABLE eth.storage_cids (
id SERIAL PRIMARY KEY,
state_id INTEGER NOT NULL REFERENCES eth.state_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
storage_leaf_key VARCHAR(66),
cid TEXT NOT NULL,
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
storage_path BYTEA,
node_type INTEGER NOT NULL,
diff BOOLEAN NOT NULL DEFAULT FALSE,
UNIQUE (state_id, storage_path)
);
-- +goose Down
DROP TABLE eth.storage_cids;


@ -1,13 +0,0 @@
-- +goose Up
CREATE TABLE eth.state_accounts (
id SERIAL PRIMARY KEY,
state_id INTEGER NOT NULL REFERENCES eth.state_cids (id) ON DELETE CASCADE,
balance NUMERIC NOT NULL,
nonce INTEGER NOT NULL,
code_hash BYTEA NOT NULL,
storage_root VARCHAR(66) NOT NULL,
UNIQUE (state_id)
);
-- +goose Down
DROP TABLE eth.state_accounts;


@ -1,16 +1,17 @@
 [database]
-name = "vulcanize_public"
+name = "cerc_testing"
 hostname = "localhost"
-port = 5432
-user = "postgres"
+port = 8077
+user = "vdbm"
+password = "password"
 
-[leveldb]
-path = "/Users/iannorden/Library/Ethereum/geth/chaindata"
-ancient = "/Users/iannorden/Library/Ethereum/geth/chaindata/ancient"
+[ethdb]
+path = "/Users/user/go/src/github.com/cerc-io/ipld-eth-state-snapshot/fixture/chain2data"
+ancient = "/Users/user/go/src/github.com/cerc-io/ipld-eth-state-snapshot/fixture/chain2data/ancient"
 
 [log]
 level = "info"
-file = "log_file"
+file = "" # Leave blank to output to stdout
 
 [prom]
 metrics = true
@ -22,12 +23,9 @@
 [snapshot]
 mode = "file"
 workers = 4
-blockHeight = -1
+blockHeight = 32
 recoveryFile = "recovery_file"
-startHeight = 1
-endHeight = 12
 
 [file]
 outputDir = "output_dir/"


@ -1,27 +0,0 @@
package fixture
import (
"os"
"path/filepath"
"runtime"
)
// TODO: embed some mainnet data
// import "embed"
//_go:embed mainnet_data.tar.gz
var (
ChaindataPath, AncientdataPath string
)
func init() {
_, path, _, _ := runtime.Caller(0)
wd := filepath.Dir(path)
ChaindataPath = filepath.Join(wd, "..", "fixture", "chaindata")
AncientdataPath = filepath.Join(ChaindataPath, "ancient")
if _, err := os.Stat(ChaindataPath); err != nil {
panic("must populate chaindata at " + ChaindataPath)
}
}


@ -1,6 +0,0 @@
*.log
CURRENT*
LOCK
LOG
MANIFEST-*
ancient/FLOCK

Binary file not shown.

Binary file not shown.


@ -1,307 +0,0 @@
// Copyright © 2022 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package fixture
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
)
type Block struct {
Hash common.Hash
Number *big.Int
StateNodes []snapt.Node
StorageNodes [][]snapt.Node
}
var InPlaceSnapshotBlocks = []Block{
// Genesis block
{
Hash: common.HexToHash("0xe1bdb963128f645aa674b52a8c7ce00704762f27e2a6896abebd7954878f40e4"),
Number: big.NewInt(0),
StateNodes: []snapt.Node{
// State node for main account with balance.
{
NodeType: 2,
Path: []byte{},
Key: common.HexToHash("0x67ab3c0dd775f448af7fb41243415ed6fb975d1530a2d828f69bea7346231ad7"),
Value: []byte{248, 119, 161, 32, 103, 171, 60, 13, 215, 117, 244, 72, 175, 127, 180, 18, 67, 65, 94, 214, 251, 151, 93, 21, 48, 162, 216, 40, 246, 155, 234, 115, 70, 35, 26, 215, 184, 83, 248, 81, 128, 141, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112},
},
},
},
// Contract Test1 deployed by main account.
{
Hash: common.HexToHash("0x46ce57b700e470d0c0820ede662ecc0d0c78cf87237cb12a40a7ff5ff9cc8ac5"),
Number: big.NewInt(1),
StateNodes: []snapt.Node{
// Branch root node.
{
NodeType: 0,
Path: []byte{},
Value: []byte{248, 81, 128, 128, 128, 128, 128, 128, 160, 173, 52, 73, 195, 118, 160, 81, 100, 138, 50, 127, 27, 188, 85, 147, 215, 187, 244, 219, 228, 93, 25, 72, 253, 160, 45, 16, 239, 130, 223, 160, 26, 128, 128, 160, 137, 52, 229, 60, 211, 96, 171, 177, 51, 19, 204, 180, 24, 252, 28, 70, 234, 7, 73, 20, 117, 230, 32, 223, 188, 6, 191, 75, 123, 64, 163, 197, 128, 128, 128, 128, 128, 128, 128},
},
// State node for sender account.
{
NodeType: 2,
Path: []byte{6},
Key: common.HexToHash("0x67ab3c0dd775f448af7fb41243415ed6fb975d1530a2d828f69bea7346231ad7"),
Value: []byte{248, 118, 160, 55, 171, 60, 13, 215, 117, 244, 72, 175, 127, 180, 18, 67, 65, 94, 214, 251, 151, 93, 21, 48, 162, 216, 40, 246, 155, 234, 115, 70, 35, 26, 215, 184, 83, 248, 81, 1, 141, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112},
},
// State node for deployment of contract Test1.
{
NodeType: 2,
Path: []byte{9},
Key: common.HexToHash("0x9397e33dedda4763aea143fc6151ebcd9a93f62db7a6a556d46c585d82ad2afc"),
Value: []byte{248, 105, 160, 51, 151, 227, 61, 237, 218, 71, 99, 174, 161, 67, 252, 97, 81, 235, 205, 154, 147, 246, 45, 183, 166, 165, 86, 212, 108, 88, 93, 130, 173, 42, 252, 184, 70, 248, 68, 1, 128, 160, 243, 143, 159, 99, 199, 96, 208, 136, 215, 221, 4, 247, 67, 97, 155, 98, 145, 246, 59, 238, 189, 139, 223, 83, 6, 40, 249, 14, 156, 250, 82, 215, 160, 47, 62, 207, 242, 160, 167, 130, 233, 6, 187, 196, 80, 96, 6, 188, 150, 74, 176, 201, 7, 65, 32, 174, 97, 1, 76, 26, 86, 141, 49, 62, 214},
},
},
StorageNodes: [][]snapt.Node{
{},
{},
{
// Storage node for contract Test1 state variable initialCount.
{
NodeType: 2,
Path: []byte{},
Key: common.HexToHash("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"),
Value: []byte{227, 161, 32, 177, 14, 45, 82, 118, 18, 7, 59, 38, 238, 205, 253, 113, 126, 106, 50, 12, 244, 75, 74, 250, 194, 176, 115, 45, 159, 203, 226, 183, 250, 12, 246, 1},
},
},
},
},
// Contract Test2 deployed by main account.
{
Hash: common.HexToHash("0xa848b156fe4e61d8dac0a833720794e8c58e93fa6db369af6f0d9a19ada9d723"),
Number: big.NewInt(2),
StateNodes: []snapt.Node{
// Branch root node.
{
NodeType: 0,
Path: []byte{},
Value: []byte{248, 81, 128, 128, 128, 128, 128, 128, 160, 191, 248, 9, 223, 101, 212, 255, 213, 196, 146, 160, 239, 69, 178, 134, 139, 81, 22, 255, 149, 90, 253, 178, 172, 102, 87, 249, 225, 224, 173, 183, 55, 128, 128, 160, 165, 200, 234, 64, 112, 157, 130, 31, 236, 38, 20, 68, 99, 247, 81, 161, 76, 62, 186, 246, 84, 121, 39, 155, 102, 134, 188, 109, 89, 220, 31, 212, 128, 128, 128, 128, 128, 128, 128},
},
// State node for sender account.
{
NodeType: 2,
Path: []byte{6},
Key: common.HexToHash("0x67ab3c0dd775f448af7fb41243415ed6fb975d1530a2d828f69bea7346231ad7"),
Value: []byte{248, 118, 160, 55, 171, 60, 13, 215, 117, 244, 72, 175, 127, 180, 18, 67, 65, 94, 214, 251, 151, 93, 21, 48, 162, 216, 40, 246, 155, 234, 115, 70, 35, 26, 215, 184, 83, 248, 81, 2, 141, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112},
},
// State node for deployment of contract Test2.
{
NodeType: 2,
Path: []byte{10},
Key: common.HexToHash("0xa44b5f4b47ded891709350af6a6e4d56602228a70279bdad4f0f64042445b4b9"),
Value: []byte{248, 105, 160, 52, 75, 95, 75, 71, 222, 216, 145, 112, 147, 80, 175, 106, 110, 77, 86, 96, 34, 40, 167, 2, 121, 189, 173, 79, 15, 100, 4, 36, 69, 180, 185, 184, 70, 248, 68, 1, 128, 160, 130, 30, 37, 86, 162, 144, 200, 100, 5, 248, 22, 10, 45, 102, 32, 66, 164, 49, 186, 69, 107, 157, 178, 101, 199, 155, 184, 55, 192, 75, 229, 240, 160, 86, 36, 245, 233, 5, 167, 42, 118, 181, 35, 178, 216, 149, 56, 146, 147, 19, 8, 140, 137, 234, 0, 160, 27, 220, 33, 204, 6, 152, 239, 177, 52},
},
},
StorageNodes: [][]snapt.Node{
{},
{},
{
// Storage node for contract Test2 state variable test.
{
NodeType: 2,
Path: []byte{},
Key: common.HexToHash("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"),
Value: []byte{227, 161, 32, 41, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1},
},
},
},
},
// Increment contract Test1 state variable count using main account.
{
Hash: common.HexToHash("0x9fc4aaaab26f0b43ac609c99ae50925e5dc9a25f103c0511fcff38c6b3158302"),
Number: big.NewInt(3),
StateNodes: []snapt.Node{
// Branch root node.
{
NodeType: 0,
Path: []byte{},
Value: []byte{248, 113, 128, 128, 128, 128, 128, 128, 160, 70, 53, 190, 199, 124, 254, 86, 213, 42, 126, 117, 155, 2, 223, 56, 167, 130, 118, 10, 150, 65, 46, 207, 169, 167, 250, 209, 64, 37, 205, 153, 51, 128, 128, 160, 165, 200, 234, 64, 112, 157, 130, 31, 236, 38, 20, 68, 99, 247, 81, 161, 76, 62, 186, 246, 84, 121, 39, 155, 102, 134, 188, 109, 89, 220, 31, 212, 128, 128, 160, 214, 109, 199, 206, 145, 11, 213, 44, 206, 214, 36, 181, 134, 92, 243, 178, 58, 88, 158, 42, 31, 125, 71, 148, 188, 122, 252, 100, 250, 182, 85, 159, 128, 128, 128, 128},
},
// State node for sender account.
{
NodeType: 2,
Path: []byte{6},
Key: common.HexToHash("0x67ab3c0dd775f448af7fb41243415ed6fb975d1530a2d828f69bea7346231ad7"),
Value: []byte{248, 118, 160, 55, 171, 60, 13, 215, 117, 244, 72, 175, 127, 180, 18, 67, 65, 94, 214, 251, 151, 93, 21, 48, 162, 216, 40, 246, 155, 234, 115, 70, 35, 26, 215, 184, 83, 248, 81, 3, 141, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112},
},
// State node for contract Test1 transaction.
{
NodeType: 2,
Path: []byte{9},
Key: common.HexToHash("0x9397e33dedda4763aea143fc6151ebcd9a93f62db7a6a556d46c585d82ad2afc"),
Value: []byte{248, 105, 160, 51, 151, 227, 61, 237, 218, 71, 99, 174, 161, 67, 252, 97, 81, 235, 205, 154, 147, 246, 45, 183, 166, 165, 86, 212, 108, 88, 93, 130, 173, 42, 252, 184, 70, 248, 68, 1, 128, 160, 167, 171, 204, 110, 30, 52, 74, 189, 215, 97, 245, 227, 176, 141, 250, 205, 8, 182, 138, 101, 51, 150, 155, 174, 234, 246, 30, 128, 253, 230, 36, 228, 160, 47, 62, 207, 242, 160, 167, 130, 233, 6, 187, 196, 80, 96, 6, 188, 150, 74, 176, 201, 7, 65, 32, 174, 97, 1, 76, 26, 86, 141, 49, 62, 214},
},
},
StorageNodes: [][]snapt.Node{
{},
{},
{
// Branch root node.
{
NodeType: 0,
Path: []byte{},
Value: []byte{248, 81, 128, 128, 160, 79, 197, 241, 58, 178, 249, 186, 12, 45, 168, 139, 1, 81, 171, 14, 124, 244, 216, 93, 8, 204, 164, 92, 205, 146, 60, 106, 183, 99, 35, 235, 40, 128, 128, 128, 128, 128, 128, 128, 128, 160, 244, 152, 74, 17, 246, 26, 41, 33, 69, 97, 65, 223, 136, 222, 110, 26, 113, 13, 40, 104, 27, 145, 175, 121, 76, 90, 114, 30, 71, 131, 156, 215, 128, 128, 128, 128, 128},
},
// Storage node for contract Test1 state variable count.
{
NodeType: 2,
Path: []byte{2},
Key: common.HexToHash("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"),
Value: []byte{226, 160, 57, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1},
},
// Storage node for contract Test1 state variable initialCount.
{
NodeType: 2,
Path: []byte{11},
Key: common.HexToHash("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"),
Value: []byte{226, 160, 49, 14, 45, 82, 118, 18, 7, 59, 38, 238, 205, 253, 113, 126, 106, 50, 12, 244, 75, 74, 250, 194, 176, 115, 45, 159, 203, 226, 183, 250, 12, 246, 1},
},
},
},
},
}
// Expected state nodes at snapshot height.
var ExpectedStateNodes = []snapt.Node{
{
NodeType: 0,
Path: []byte{},
Value: []byte{248, 113, 128, 128, 128, 128, 128, 128, 160, 70, 53, 190, 199, 124, 254, 86, 213, 42, 126, 117, 155, 2, 223, 56, 167, 130, 118, 10, 150, 65, 46, 207, 169, 167, 250, 209, 64, 37, 205, 153, 51, 128, 128, 160, 165, 200, 234, 64, 112, 157, 130, 31, 236, 38, 20, 68, 99, 247, 81, 161, 76, 62, 186, 246, 84, 121, 39, 155, 102, 134, 188, 109, 89, 220, 31, 212, 128, 128, 160, 214, 109, 199, 206, 145, 11, 213, 44, 206, 214, 36, 181, 134, 92, 243, 178, 58, 88, 158, 42, 31, 125, 71, 148, 188, 122, 252, 100, 250, 182, 85, 159, 128, 128, 128, 128},
},
{
NodeType: 2,
Path: []byte{6},
Key: common.HexToHash("0x67ab3c0dd775f448af7fb41243415ed6fb975d1530a2d828f69bea7346231ad7"),
Value: []byte{248, 118, 160, 55, 171, 60, 13, 215, 117, 244, 72, 175, 127, 180, 18, 67, 65, 94, 214, 251, 151, 93, 21, 48, 162, 216, 40, 246, 155, 234, 115, 70, 35, 26, 215, 184, 83, 248, 81, 3, 141, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112},
},
{
NodeType: 2,
Path: []byte{9},
Key: common.HexToHash("0x9397e33dedda4763aea143fc6151ebcd9a93f62db7a6a556d46c585d82ad2afc"),
Value: []byte{248, 105, 160, 51, 151, 227, 61, 237, 218, 71, 99, 174, 161, 67, 252, 97, 81, 235, 205, 154, 147, 246, 45, 183, 166, 165, 86, 212, 108, 88, 93, 130, 173, 42, 252, 184, 70, 248, 68, 1, 128, 160, 167, 171, 204, 110, 30, 52, 74, 189, 215, 97, 245, 227, 176, 141, 250, 205, 8, 182, 138, 101, 51, 150, 155, 174, 234, 246, 30, 128, 253, 230, 36, 228, 160, 47, 62, 207, 242, 160, 167, 130, 233, 6, 187, 196, 80, 96, 6, 188, 150, 74, 176, 201, 7, 65, 32, 174, 97, 1, 76, 26, 86, 141, 49, 62, 214},
},
{
NodeType: 2,
Path: []byte{10},
Key: common.HexToHash("0xa44b5f4b47ded891709350af6a6e4d56602228a70279bdad4f0f64042445b4b9"),
Value: []byte{248, 105, 160, 52, 75, 95, 75, 71, 222, 216, 145, 112, 147, 80, 175, 106, 110, 77, 86, 96, 34, 40, 167, 2, 121, 189, 173, 79, 15, 100, 4, 36, 69, 180, 185, 184, 70, 248, 68, 1, 128, 160, 130, 30, 37, 86, 162, 144, 200, 100, 5, 248, 22, 10, 45, 102, 32, 66, 164, 49, 186, 69, 107, 157, 178, 101, 199, 155, 184, 55, 192, 75, 229, 240, 160, 86, 36, 245, 233, 5, 167, 42, 118, 181, 35, 178, 216, 149, 56, 146, 147, 19, 8, 140, 137, 234, 0, 160, 27, 220, 33, 204, 6, 152, 239, 177, 52},
},
}
type StorageNodeWithState struct {
snapt.Node
StatePath []byte
}
// Expected storage nodes at snapshot height.
var ExpectedStorageNodes = []StorageNodeWithState{
{
Node: snapt.Node{
NodeType: 0,
Path: []byte{},
Value: []byte{248, 81, 128, 128, 160, 79, 197, 241, 58, 178, 249, 186, 12, 45, 168, 139, 1, 81, 171, 14, 124, 244, 216, 93, 8, 204, 164, 92, 205, 146, 60, 106, 183, 99, 35, 235, 40, 128, 128, 128, 128, 128, 128, 128, 128, 160, 244, 152, 74, 17, 246, 26, 41, 33, 69, 97, 65, 223, 136, 222, 110, 26, 113, 13, 40, 104, 27, 145, 175, 121, 76, 90, 114, 30, 71, 131, 156, 215, 128, 128, 128, 128, 128},
},
StatePath: []byte{9},
},
{
Node: snapt.Node{
NodeType: 2,
Path: []byte{2},
Key: common.HexToHash("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"),
Value: []byte{226, 160, 57, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1},
},
StatePath: []byte{9},
},
{
Node: snapt.Node{
NodeType: 2,
Path: []byte{11},
Key: common.HexToHash("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"),
Value: []byte{226, 160, 49, 14, 45, 82, 118, 18, 7, 59, 38, 238, 205, 253, 113, 126, 106, 50, 12, 244, 75, 74, 250, 194, 176, 115, 45, 159, 203, 226, 183, 250, 12, 246, 1},
},
StatePath: []byte{9},
},
{
Node: snapt.Node{
NodeType: 2,
Path: []byte{},
Key: common.HexToHash("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"),
Value: []byte{227, 161, 32, 41, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1},
},
StatePath: []byte{10},
},
}
// Block header at snapshot height.
// Required in database when executing inPlaceStateSnapshot.
var Block4_Header = types.Header{
ParentHash: common.HexToHash("0x9fc4aaaab26f0b43ac609c99ae50925e5dc9a25f103c0511fcff38c6b3158302"),
UncleHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
Coinbase: common.HexToAddress("0x0000000000000000000000000000000000000000"),
Root: common.HexToHash("0x53580584816f617295ea26c0e17641e0120cab2f0a8ffb53a866fd53aa8e8c2d"),
TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
Bloom: types.Bloom{},
Difficulty: big.NewInt(+2),
Number: big.NewInt(4),
GasLimit: 4704588,
GasUsed: 0,
Time: 1492010458,
Extra: []byte{215, 131, 1, 6, 0, 132, 103, 101, 116, 104, 135, 103, 111, 49, 46, 55, 46, 51, 133, 108, 105, 110, 117, 120, 0, 0, 0, 0, 0, 0, 0, 0, 159, 30, 250, 30, 250, 114, 175, 19, 140, 145, 89, 102, 198, 57, 84, 74, 2, 85, 230, 40, 142, 24, 140, 34, 206, 145, 104, 193, 13, 190, 70, 218, 61, 136, 180, 170, 6, 89, 48, 17, 159, 184, 134, 33, 11, 240, 26, 8, 79, 222, 93, 59, 196, 141, 138, 163, 139, 202, 146, 228, 252, 197, 33, 81, 0},
MixDigest: common.Hash{},
Nonce: types.BlockNonce{},
BaseFee: nil,
}
/*
pragma solidity ^0.8.0;
contract Test1 {
uint256 private count;
uint256 private initialCount;
event Increment(uint256 count);
constructor() {
initialCount = 1;
}
function incrementCount() public returns (uint256) {
count = count + 1;
emit Increment(count);
return count;
}
}
*/
/*
pragma solidity ^0.8.0;
contract Test2 {
uint256 private test;
constructor() {
test = 1;
}
}
*/
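A note on the fixture `Key` fields above: these are secure-trie keys, i.e. the Keccak-256 hash of the 32-byte storage slot index (or of the account address, for state nodes). The constants for slots 0 and 1 can be reproduced with go-ethereum's `crypto` package; a minimal sketch, not part of this changeset:

```
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Slot 0 holds Test1.count and Test2.test; slot 1 holds Test1.initialCount.
	// HexToHash left-pads to 32 bytes, matching how the secure trie hashes slot indices.
	slot0 := crypto.Keccak256Hash(common.HexToHash("0x0").Bytes())
	slot1 := crypto.Keccak256Hash(common.HexToHash("0x1").Bytes())
	fmt.Println(slot0) // 0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563
	fmt.Println(slot1) // 0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6
}
```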


@@ -1,359 +0,0 @@
package fixture
var Block1_StateNodePaths = [][]byte{
[]byte{},
[]byte{0},
[]byte{0, 0},
[]byte{0, 2},
[]byte{0, 2, 1},
[]byte{0, 2, 8},
[]byte{0, 2, 12},
[]byte{0, 3},
[]byte{0, 4},
[]byte{0, 6},
[]byte{0, 6, 3},
[]byte{0, 6, 13},
[]byte{0, 7},
[]byte{0, 8},
[]byte{0, 8, 7},
[]byte{0, 8, 11},
[]byte{0, 9},
[]byte{0, 9, 9},
[]byte{0, 9, 10},
[]byte{0, 12},
[]byte{0, 13},
[]byte{0, 14},
[]byte{1},
[]byte{1, 2},
[]byte{1, 2, 5},
[]byte{1, 2, 7},
[]byte{1, 3},
[]byte{1, 3, 1},
[]byte{1, 3, 11},
[]byte{1, 4},
[]byte{1, 5},
[]byte{1, 5, 11},
[]byte{1, 5, 12},
[]byte{1, 5, 15},
[]byte{1, 6},
[]byte{1, 8},
[]byte{1, 10},
[]byte{1, 13},
[]byte{1, 14},
[]byte{1, 14, 2},
[]byte{1, 14, 11},
[]byte{1, 15},
[]byte{1, 15, 9},
[]byte{1, 15, 15},
[]byte{2},
[]byte{2, 0},
[]byte{2, 0, 9},
[]byte{2, 0, 14},
[]byte{2, 1},
[]byte{2, 1, 1},
[]byte{2, 1, 3},
[]byte{2, 1, 14},
[]byte{2, 5},
[]byte{2, 6},
[]byte{2, 9},
[]byte{2, 9, 1},
[]byte{2, 9, 7},
[]byte{2, 11},
[]byte{2, 11, 7},
[]byte{2, 11, 13},
[]byte{2, 13},
[]byte{2, 13, 1},
[]byte{2, 13, 15},
[]byte{2, 15},
[]byte{3},
[]byte{3, 0},
[]byte{3, 0, 0},
[]byte{3, 0, 1},
[]byte{3, 2},
[]byte{3, 2, 3},
[]byte{3, 2, 15},
[]byte{3, 3},
[]byte{3, 4},
[]byte{3, 4, 2},
[]byte{3, 4, 4},
[]byte{3, 4, 5},
[]byte{3, 6},
[]byte{3, 8},
[]byte{3, 9},
[]byte{3, 10},
[]byte{3, 10, 2},
[]byte{3, 10, 8},
[]byte{3, 10, 12},
[]byte{3, 11},
[]byte{3, 12},
[]byte{3, 13},
[]byte{3, 14},
[]byte{3, 14, 4},
[]byte{3, 14, 9},
[]byte{3, 14, 14},
[]byte{3, 14, 14, 10},
[]byte{3, 14, 14, 15},
[]byte{4},
[]byte{4, 0},
[]byte{4, 0, 6},
[]byte{4, 0, 15},
[]byte{4, 1},
[]byte{4, 2},
[]byte{4, 2, 1},
[]byte{4, 2, 11},
[]byte{4, 3},
[]byte{4, 5},
[]byte{4, 6},
[]byte{4, 7},
[]byte{4, 8},
[]byte{4, 11},
[]byte{4, 11, 6},
[]byte{4, 11, 9},
[]byte{4, 11, 12},
[]byte{4, 14},
[]byte{5},
[]byte{5, 0},
[]byte{5, 0, 3},
[]byte{5, 0, 9},
[]byte{5, 0, 15},
[]byte{5, 1},
[]byte{5, 1, 14},
[]byte{5, 1, 15},
[]byte{5, 2},
[]byte{5, 2, 8},
[]byte{5, 2, 10},
[]byte{5, 3},
[]byte{5, 4},
[]byte{5, 4, 6},
[]byte{5, 4, 12},
[]byte{5, 6},
[]byte{5, 8},
[]byte{5, 8, 3},
[]byte{5, 8, 11},
[]byte{5, 10},
[]byte{5, 11},
[]byte{5, 12},
[]byte{5, 13},
[]byte{5, 15},
[]byte{6},
[]byte{6, 0},
[]byte{6, 2},
[]byte{6, 2, 3},
[]byte{6, 2, 9},
[]byte{6, 4},
[]byte{6, 4, 0},
[]byte{6, 4, 0, 0},
[]byte{6, 4, 0, 5},
[]byte{6, 5},
[]byte{6, 5, 4},
[]byte{6, 5, 10},
[]byte{6, 5, 12},
[]byte{6, 5, 13},
[]byte{6, 6},
[]byte{6, 6, 0},
[]byte{6, 6, 8},
[]byte{6, 8},
[]byte{6, 8, 4},
[]byte{6, 8, 4, 2},
[]byte{6, 8, 4, 9},
[]byte{6, 8, 9},
[]byte{6, 10},
[]byte{6, 10, 1},
[]byte{6, 10, 14},
[]byte{6, 11},
[]byte{6, 11, 2},
[]byte{6, 11, 12},
[]byte{6, 11, 14},
[]byte{6, 13},
[]byte{6, 13, 2},
[]byte{6, 13, 12},
[]byte{7},
[]byte{7, 1},
[]byte{7, 5},
[]byte{7, 7},
[]byte{7, 8},
[]byte{7, 8, 2},
[]byte{7, 8, 5},
[]byte{7, 9},
[]byte{7, 13},
[]byte{7, 13, 1},
[]byte{7, 13, 1, 0},
[]byte{7, 13, 1, 13},
[]byte{7, 13, 7},
[]byte{7, 14},
[]byte{7, 14, 8},
[]byte{7, 14, 11},
[]byte{8},
[]byte{8, 0},
[]byte{8, 0, 3},
[]byte{8, 0, 11},
[]byte{8, 2},
[]byte{8, 4},
[]byte{8, 8},
[]byte{8, 9},
[]byte{8, 9, 3},
[]byte{8, 9, 13},
[]byte{8, 10},
[]byte{8, 12},
[]byte{8, 12, 3},
[]byte{8, 12, 15},
[]byte{8, 13},
[]byte{8, 15},
[]byte{8, 15, 8},
[]byte{8, 15, 13},
[]byte{9},
[]byte{9, 0},
[]byte{9, 5},
[]byte{9, 6},
[]byte{9, 6, 10},
[]byte{9, 6, 14},
[]byte{9, 7},
[]byte{9, 9},
[]byte{9, 14},
[]byte{9, 15},
[]byte{9, 15, 0},
[]byte{9, 15, 4},
[]byte{9, 15, 10},
[]byte{10},
[]byte{10, 0},
[]byte{10, 0, 9},
[]byte{10, 0, 10},
[]byte{10, 0, 15},
[]byte{10, 2},
[]byte{10, 3},
[]byte{10, 6},
[]byte{10, 8},
[]byte{10, 9},
[]byte{10, 10},
[]byte{10, 10, 5},
[]byte{10, 10, 8},
[]byte{10, 13},
[]byte{10, 13, 0},
[]byte{10, 13, 13},
[]byte{10, 14},
[]byte{10, 14, 4},
[]byte{10, 14, 11},
[]byte{10, 14, 11, 8},
[]byte{10, 14, 11, 14},
[]byte{10, 15},
[]byte{11},
[]byte{11, 0},
[]byte{11, 0, 2},
[]byte{11, 0, 15},
[]byte{11, 1},
[]byte{11, 2},
[]byte{11, 3},
[]byte{11, 4},
[]byte{11, 5},
[]byte{11, 7},
[]byte{11, 7, 12},
[]byte{11, 7, 15},
[]byte{11, 8},
[]byte{11, 8, 8},
[]byte{11, 8, 15},
[]byte{11, 9},
[]byte{11, 11},
[]byte{11, 12},
[]byte{11, 13},
[]byte{11, 14},
[]byte{11, 14, 0},
[]byte{11, 14, 0, 1},
[]byte{11, 14, 0, 3},
[]byte{11, 14, 8},
[]byte{11, 14, 13},
[]byte{12},
[]byte{12, 0},
[]byte{12, 0, 0},
[]byte{12, 0, 1},
[]byte{12, 0, 1, 3},
[]byte{12, 0, 1, 11},
[]byte{12, 0, 15},
[]byte{12, 2},
[]byte{12, 2, 9},
[]byte{12, 2, 12},
[]byte{12, 4},
[]byte{12, 5},
[]byte{12, 6},
[]byte{12, 6, 0},
[]byte{12, 6, 4},
[]byte{12, 6, 14},
[]byte{12, 7},
[]byte{12, 7, 0},
[]byte{12, 7, 12},
[]byte{12, 7, 13},
[]byte{12, 9},
[]byte{12, 11},
[]byte{12, 12},
[]byte{13},
[]byte{13, 2},
[]byte{13, 2, 0},
[]byte{13, 2, 2},
[]byte{13, 2, 4},
[]byte{13, 3},
[]byte{13, 3, 7},
[]byte{13, 3, 10},
[]byte{13, 5},
[]byte{13, 8},
[]byte{13, 8, 1},
[]byte{13, 8, 15},
[]byte{13, 9},
[]byte{13, 9, 0},
[]byte{13, 9, 14},
[]byte{13, 10},
[]byte{13, 12},
[]byte{13, 12, 8},
[]byte{13, 12, 11},
[]byte{13, 13},
[]byte{13, 13, 7},
[]byte{13, 13, 12},
[]byte{13, 14},
[]byte{14},
[]byte{14, 0},
[]byte{14, 1},
[]byte{14, 2},
[]byte{14, 2, 2},
[]byte{14, 2, 12},
[]byte{14, 3},
[]byte{14, 4},
[]byte{14, 5},
[]byte{14, 6},
[]byte{14, 6, 9},
[]byte{14, 6, 12},
[]byte{14, 7},
[]byte{14, 7, 4},
[]byte{14, 7, 12},
[]byte{14, 8},
[]byte{14, 8, 3},
[]byte{14, 8, 12},
[]byte{14, 8, 12, 0},
[]byte{14, 8, 12, 6},
[]byte{14, 10},
[]byte{14, 10, 6},
[]byte{14, 10, 12},
[]byte{14, 11},
[]byte{14, 11, 8},
[]byte{14, 11, 13},
[]byte{14, 12},
[]byte{14, 14},
[]byte{14, 14, 3},
[]byte{14, 14, 9},
[]byte{15},
[]byte{15, 0},
[]byte{15, 5},
[]byte{15, 6},
[]byte{15, 9},
[]byte{15, 9, 0},
[]byte{15, 9, 2},
[]byte{15, 9, 3},
[]byte{15, 11},
[]byte{15, 11, 1},
[]byte{15, 11, 6},
[]byte{15, 12},
[]byte{15, 12, 3},
[]byte{15, 12, 14},
[]byte{15, 12, 14, 7},
[]byte{15, 12, 14, 13},
[]byte{15, 13},
[]byte{15, 14},
[]byte{15, 15},
}


@@ -1,36 +0,0 @@
package fixture
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
)
var Block1_Header = types.Header{
ParentHash: common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177"),
UncleHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
Coinbase: common.HexToAddress("0x0000000000000000000000000000000000000000"),
Root: common.HexToHash("0x53580584816f617295ea26c0e17641e0120cab2f0a8ffb53a866fd53aa8e8c2d"),
TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
Bloom: types.Bloom{},
Difficulty: big.NewInt(+2),
Number: big.NewInt(+1),
GasLimit: 4704588,
GasUsed: 0,
Time: 1492010458,
Extra: []byte{215, 131, 1, 6, 0, 132, 103, 101, 116, 104, 135, 103, 111, 49, 46, 55, 46, 51, 133, 108, 105, 110, 117, 120, 0, 0, 0, 0, 0, 0, 0, 0, 159, 30, 250, 30, 250, 114, 175, 19, 140, 145, 89, 102, 198, 57, 84, 74, 2, 85, 230, 40, 142, 24, 140, 34, 206, 145, 104, 193, 13, 190, 70, 218, 61, 136, 180, 170, 6, 89, 48, 17, 159, 184, 134, 33, 11, 240, 26, 8, 79, 222, 93, 59, 196, 141, 138, 163, 139, 202, 146, 228, 252, 197, 33, 81, 0},
MixDigest: common.Hash{},
Nonce: types.BlockNonce{},
BaseFee: nil,
}
var Block1_StateNode0 = snapt.Node{
NodeType: 0,
Path: []byte{12, 0},
Key: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
Value: []byte{248, 113, 160, 147, 141, 92, 6, 119, 63, 191, 125, 121, 193, 230, 153, 223, 49, 102, 109, 236, 50, 44, 161, 215, 28, 224, 171, 111, 118, 230, 79, 99, 18, 99, 4, 160, 117, 126, 95, 187, 60, 115, 90, 36, 51, 167, 59, 86, 20, 175, 63, 118, 94, 230, 107, 202, 41, 253, 234, 165, 214, 221, 181, 45, 9, 202, 244, 148, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 160, 247, 170, 155, 102, 71, 245, 140, 90, 255, 89, 193, 131, 99, 31, 85, 161, 78, 90, 0, 204, 46, 253, 15, 71, 120, 19, 109, 123, 255, 0, 188, 27, 128},
}

go.mod

@@ -1,53 +1,58 @@
-module github.com/vulcanize/ipld-eth-state-snapshot
+module github.com/cerc-io/ipld-eth-state-snapshot
-go 1.18
+go 1.21
 require (
-	github.com/ethereum/go-ethereum v1.10.19
+	github.com/cerc-io/eth-iterator-utils v0.3.1
+	github.com/cerc-io/eth-testing v0.5.1
+	github.com/cerc-io/plugeth-statediff v0.3.1
+	github.com/ethereum/go-ethereum v1.14.5
 	github.com/golang/mock v1.6.0
-	github.com/ipfs/go-cid v0.1.0
-	github.com/ipfs/go-ipfs-blockstore v1.1.2
-	github.com/ipfs/go-ipfs-ds-help v1.1.0
-	github.com/jmoiron/sqlx v1.2.0
-	github.com/multiformats/go-multihash v0.1.0
-	github.com/prometheus/client_golang v1.3.0
-	github.com/sirupsen/logrus v1.6.0
-	github.com/spf13/cobra v1.0.0
-	github.com/spf13/viper v1.7.0
-	github.com/vulcanize/go-eth-state-node-iterator v1.1.1
+	github.com/prometheus/client_golang v1.16.0
+	github.com/sirupsen/logrus v1.9.3
+	github.com/spf13/cobra v1.5.0
+	github.com/spf13/viper v1.12.0
+	github.com/stretchr/testify v1.8.4
 )
 require (
-	github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
-	github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
+	github.com/DataDog/zstd v1.5.5 // indirect
+	github.com/Microsoft/go-winio v0.6.2 // indirect
+	github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/bits-and-blooms/bitset v1.10.0 // indirect
+	github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/cockroachdb/errors v1.11.1 // indirect
+	github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
+	github.com/cockroachdb/pebble v1.1.0 // indirect
+	github.com/cockroachdb/redact v1.1.5 // indirect
+	github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
+	github.com/consensys/bavard v0.1.13 // indirect
+	github.com/consensys/gnark-crypto v0.12.1 // indirect
+	github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c // indirect
+	github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
-	github.com/fsnotify/fsnotify v1.5.1 // indirect
-	github.com/georgysavva/scany v0.2.9 // indirect
-	github.com/go-kit/kit v0.10.0 // indirect
-	github.com/go-ole/go-ole v1.2.1 // indirect
-	github.com/go-stack/stack v1.8.0 // indirect
+	github.com/deckarep/golang-set/v2 v2.6.0 // indirect
+	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
+	github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
+	github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/georgysavva/scany v1.2.1 // indirect
+	github.com/getsentry/sentry-go v0.22.0 // indirect
+	github.com/go-ole/go-ole v1.3.0 // indirect
+	github.com/go-stack/stack v1.8.1 // indirect
+	github.com/gofrs/flock v0.8.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/go-cmp v0.5.6 // indirect
-	github.com/google/uuid v1.3.0 // indirect
-	github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c // indirect
-	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
+	github.com/golang/protobuf v1.5.4 // indirect
+	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
+	github.com/gorilla/websocket v1.5.0 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
+	github.com/holiman/uint256 v1.2.4 // indirect
+	github.com/inconshreveable/log15 v2.16.0+incompatible // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
-	github.com/ipfs/bbloom v0.0.4 // indirect
-	github.com/ipfs/go-block-format v0.0.3 // indirect
-	github.com/ipfs/go-datastore v0.5.1 // indirect
-	github.com/ipfs/go-ipfs-util v0.0.2 // indirect
-	github.com/ipfs/go-ipld-format v0.2.0 // indirect
-	github.com/ipfs/go-log v1.0.5 // indirect
-	github.com/ipfs/go-log/v2 v2.4.0 // indirect
-	github.com/ipfs/go-metrics-interface v0.0.1 // indirect
+	github.com/ipfs/go-cid v0.4.1 // indirect
 	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
 	github.com/jackc/pgconn v1.11.0 // indirect
 	github.com/jackc/pgio v1.0.0 // indirect
@@ -57,61 +62,62 @@ require (
 	github.com/jackc/pgtype v1.10.0 // indirect
 	github.com/jackc/pgx/v4 v4.15.0 // indirect
 	github.com/jackc/puddle v1.2.1 // indirect
-	github.com/jbenet/goprocess v0.1.4 // indirect
-	github.com/klauspost/cpuid/v2 v2.0.9 // indirect
-	github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
-	github.com/kr/pretty v0.3.0 // indirect
-	github.com/lib/pq v1.10.2 // indirect
-	github.com/magiconair/properties v1.8.1 // indirect
-	github.com/mattn/go-isatty v0.0.14 // indirect
-	github.com/mattn/go-runewidth v0.0.9 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
-	github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
-	github.com/minio/sha256-simd v1.0.0 // indirect
-	github.com/mitchellh/mapstructure v1.4.1 // indirect
+	github.com/jmoiron/sqlx v1.3.5 // indirect
+	github.com/klauspost/compress v1.16.7 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
+	github.com/kr/pretty v0.3.1 // indirect
+	github.com/kr/text v0.2.0 // indirect
+	github.com/lib/pq v1.10.9 // indirect
+	github.com/magiconair/properties v1.8.6 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/mattn/go-runewidth v0.0.14 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+	github.com/minio/sha256-simd v1.0.1 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/mmcloughlin/addchain v0.4.0 // indirect
 	github.com/mr-tron/base58 v1.2.0 // indirect
-	github.com/multiformats/go-base32 v0.0.4 // indirect
-	github.com/multiformats/go-base36 v0.1.0 // indirect
-	github.com/multiformats/go-multibase v0.0.3 // indirect
-	github.com/multiformats/go-varint v0.0.6 // indirect
+	github.com/multiformats/go-base32 v0.1.0 // indirect
+	github.com/multiformats/go-base36 v0.2.0 // indirect
+	github.com/multiformats/go-multibase v0.2.0 // indirect
+	github.com/multiformats/go-multihash v0.2.3 // indirect
+	github.com/multiformats/go-varint v0.0.7 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
-	github.com/onsi/ginkgo v1.16.5 // indirect
-	github.com/onsi/gomega v1.13.0 // indirect
-	github.com/opentracing/opentracing-go v1.2.0 // indirect
-	github.com/pelletier/go-toml v1.2.0 // indirect
+	github.com/openrelayxyz/plugeth-utils v1.5.0 // indirect
+	github.com/pelletier/go-toml v1.9.5 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.5 // indirect
+	github.com/pganalyze/pg_query_go/v4 v4.2.1 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_model v0.1.0 // indirect
-	github.com/prometheus/common v0.7.0 // indirect
-	github.com/prometheus/procfs v0.0.8 // indirect
-	github.com/prometheus/tsdb v0.7.1 // indirect
-	github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect
-	github.com/smartystreets/assertions v1.0.0 // indirect
+	github.com/prometheus/client_model v0.4.0 // indirect
+	github.com/prometheus/common v0.44.0 // indirect
+	github.com/prometheus/procfs v0.11.0 // indirect
+	github.com/rivo/uniseg v0.4.4 // indirect
+	github.com/rogpeppe/go-internal v1.12.0 // indirect
+	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
+	github.com/shopspring/decimal v1.2.0 // indirect
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
-	github.com/spf13/afero v1.1.2 // indirect
-	github.com/spf13/cast v1.3.0 // indirect
-	github.com/spf13/jwalterweatherman v1.0.0 // indirect
-	github.com/spf13/pflag v1.0.3 // indirect
-	github.com/stretchr/objx v0.2.0 // indirect
-	github.com/stretchr/testify v1.7.0 // indirect
-	github.com/subosito/gotenv v1.2.0 // indirect
-	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
-	github.com/tklauser/go-sysconf v0.3.5 // indirect
-	github.com/tklauser/numcpus v0.2.2 // indirect
-	go.uber.org/atomic v1.9.0 // indirect
-	go.uber.org/goleak v1.1.11 // indirect
-	go.uber.org/multierr v1.7.0 // indirect
-	go.uber.org/zap v1.19.1 // indirect
-	golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b // indirect
-	golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
-	golang.org/x/sys v0.0.0-20211209171907-798191bca915 // indirect
-	golang.org/x/text v0.3.7 // indirect
-	google.golang.org/appengine v1.6.6 // indirect
-	google.golang.org/protobuf v1.27.1 // indirect
-	gopkg.in/ini.v1 v1.51.0 // indirect
+	github.com/spf13/afero v1.8.2 // indirect
+	github.com/spf13/cast v1.5.0 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/subosito/gotenv v1.3.0 // indirect
+	github.com/supranational/blst v0.3.11 // indirect
+	github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect
+	github.com/thoas/go-funk v0.9.3 // indirect
+	github.com/tklauser/go-sysconf v0.3.12 // indirect
+	github.com/tklauser/numcpus v0.6.1 // indirect
+	github.com/yusufpapurcu/wmi v1.2.3 // indirect
+	golang.org/x/crypto v0.22.0 // indirect
+	golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
+	golang.org/x/sync v0.7.0 // indirect
+	golang.org/x/sys v0.20.0 // indirect
+	golang.org/x/term v0.19.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
+	google.golang.org/protobuf v1.33.0 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
-	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
-	lukechampine.com/blake3 v1.1.7 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	lukechampine.com/blake3 v1.2.1 // indirect
+	rsc.io/tmplfunc v0.0.3 // indirect
 )
-replace github.com/ethereum/go-ethereum v1.10.19 => github.com/vulcanize/go-ethereum v1.10.19-statediff-4.1.0-alpha

go.sum

File diff suppressed because it is too large.

@@ -0,0 +1,256 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/cerc-io/plugeth-statediff/indexer (interfaces: Indexer)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
big "math/big"
reflect "reflect"
time "time"
interfaces "github.com/cerc-io/plugeth-statediff/indexer/interfaces"
models "github.com/cerc-io/plugeth-statediff/indexer/models"
types "github.com/cerc-io/plugeth-statediff/types"
common "github.com/ethereum/go-ethereum/common"
types0 "github.com/ethereum/go-ethereum/core/types"
gomock "github.com/golang/mock/gomock"
)
// MockgenIndexer is a mock of Indexer interface.
type MockgenIndexer struct {
ctrl *gomock.Controller
recorder *MockgenIndexerMockRecorder
}
// MockgenIndexerMockRecorder is the mock recorder for MockgenIndexer.
type MockgenIndexerMockRecorder struct {
mock *MockgenIndexer
}
// NewMockgenIndexer creates a new mock instance.
func NewMockgenIndexer(ctrl *gomock.Controller) *MockgenIndexer {
mock := &MockgenIndexer{ctrl: ctrl}
mock.recorder = &MockgenIndexerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockgenIndexer) EXPECT() *MockgenIndexerMockRecorder {
return m.recorder
}
// BeginTx mocks base method.
func (m *MockgenIndexer) BeginTx(arg0 *big.Int, arg1 context.Context) interfaces.Batch {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BeginTx", arg0, arg1)
ret0, _ := ret[0].(interfaces.Batch)
return ret0
}
// BeginTx indicates an expected call of BeginTx.
func (mr *MockgenIndexerMockRecorder) BeginTx(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginTx", reflect.TypeOf((*MockgenIndexer)(nil).BeginTx), arg0, arg1)
}
// ClearWatchedAddresses mocks base method.
func (m *MockgenIndexer) ClearWatchedAddresses() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClearWatchedAddresses")
ret0, _ := ret[0].(error)
return ret0
}
// ClearWatchedAddresses indicates an expected call of ClearWatchedAddresses.
func (mr *MockgenIndexerMockRecorder) ClearWatchedAddresses() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClearWatchedAddresses", reflect.TypeOf((*MockgenIndexer)(nil).ClearWatchedAddresses))
}
// Close mocks base method.
func (m *MockgenIndexer) Close() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Close")
ret0, _ := ret[0].(error)
return ret0
}
// Close indicates an expected call of Close.
func (mr *MockgenIndexerMockRecorder) Close() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockgenIndexer)(nil).Close))
}
// CurrentBlock mocks base method.
func (m *MockgenIndexer) CurrentBlock() (*models.HeaderModel, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CurrentBlock")
ret0, _ := ret[0].(*models.HeaderModel)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CurrentBlock indicates an expected call of CurrentBlock.
func (mr *MockgenIndexerMockRecorder) CurrentBlock() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentBlock", reflect.TypeOf((*MockgenIndexer)(nil).CurrentBlock))
}
// DetectGaps mocks base method.
func (m *MockgenIndexer) DetectGaps(arg0, arg1 uint64) ([]*interfaces.BlockGap, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DetectGaps", arg0, arg1)
ret0, _ := ret[0].([]*interfaces.BlockGap)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DetectGaps indicates an expected call of DetectGaps.
func (mr *MockgenIndexerMockRecorder) DetectGaps(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetectGaps", reflect.TypeOf((*MockgenIndexer)(nil).DetectGaps), arg0, arg1)
}
// HasBlock mocks base method.
func (m *MockgenIndexer) HasBlock(arg0 common.Hash, arg1 uint64) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "HasBlock", arg0, arg1)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// HasBlock indicates an expected call of HasBlock.
func (mr *MockgenIndexerMockRecorder) HasBlock(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasBlock", reflect.TypeOf((*MockgenIndexer)(nil).HasBlock), arg0, arg1)
}
// InsertWatchedAddresses mocks base method.
func (m *MockgenIndexer) InsertWatchedAddresses(arg0 []types.WatchAddressArg, arg1 *big.Int) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InsertWatchedAddresses", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// InsertWatchedAddresses indicates an expected call of InsertWatchedAddresses.
func (mr *MockgenIndexerMockRecorder) InsertWatchedAddresses(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertWatchedAddresses", reflect.TypeOf((*MockgenIndexer)(nil).InsertWatchedAddresses), arg0, arg1)
}
// LoadWatchedAddresses mocks base method.
func (m *MockgenIndexer) LoadWatchedAddresses() ([]common.Address, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LoadWatchedAddresses")
ret0, _ := ret[0].([]common.Address)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// LoadWatchedAddresses indicates an expected call of LoadWatchedAddresses.
func (mr *MockgenIndexerMockRecorder) LoadWatchedAddresses() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadWatchedAddresses", reflect.TypeOf((*MockgenIndexer)(nil).LoadWatchedAddresses))
}
// PushBlock mocks base method.
func (m *MockgenIndexer) PushBlock(arg0 *types0.Block, arg1 types0.Receipts, arg2 *big.Int) (interfaces.Batch, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PushBlock", arg0, arg1, arg2)
ret0, _ := ret[0].(interfaces.Batch)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PushBlock indicates an expected call of PushBlock.
func (mr *MockgenIndexerMockRecorder) PushBlock(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushBlock", reflect.TypeOf((*MockgenIndexer)(nil).PushBlock), arg0, arg1, arg2)
}
// PushHeader mocks base method.
func (m *MockgenIndexer) PushHeader(arg0 interfaces.Batch, arg1 *types0.Header, arg2, arg3 *big.Int) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PushHeader", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PushHeader indicates an expected call of PushHeader.
func (mr *MockgenIndexerMockRecorder) PushHeader(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushHeader", reflect.TypeOf((*MockgenIndexer)(nil).PushHeader), arg0, arg1, arg2, arg3)
}
// PushIPLD mocks base method.
func (m *MockgenIndexer) PushIPLD(arg0 interfaces.Batch, arg1 types.IPLD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PushIPLD", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// PushIPLD indicates an expected call of PushIPLD.
func (mr *MockgenIndexerMockRecorder) PushIPLD(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushIPLD", reflect.TypeOf((*MockgenIndexer)(nil).PushIPLD), arg0, arg1)
}
// PushStateNode mocks base method.
func (m *MockgenIndexer) PushStateNode(arg0 interfaces.Batch, arg1 types.StateLeafNode, arg2 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PushStateNode", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// PushStateNode indicates an expected call of PushStateNode.
func (mr *MockgenIndexerMockRecorder) PushStateNode(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushStateNode", reflect.TypeOf((*MockgenIndexer)(nil).PushStateNode), arg0, arg1, arg2)
}
// RemoveWatchedAddresses mocks base method.
func (m *MockgenIndexer) RemoveWatchedAddresses(arg0 []types.WatchAddressArg) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RemoveWatchedAddresses", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// RemoveWatchedAddresses indicates an expected call of RemoveWatchedAddresses.
func (mr *MockgenIndexerMockRecorder) RemoveWatchedAddresses(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveWatchedAddresses", reflect.TypeOf((*MockgenIndexer)(nil).RemoveWatchedAddresses), arg0)
}
// ReportDBMetrics mocks base method.
func (m *MockgenIndexer) ReportDBMetrics(arg0 time.Duration, arg1 <-chan bool) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "ReportDBMetrics", arg0, arg1)
}
// ReportDBMetrics indicates an expected call of ReportDBMetrics.
func (mr *MockgenIndexerMockRecorder) ReportDBMetrics(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportDBMetrics", reflect.TypeOf((*MockgenIndexer)(nil).ReportDBMetrics), arg0, arg1)
}
// SetWatchedAddresses mocks base method.
func (m *MockgenIndexer) SetWatchedAddresses(arg0 []types.WatchAddressArg, arg1 *big.Int) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetWatchedAddresses", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// SetWatchedAddresses indicates an expected call of SetWatchedAddresses.
func (mr *MockgenIndexerMockRecorder) SetWatchedAddresses(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWatchedAddresses", reflect.TypeOf((*MockgenIndexer)(nil).SetWatchedAddresses), arg0, arg1)
}
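For context on how a generated gomock mock like this is typically driven, here is a hypothetical test sketch (not part of the changeset; it assumes the generated file lives alongside the `internal/mocks` package shown below):

```
package mocks_test

import (
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/golang/mock/gomock"

	"github.com/cerc-io/ipld-eth-state-snapshot/internal/mocks"
)

func TestMockgenIndexer(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish() // verifies all expected calls were made

	idx := mocks.NewMockgenIndexer(ctrl)
	// Stub a single expected HasBlock call for any arguments.
	idx.EXPECT().HasBlock(gomock.Any(), gomock.Any()).Return(true, nil)

	ok, err := idx.HasBlock(common.Hash{}, 1)
	if !ok || err != nil {
		t.Fatalf("unexpected result: %v, %v", ok, err)
	}
}
```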

internal/mocks/indexer.go

@@ -0,0 +1,88 @@
package mocks
import (
"context"
"fmt"
"math/big"
"sync"
"testing"
"github.com/cerc-io/plugeth-statediff/indexer"
sdtypes "github.com/cerc-io/plugeth-statediff/types"
"github.com/ethereum/go-ethereum/core/types"
"github.com/golang/mock/gomock"
)
// Indexer caches the data pushed to it; it also embeds a gomock instance so any other methods can be mocked if needed
type Indexer struct {
*MockgenIndexer
sync.RWMutex
IndexerData
}
type IndexerData struct {
Headers map[uint64]*types.Header
StateNodes []sdtypes.StateLeafNode
IPLDs []sdtypes.IPLD
}
// no-op mock Batch
type Batch struct{}
// NewIndexer returns a mock indexer that caches data in lists
func NewIndexer(t *testing.T) *Indexer {
ctl := gomock.NewController(t)
return &Indexer{
MockgenIndexer: NewMockgenIndexer(ctl),
IndexerData: IndexerData{
Headers: make(map[uint64]*types.Header),
},
}
}
func (i *Indexer) PushHeader(_ indexer.Batch, header *types.Header, _, _ *big.Int) (string, error) {
i.Lock()
defer i.Unlock()
i.Headers[header.Number.Uint64()] = header
return header.Hash().String(), nil
}
func (i *Indexer) PushStateNode(_ indexer.Batch, stateNode sdtypes.StateLeafNode, _ string) error {
i.Lock()
defer i.Unlock()
i.StateNodes = append(i.StateNodes, stateNode)
return nil
}
func (i *Indexer) PushIPLD(_ indexer.Batch, ipld sdtypes.IPLD) error {
i.Lock()
defer i.Unlock()
i.IPLDs = append(i.IPLDs, ipld)
return nil
}
func (i *Indexer) BeginTx(_ *big.Int, _ context.Context) indexer.Batch {
return Batch{}
}
func (Batch) Submit() error { return nil }
func (Batch) BlockNumber() string { return "0" }
func (Batch) RollbackOnFailure(error) {}
// InterruptingIndexer triggers an artificial failure at a specific node count
type InterruptingIndexer struct {
*Indexer
InterruptAfter uint
}
func (i *InterruptingIndexer) PushStateNode(b indexer.Batch, stateNode sdtypes.StateLeafNode, h string) error {
i.RLock()
indexedCount := len(i.StateNodes)
i.RUnlock()
if indexedCount >= int(i.InterruptAfter) {
return fmt.Errorf("mock interrupt")
}
return i.Indexer.PushStateNode(b, stateNode, h)
}
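To illustrate the interrupting mock, a hypothetical test sketch (not part of the changeset; it assumes `mocks.Batch` satisfies the indexer's batch interface, as `BeginTx` suggests):

```
package snapshot_test

import (
	"testing"

	sdtypes "github.com/cerc-io/plugeth-statediff/types"

	"github.com/cerc-io/ipld-eth-state-snapshot/internal/mocks"
)

func TestInterruptingIndexer(t *testing.T) {
	idx := mocks.NewIndexer(t)
	// With InterruptAfter set to 0, the very first PushStateNode call fails,
	// simulating a snapshot run that dies before indexing any state nodes.
	failing := &mocks.InterruptingIndexer{Indexer: idx, InterruptAfter: 0}
	if err := failing.PushStateNode(mocks.Batch{}, sdtypes.StateLeafNode{}, ""); err == nil {
		t.Fatal("expected mock interrupt error")
	}
}
```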


@@ -18,7 +18,7 @@ package main
 import (
 	"github.com/sirupsen/logrus"
-	"github.com/vulcanize/ipld-eth-state-snapshot/cmd"
+	"github.com/cerc-io/ipld-eth-state-snapshot/cmd"
 )
 func main() {


@@ -19,12 +19,12 @@ package prom
 import (
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+	mets "github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
 )
 // DBStatsGetter is an interface that gets sql.DBStats.
 type DBStatsGetter interface {
-	Stats() sql.Stats
+	Stats() mets.DbStats
 }
 // DBStatsCollector implements the prometheus.Collector interface.


@@ -33,7 +33,6 @@ var (
 	stateNodeCount prometheus.Counter
 	storageNodeCount prometheus.Counter
-	codeNodeCount prometheus.Counter
 )
 func Init() {
@@ -52,13 +51,16 @@ func Init() {
 		Name: "storage_node_count",
 		Help: "Number of storage nodes processed",
 	})
-	codeNodeCount = promauto.NewCounter(prometheus.CounterOpts{
-		Namespace: namespace,
-		Subsystem: statsSubsystem,
-		Name: "code_node_count",
-		Help: "Number of code nodes processed",
-	})
+}
+func RegisterGaugeFunc(name string, function func() float64) {
+	promauto.NewGaugeFunc(
+		prometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: statsSubsystem,
+			Name: name,
+			Help: name,
+		}, function)
 }
 // RegisterDBCollector create metric collector for given connection
@@ -75,16 +77,13 @@ func IncStateNodeCount() {
 	}
 }
-// IncStorageNodeCount increments the number of storage nodes processed
-func IncStorageNodeCount() {
-	if metrics {
-		storageNodeCount.Inc()
+// AddStorageNodeCount increments the number of storage nodes processed
+func AddStorageNodeCount(count int) {
+	if metrics && count > 0 {
+		storageNodeCount.Add(float64(count))
 	}
 }
-// IncCodeNodeCount increments the number of code nodes processed
-func IncCodeNodeCount() {
-	if metrics {
-		codeNodeCount.Inc()
-	}
+func Enabled() bool {
+	return metrics
 }
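The new `RegisterGaugeFunc` hook is what the progress-tracking file below builds on. As a standalone illustration (hypothetical usage, not part of the changeset, assuming the package's path under the renamed module):

```
package main

import "github.com/cerc-io/ipld-eth-state-snapshot/pkg/prom"

func main() {
	// The registered function is sampled lazily on every Prometheus scrape.
	prom.RegisterGaugeFunc("example_progress", func() float64 {
		return 42.0 // e.g. percent complete
	})
}
```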

pkg/prom/tracker.go

@@ -0,0 +1,174 @@
package prom
import (
"bytes"
"fmt"
"sync"
"sync/atomic"
iterutil "github.com/cerc-io/eth-iterator-utils"
"github.com/cerc-io/eth-iterator-utils/tracker"
"github.com/ethereum/go-ethereum/trie"
)
var trackedIterCount atomic.Int32
// MetricsTracker wraps tracked iterators in metrics-reporting iterators
type MetricsTracker struct {
*tracker.TrackerImpl
}
type metricsIterator struct {
trie.NodeIterator
id int32
// count uint
done bool
lastPath []byte
sync.RWMutex
}
func NewTracker(file string, bufsize uint) *MetricsTracker {
return &MetricsTracker{TrackerImpl: tracker.NewImpl(file, bufsize)}
}
func (t *MetricsTracker) wrap(tracked *tracker.Iterator) *metricsIterator {
startPath, endPath := tracked.Bounds()
pathDepth := max(max(len(startPath), len(endPath)), 1)
totalSteps := estimateSteps(startPath, endPath, pathDepth)
ret := &metricsIterator{
NodeIterator: tracked,
id: trackedIterCount.Add(1),
}
RegisterGaugeFunc(
fmt.Sprintf("tracked_iterator_%d", ret.id),
func() float64 {
ret.RLock()
done := ret.done
lastPath := ret.lastPath
ret.RUnlock()
if done {
return 100.0
}
if lastPath == nil {
return 0.0
}
// estimate remaining distance based on current position and node count
remainingSteps := estimateSteps(lastPath, endPath, pathDepth)
return (float64(totalSteps) - float64(remainingSteps)) / float64(totalSteps) * 100.0
})
return ret
}
func (t *MetricsTracker) Restore(ctor iterutil.IteratorConstructor) (
[]trie.NodeIterator, []trie.NodeIterator, error,
) {
iters, bases, err := t.TrackerImpl.Restore(ctor)
if err != nil {
return nil, nil, err
}
ret := make([]trie.NodeIterator, len(iters))
for i, tracked := range iters {
ret[i] = t.wrap(tracked)
}
return ret, bases, nil
}
func (t *MetricsTracker) Tracked(it trie.NodeIterator) trie.NodeIterator {
tracked := t.TrackerImpl.Tracked(it)
return t.wrap(tracked)
}
func (it *metricsIterator) Next(descend bool) bool {
ret := it.NodeIterator.Next(descend)
it.Lock()
defer it.Unlock()
if ret {
it.lastPath = it.Path()
} else {
it.done = true
}
return ret
}
// Estimate the number of iterations necessary to step from start to end.
func estimateSteps(start []byte, end []byte, depth int) uint64 {
// We see paths in several forms (nil, 0600, 06, etc.). We need to adjust them to a comparable form.
// For nil, start and end indicate the extremes of 0x0 and 0x10. For differences in depth, we often see a
// start/end range on a bounded iterator specified like 0500:0600, while the value returned by it.Path() may
// be shorter, like 06. Since our goal is to estimate how many steps it would take to move from start to end,
// we want to perform the comparison at a stable depth, since to move from 05 to 06 is only 1 step, but
to move from 0500 to 06 is 16.
normalizePathRange := func(start []byte, end []byte, depth int) ([]byte, []byte) {
if 0 == len(start) {
start = []byte{0x0}
}
if 0 == len(end) {
end = []byte{0x10}
}
normalizedStart := make([]byte, depth)
normalizedEnd := make([]byte, depth)
for i := 0; i < depth; i++ {
if i < len(start) {
normalizedStart[i] = start[i]
}
if i < len(end) {
normalizedEnd[i] = end[i]
}
}
return normalizedStart, normalizedEnd
}
// We have no need to handle negative exponents, so uints are fine.
pow := func(x uint64, y uint) uint64 {
// Accumulate from 1 so that pow(x, 0) == 1 and the product, not the base, is returned.
ret := uint64(1)
for i := uint(0); i < y; i++ {
ret *= x
}
return ret
}
// Fix the paths.
start, end = normalizePathRange(start, end, depth)
// No negative distances, if the start is already >= end, the distance is 0.
if bytes.Compare(start, end) >= 0 {
return 0
}
// Subtract each component, right to left, carrying over if necessary.
difference := make([]byte, len(start))
var carry byte = 0
for i := len(start) - 1; i >= 0; i-- {
result := end[i] - start[i] - carry
if result > 0xf && i > 0 {
result &= 0xf
carry = 1
} else {
carry = 0
}
difference[i] = result
}
// Calculate the result.
var ret uint64 = 0
for i := 0; i < len(difference); i++ {
ret += uint64(difference[i]) * pow(16, uint(len(difference)-i-1))
}
return ret
}
func max(a int, b int) int {
if a > b {
return a
}
return b
}
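A worked example of the estimate, as a hypothetical table-driven test sketch for the same package (using the corrected `pow` above):

```
package prom

import "testing"

func TestEstimateSteps(t *testing.T) {
	cases := []struct {
		start, end []byte
		depth      int
		want       uint64
	}{
		{[]byte{0x5}, []byte{0x6}, 1, 1},   // 05 -> 06 at depth 1: a single step
		{[]byte{0x5}, []byte{0x6}, 2, 16},  // same bounds at depth 2: 16 slots (0500..0600)
		{[]byte{0x5}, []byte{0x6}, 3, 256}, // and at depth 3: 16^2 slots
		{nil, nil, 1, 16},                  // nil bounds normalize to the full keyspace 0x0..0x10
		{[]byte{0x6}, []byte{0x5}, 1, 0},   // start >= end: distance is 0
	}
	for _, c := range cases {
		if got := estimateSteps(c.start, c.end, c.depth); got != c.want {
			t.Errorf("estimateSteps(%x, %x, %d) = %d, want %d", c.start, c.end, c.depth, got, c.want)
		}
	}
}
```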


@@ -19,10 +19,13 @@ import (
 	"fmt"
 	"time"
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/sirupsen/logrus"
-	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
-	ethNode "github.com/ethereum/go-ethereum/statediff/indexer/node"
+	"github.com/cerc-io/plugeth-statediff/indexer/database/file"
+	"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
+	ethNode "github.com/cerc-io/plugeth-statediff/indexer/node"
 	"github.com/spf13/viper"
 )
@@ -38,50 +41,55 @@ const (
 // Config contains params for both databases the service uses
 type Config struct {
-	Eth *EthConfig
+	Eth *EthDBConfig
 	DB *DBConfig
 	File *FileConfig
+	Service *ServiceConfig
 }
-// EthConfig is config parameters for the chain.
-type EthConfig struct {
-	LevelDBPath string
+// EthDBConfig is config parameters for the chain DB.
+type EthDBConfig struct {
+	DBPath string
 	AncientDBPath string
 	NodeInfo ethNode.Info
 }
-// DBConfig is config parameters for DB.
-type DBConfig struct {
-	URI string
-	ConnConfig postgres.Config
-}
-type FileConfig struct {
-	OutputDir string
+// DBConfig contains options for DB output mode.
+type DBConfig = postgres.Config
+// FileConfig contains options for file output mode. Note that this service currently only supports
+// CSV output, and does not record watched addresses, so not all fields are used.
+type FileConfig = file.Config
+type ServiceConfig struct {
+	AllowedAccounts []common.Address
 }
 func NewConfig(mode SnapshotMode) (*Config, error) {
 	ret := &Config{
-		&EthConfig{},
+		&EthDBConfig{},
 		&DBConfig{},
 		&FileConfig{},
+		&ServiceConfig{},
 	}
 	return ret, ret.Init(mode)
 }
 func NewInPlaceSnapshotConfig() *Config {
 	ret := &Config{
-		&EthConfig{},
+		&EthDBConfig{},
 		&DBConfig{},
 		&FileConfig{},
+		&ServiceConfig{},
 	}
-	ret.DB.Init()
+	InitDB(ret.DB)
 	return ret
 }
 // Init Initialises config
 func (c *Config) Init(mode SnapshotMode) error {
+	viper.BindEnv(LOG_FILE_TOML, LOG_FILE)
 	viper.BindEnv(ETH_NODE_ID_TOML, ETH_NODE_ID)
 	viper.BindEnv(ETH_CLIENT_NAME_TOML, ETH_CLIENT_NAME)
 	viper.BindEnv(ETH_GENESIS_BLOCK_TOML, ETH_GENESIS_BLOCK)
@@ -96,24 +104,27 @@ func (c *Config) Init(mode SnapshotMode) error {
 		ChainID: viper.GetUint64(ETH_CHAIN_ID_TOML),
 	}
-	viper.BindEnv(ANCIENT_DB_PATH_TOML, ANCIENT_DB_PATH)
-	viper.BindEnv(LVL_DB_PATH_TOML, LVL_DB_PATH)
-	c.Eth.AncientDBPath = viper.GetString(ANCIENT_DB_PATH_TOML)
-	c.Eth.LevelDBPath = viper.GetString(LVL_DB_PATH_TOML)
+	viper.BindEnv(ETHDB_ANCIENT_TOML, ETHDB_ANCIENT)
+	viper.BindEnv(ETHDB_PATH_TOML, ETHDB_PATH)
+	c.Eth.DBPath = viper.GetString(ETHDB_PATH_TOML)
+	c.Eth.AncientDBPath = viper.GetString(ETHDB_ANCIENT_TOML)
+	if len(c.Eth.AncientDBPath) == 0 {
+		c.Eth.AncientDBPath = c.Eth.DBPath + "/ancient"
+	}
 	switch mode {
 	case FileSnapshot:
-		c.File.Init()
+		InitFile(c.File)
 	case PgSnapshot:
-		c.DB.Init()
+		InitDB(c.DB)
 	default:
 		return fmt.Errorf("no output mode specified")
 	}
-	return nil
+	return c.Service.Init()
 }
-func (c *DBConfig) Init() {
+func InitDB(c *DBConfig) {
 	viper.BindEnv(DATABASE_NAME_TOML, DATABASE_NAME)
 	viper.BindEnv(DATABASE_HOSTNAME_TOML, DATABASE_HOSTNAME)
 	viper.BindEnv(DATABASE_PORT_TOML, DATABASE_PORT)
@@ -123,28 +134,55 @@ func (c *DBConfig) Init() {
 	viper.BindEnv(DATABASE_MAX_OPEN_CONNECTIONS_TOML, DATABASE_MAX_OPEN_CONNECTIONS)
 	viper.BindEnv(DATABASE_MAX_CONN_LIFETIME_TOML, DATABASE_MAX_CONN_LIFETIME)
-	dbParams := postgres.Config{}
 	// DB params
-	dbParams.DatabaseName = viper.GetString(DATABASE_NAME_TOML)
-	dbParams.Hostname = viper.GetString(DATABASE_HOSTNAME_TOML)
-	dbParams.Port = viper.GetInt(DATABASE_PORT_TOML)
-	dbParams.Username = viper.GetString(DATABASE_USER_TOML)
-	dbParams.Password = viper.GetString(DATABASE_PASSWORD_TOML)
+	c.DatabaseName = viper.GetString(DATABASE_NAME_TOML)
+	c.Hostname = viper.GetString(DATABASE_HOSTNAME_TOML)
+	c.Port = viper.GetInt(DATABASE_PORT_TOML)
+	c.Username = viper.GetString(DATABASE_USER_TOML)
+	c.Password = viper.GetString(DATABASE_PASSWORD_TOML)
 	// Connection config
-	dbParams.MaxIdle = viper.GetInt(DATABASE_MAX_IDLE_CONNECTIONS_TOML)
-	dbParams.MaxConns = viper.GetInt(DATABASE_MAX_OPEN_CONNECTIONS_TOML)
-	dbParams.MaxConnLifetime = time.Duration(viper.GetInt(DATABASE_MAX_CONN_LIFETIME_TOML)) * time.Second
-	c.ConnConfig = dbParams
-	c.URI = dbParams.DbConnectionString()
+	c.MaxIdle = viper.GetInt(DATABASE_MAX_IDLE_CONNECTIONS_TOML)
+	c.MaxConns = viper.GetInt(DATABASE_MAX_OPEN_CONNECTIONS_TOML)
+	c.MaxConnLifetime = time.Duration(viper.GetInt(DATABASE_MAX_CONN_LIFETIME_TOML)) * time.Second
+	c.Driver = postgres.SQLX
 }
-func (c *FileConfig) Init() error {
+func InitFile(c *FileConfig) error {
 	viper.BindEnv(FILE_OUTPUT_DIR_TOML, FILE_OUTPUT_DIR)
 	c.OutputDir = viper.GetString(FILE_OUTPUT_DIR_TOML)
 	if c.OutputDir == "" {
 		logrus.Infof("no output directory set, using default: %s", defaultOutputDir)
 		c.OutputDir = defaultOutputDir
 	}
+	// Only support CSV for now
+	c.Mode = file.CSV
+	return nil
+}
+func (c *ServiceConfig) Init() error {
+	viper.BindEnv(SNAPSHOT_BLOCK_HEIGHT_TOML, SNAPSHOT_BLOCK_HEIGHT)
+	viper.BindEnv(SNAPSHOT_MODE_TOML, SNAPSHOT_MODE)
+	viper.BindEnv(SNAPSHOT_WORKERS_TOML, SNAPSHOT_WORKERS)
+	viper.BindEnv(SNAPSHOT_RECOVERY_FILE_TOML, SNAPSHOT_RECOVERY_FILE)
+	viper.BindEnv(PROM_DB_STATS_TOML, PROM_DB_STATS)
+	viper.BindEnv(PROM_HTTP_TOML, PROM_HTTP)
+	viper.BindEnv(PROM_HTTP_ADDR_TOML, PROM_HTTP_ADDR)
+	viper.BindEnv(PROM_HTTP_PORT_TOML, PROM_HTTP_PORT)
+	viper.BindEnv(PROM_METRICS_TOML, PROM_METRICS)
+	viper.BindEnv(SNAPSHOT_ACCOUNTS_TOML, SNAPSHOT_ACCOUNTS)
+	var allowedAccounts []string
+	viper.UnmarshalKey(SNAPSHOT_ACCOUNTS_TOML, &allowedAccounts)
+	accountsLen := len(allowedAccounts)
+	if accountsLen != 0 {
+		c.AllowedAccounts = make([]common.Address, 0, accountsLen)
+		for _, allowedAccount := range allowedAccounts {
+			c.AllowedAccounts = append(c.AllowedAccounts, common.HexToAddress(allowedAccount))
+		}
+	} else {
+		logrus.Infof("no snapshot addresses specified, will perform snapshot of entire trie(s)")
+	}
 	return nil
 }


@@ -0,0 +1,27 @@
package snapshot_test
import (
"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
ethnode "github.com/cerc-io/plugeth-statediff/indexer/node"
)
var (
DefaultNodeInfo = ethnode.Info{
ID: "test_nodeid",
ClientName: "test_client",
GenesisBlock: "TEST_GENESIS",
NetworkID: "test_network",
ChainID: 0,
}
DefaultPgConfig = postgres.Config{
Hostname: "localhost",
Port: 8077,
DatabaseName: "cerc_testing",
Username: "vdbm",
Password: "password",
MaxIdle: 0,
MaxConnLifetime: 0,
MaxConns: 4,
}
)


@@ -21,11 +21,10 @@ const (
 	SNAPSHOT_WORKERS = "SNAPSHOT_WORKERS"
 	SNAPSHOT_RECOVERY_FILE = "SNAPSHOT_RECOVERY_FILE"
 	SNAPSHOT_MODE = "SNAPSHOT_MODE"
-	SNAPSHOT_START_HEIGHT = "SNAPSHOT_START_HEIGHT"
-	SNAPSHOT_END_HEIGHT = "SNAPSHOT_END_HEIGHT"
-	LOGRUS_LEVEL = "LOGRUS_LEVEL"
-	LOGRUS_FILE = "LOGRUS_FILE"
+	SNAPSHOT_ACCOUNTS = "SNAPSHOT_ACCOUNTS"
+	LOG_LEVEL = "LOG_LEVEL"
+	LOG_FILE = "LOG_FILE"
 	PROM_METRICS = "PROM_METRICS"
 	PROM_HTTP = "PROM_HTTP"
@@ -35,8 +34,8 @@ const (
 	FILE_OUTPUT_DIR = "FILE_OUTPUT_DIR"
-	ANCIENT_DB_PATH = "ANCIENT_DB_PATH"
-	LVL_DB_PATH = "LVL_DB_PATH"
+	ETHDB_ANCIENT = "ETHDB_ANCIENT"
+	ETHDB_PATH = "ETHDB_PATH"
 	ETH_CLIENT_NAME = "ETH_CLIENT_NAME"
 	ETH_GENESIS_BLOCK = "ETH_GENESIS_BLOCK"
@@ -60,11 +59,10 @@ const (
 	SNAPSHOT_WORKERS_TOML = "snapshot.workers"
 	SNAPSHOT_RECOVERY_FILE_TOML = "snapshot.recoveryFile"
 	SNAPSHOT_MODE_TOML = "snapshot.mode"
-	SNAPSHOT_START_HEIGHT_TOML = "snapshot.startHeight"
-	SNAPSHOT_END_HEIGHT_TOML = "snapshot.endHeight"
-	LOGRUS_LEVEL_TOML = "log.level"
-	LOGRUS_FILE_TOML = "log.file"
+	SNAPSHOT_ACCOUNTS_TOML = "snapshot.accounts"
+	LOG_LEVEL_TOML = "log.level"
+	LOG_FILE_TOML = "log.file"
 	PROM_METRICS_TOML = "prom.metrics"
 	PROM_HTTP_TOML = "prom.http"
@@ -74,8 +72,8 @@ const (
 	FILE_OUTPUT_DIR_TOML = "file.outputDir"
-	ANCIENT_DB_PATH_TOML = "leveldb.ancient"
-	LVL_DB_PATH_TOML = "leveldb.path"
+	ETHDB_ANCIENT_TOML = "ethdb.ancient"
+	ETHDB_PATH_TOML = "ethdb.path"
 	ETH_CLIENT_NAME_TOML = "ethereum.clientName"
 	ETH_GENESIS_BLOCK_TOML = "ethereum.genesisBlock"
@@ -99,11 +97,10 @@ const (
 	SNAPSHOT_WORKERS_CLI = "workers"
 	SNAPSHOT_RECOVERY_FILE_CLI = "recovery-file"
 	SNAPSHOT_MODE_CLI = "snapshot-mode"
-	SNAPSHOT_START_HEIGHT_CLI = "start-height"
-	SNAPSHOT_END_HEIGHT_CLI = "end-height"
-	LOGRUS_LEVEL_CLI = "log-level"
-	LOGRUS_FILE_CLI = "log-file"
+	SNAPSHOT_ACCOUNTS_CLI = "snapshot-accounts"
+	LOG_LEVEL_CLI = "log-level"
+	LOG_FILE_CLI = "log-file"
 	PROM_METRICS_CLI = "prom-metrics"
 	PROM_HTTP_CLI = "prom-http"
@@ -113,8 +110,8 @@ const (
 	FILE_OUTPUT_DIR_CLI = "output-dir"
-	ANCIENT_DB_PATH_CLI = "ancient-path"
-	LVL_DB_PATH_CLI = "leveldb-path"
+	ETHDB_ANCIENT_CLI = "ancient-path"
+	ETHDB_PATH_CLI = "ethdb-path"
 	ETH_CLIENT_NAME_CLI = "ethereum-client-name"
 	ETH_GENESIS_BLOCK_CLI = "ethereum-genesis-block"


@@ -1,302 +0,0 @@
// Copyright © 2020 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package publisher
import (
"encoding/csv"
"fmt"
"math/big"
"os"
"path/filepath"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ipfs/go-cid"
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
"github.com/multiformats/go-multihash"
"github.com/sirupsen/logrus"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
"github.com/vulcanize/ipld-eth-state-snapshot/pkg/prom"
snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
)
var _ snapt.Publisher = (*publisher)(nil)
var (
// tables written once per block
perBlockTables = []*snapt.Table{
&snapt.TableIPLDBlock,
&snapt.TableNodeInfo,
&snapt.TableHeader,
}
// tables written during state iteration
perNodeTables = []*snapt.Table{
&snapt.TableIPLDBlock,
&snapt.TableStateNode,
&snapt.TableStorageNode,
}
)
const logInterval = 1 * time.Minute
type publisher struct {
dir string // dir containing output files
writers fileWriters
nodeInfo nodeinfo.Info
startTime time.Time
currBatchSize uint
stateNodeCounter uint64
storageNodeCounter uint64
codeNodeCounter uint64
txCounter uint32
}
type fileWriter struct {
*csv.Writer
}
// fileWriters wraps the file writers for each output table
type fileWriters map[string]fileWriter
type fileTx struct{ fileWriters }
func (tx fileWriters) Commit() error {
for _, w := range tx {
w.Flush()
if err := w.Error(); err != nil {
return err
}
}
return nil
}
func (fileWriters) Rollback() error { return nil } // TODO: delete the file?
func newFileWriter(path string) (ret fileWriter, err error) {
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
return
}
ret = fileWriter{csv.NewWriter(file)}
return
}
func (tx fileWriters) write(tbl *snapt.Table, args ...interface{}) error {
row := tbl.ToCsvRow(args...)
return tx[tbl.Name].Write(row)
}
func makeFileWriters(dir string, tables []*snapt.Table) (fileWriters, error) {
if err := os.MkdirAll(dir, 0755); err != nil {
return nil, err
}
writers := fileWriters{}
for _, tbl := range tables {
w, err := newFileWriter(TableFile(dir, tbl.Name))
if err != nil {
return nil, err
}
writers[tbl.Name] = w
}
return writers, nil
}
// NewPublisher creates a publisher which writes to per-table CSV files which can be imported
// with the Postgres COPY command.
// The output directory will be created if it does not exist.
func NewPublisher(path string, node nodeinfo.Info) (*publisher, error) {
if err := os.MkdirAll(path, 0777); err != nil {
return nil, fmt.Errorf("unable to make MkdirAll for path: %s err: %s", path, err)
}
writers, err := makeFileWriters(path, perBlockTables)
if err != nil {
return nil, err
}
pub := &publisher{
writers: writers,
dir: path,
nodeInfo: node,
startTime: time.Now(),
}
go pub.logNodeCounters()
return pub, nil
}
func TableFile(dir, name string) string { return filepath.Join(dir, name+".csv") }
func (p *publisher) txDir(index uint32) string {
return filepath.Join(p.dir, fmt.Sprintf("%010d", index))
}
func (p *publisher) BeginTx() (snapt.Tx, error) {
index := atomic.AddUint32(&p.txCounter, 1) - 1
dir := p.txDir(index)
writers, err := makeFileWriters(dir, perNodeTables)
if err != nil {
return nil, err
}
return fileTx{writers}, nil
}
// publishRaw derives a CID from the raw bytes with the provided codec and multihash type, writes it to the db tx,
// and returns the CID and blockstore-prefixed multihash key
func (tx fileWriters) publishRaw(codec uint64, raw []byte, height *big.Int) (cid, prefixedKey string, err error) {
c, err := ipld.RawdataToCid(codec, raw, multihash.KECCAK_256)
if err != nil {
return
}
cid = c.String()
prefixedKey, err = tx.publishIPLD(c, raw, height)
return
}
func (tx fileWriters) publishIPLD(c cid.Cid, raw []byte, height *big.Int) (string, error) {
dbKey := dshelp.MultihashToDsKey(c.Hash())
prefixedKey := blockstore.BlockPrefix.String() + dbKey.String()
return prefixedKey, tx.write(&snapt.TableIPLDBlock, height.String(), prefixedKey, raw)
}
// PublishHeader writes the header to the ipfs backing pg datastore and adds secondary
// indexes in the header_cids table
func (p *publisher) PublishHeader(header *types.Header) error {
headerNode, err := ipld.NewEthHeader(header)
if err != nil {
return err
}
if _, err = p.writers.publishIPLD(headerNode.Cid(), headerNode.RawData(), header.Number); err != nil {
return err
}
mhKey := shared.MultihashKeyFromCID(headerNode.Cid())
err = p.writers.write(&snapt.TableNodeInfo, p.nodeInfo.GenesisBlock, p.nodeInfo.NetworkID, p.nodeInfo.ID,
p.nodeInfo.ClientName, p.nodeInfo.ChainID)
if err != nil {
return err
}
err = p.writers.write(&snapt.TableHeader, header.Number.String(), header.Hash().Hex(), header.ParentHash.Hex(),
headerNode.Cid().String(), 0, p.nodeInfo.ID, 0, header.Root.Hex(), header.TxHash.Hex(),
header.ReceiptHash.Hex(), header.UncleHash.Hex(), header.Bloom.Bytes(), header.Time, mhKey,
0, header.Coinbase.String())
if err != nil {
return err
}
return p.writers.Commit()
}
// PublishStateNode writes the state node to the ipfs backing datastore and adds secondary indexes
// in the state_cids table
func (p *publisher) PublishStateNode(node *snapt.Node, headerID string, height *big.Int, snapTx snapt.Tx) error {
var stateKey string
if !snapt.IsNullHash(node.Key) {
stateKey = node.Key.Hex()
}
tx := snapTx.(fileTx)
stateCIDStr, mhKey, err := tx.publishRaw(ipld.MEthStateTrie, node.Value, height)
if err != nil {
return err
}
err = tx.write(&snapt.TableStateNode, height.String(), headerID, stateKey, stateCIDStr, node.Path,
node.NodeType, false, mhKey)
if err != nil {
return err
}
// increment state node counter.
atomic.AddUint64(&p.stateNodeCounter, 1)
prom.IncStateNodeCount()
// increment current batch size counter
p.currBatchSize += 2
return err
}
// PublishStorageNode writes the storage node to the ipfs backing pg datastore and adds secondary
// indexes in the storage_cids table
func (p *publisher) PublishStorageNode(node *snapt.Node, headerID string, height *big.Int, statePath []byte, snapTx snapt.Tx) error {
var storageKey string
if !snapt.IsNullHash(node.Key) {
storageKey = node.Key.Hex()
}
tx := snapTx.(fileTx)
storageCIDStr, mhKey, err := tx.publishRaw(ipld.MEthStorageTrie, node.Value, height)
if err != nil {
return err
}
err = tx.write(&snapt.TableStorageNode, height.String(), headerID, statePath, storageKey, storageCIDStr, node.Path,
node.NodeType, false, mhKey)
if err != nil {
return err
}
// increment storage node counter.
atomic.AddUint64(&p.storageNodeCounter, 1)
prom.IncStorageNodeCount()
// increment current batch size counter
p.currBatchSize += 2
return nil
}
// PublishCode writes code to the ipfs backing pg datastore
func (p *publisher) PublishCode(height *big.Int, codeHash common.Hash, codeBytes []byte, snapTx snapt.Tx) error {
// no codec for code, doesn't matter though since blockstore key is multihash-derived
mhKey, err := shared.MultihashKeyFromKeccak256(codeHash)
if err != nil {
return fmt.Errorf("error deriving multihash key from codehash: %v", err)
}
tx := snapTx.(fileTx)
if err = tx.write(&snapt.TableIPLDBlock, height.String(), mhKey, codeBytes); err != nil {
return fmt.Errorf("error publishing code IPLD: %v", err)
}
// increment code node counter.
atomic.AddUint64(&p.codeNodeCounter, 1)
prom.IncCodeNodeCount()
p.currBatchSize++
return nil
}
func (p *publisher) PrepareTxForBatch(tx snapt.Tx, maxBatchSize uint) (snapt.Tx, error) {
return tx, nil
}
// logNodeCounters periodically logs the number of nodes processed.
func (p *publisher) logNodeCounters() {
t := time.NewTicker(logInterval)
for range t.C {
p.printNodeCounters("progress")
}
}
func (p *publisher) printNodeCounters(msg string) {
logrus.WithFields(logrus.Fields{
"runtime": time.Now().Sub(p.startTime).String(),
"state nodes": atomic.LoadUint64(&p.stateNodeCounter),
"storage nodes": atomic.LoadUint64(&p.storageNodeCounter),
"code nodes": atomic.LoadUint64(&p.codeNodeCounter),
}).Info(msg)
}
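
For orientation, a minimal usage sketch of this CSV publisher (the `header` and `stateNode` values are hypothetical stand-ins, mirroring the flow in the test file below); each per-table CSV it emits can then be bulk-loaded with the Postgres COPY command:

```
// Sketch: publish one header and one state node, then flush the CSVs.
// Assumes header (*types.Header) and stateNode (snapt.Node) are in scope.
pub, err := NewPublisher("./snapshot-out", nodeInfo)
if err != nil {
	logrus.Fatal(err)
}
if err := pub.PublishHeader(header); err != nil {
	logrus.Fatal(err)
}
tx, err := pub.BeginTx()
if err != nil {
	logrus.Fatal(err)
}
headerID := header.Hash().String()
if err := pub.PublishStateNode(&stateNode, headerID, header.Number, tx); err != nil {
	logrus.Fatal(err)
}
if err := tx.Commit(); err != nil {
	logrus.Fatal(err)
}
// Then, for each table: COPY <table> FROM '<dir>/<table>.csv' CSV;
```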

View File

@@ -1,130 +0,0 @@
package publisher
import (
"context"
"encoding/csv"
"fmt"
"io"
"os"
"path/filepath"
"testing"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
fixt "github.com/vulcanize/ipld-eth-state-snapshot/fixture"
snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
"github.com/vulcanize/ipld-eth-state-snapshot/test"
)
var (
pgConfig = test.DefaultPgConfig
nodeInfo = test.DefaultNodeInfo
// tables ordered according to fkey dependencies
allTables = []*snapt.Table{
&snapt.TableIPLDBlock,
&snapt.TableNodeInfo,
&snapt.TableHeader,
&snapt.TableStateNode,
&snapt.TableStorageNode,
}
)
func writeFiles(t *testing.T, dir string) *publisher {
pub, err := NewPublisher(dir, nodeInfo)
test.NoError(t, err)
test.NoError(t, pub.PublishHeader(&fixt.Block1_Header))
tx, err := pub.BeginTx()
test.NoError(t, err)
headerID := fixt.Block1_Header.Hash().String()
test.NoError(t, pub.PublishStateNode(&fixt.Block1_StateNode0, headerID, fixt.Block1_Header.Number, tx))
test.NoError(t, tx.Commit())
return pub
}
// verify that we can parse the csvs
// TODO check actual data
func verifyFileData(t *testing.T, path string, tbl *snapt.Table) {
file, err := os.Open(path)
test.NoError(t, err)
r := csv.NewReader(file)
test.NoError(t, err)
r.FieldsPerRecord = len(tbl.Columns)
for {
_, err := r.Read()
if err == io.EOF {
break
}
test.NoError(t, err)
}
}
func TestWriting(t *testing.T) {
dir := t.TempDir()
// tempdir like /tmp/TempFoo/001/, TempFoo defaults to 0700
test.NoError(t, os.Chmod(filepath.Dir(dir), 0755))
pub := writeFiles(t, dir)
for _, tbl := range perBlockTables {
verifyFileData(t, TableFile(pub.dir, tbl.Name), tbl)
}
for i := uint32(0); i < pub.txCounter; i++ {
for _, tbl := range perNodeTables {
verifyFileData(t, TableFile(pub.txDir(i), tbl.Name), tbl)
}
}
}
// Note: DB user requires role membership "pg_read_server_files"
func TestPgCopy(t *testing.T) {
test.NeedsDB(t)
dir := t.TempDir()
test.NoError(t, os.Chmod(filepath.Dir(dir), 0755))
pub := writeFiles(t, dir)
ctx := context.Background()
driver, err := postgres.NewSQLXDriver(ctx, pgConfig, nodeInfo)
test.NoError(t, err)
db := postgres.NewPostgresDB(driver)
sql.TearDownDB(t, db)
// copy from files
pgCopyStatement := `COPY %s FROM '%s' CSV`
for _, tbl := range perBlockTables {
stm := fmt.Sprintf(pgCopyStatement, tbl.Name, TableFile(pub.dir, tbl.Name))
_, err = db.Exec(ctx, stm)
test.NoError(t, err)
}
for i := uint32(0); i < pub.txCounter; i++ {
for _, tbl := range perNodeTables {
stm := fmt.Sprintf(pgCopyStatement, tbl.Name, TableFile(pub.txDir(i), tbl.Name))
_, err = db.Exec(ctx, stm)
test.NoError(t, err)
}
}
// check header was successfully committed
pgQueryHeader := `SELECT cid, block_hash
FROM eth.header_cids
WHERE block_number = $1`
type res struct {
CID string
BlockHash string
}
var header res
err = db.QueryRow(ctx, pgQueryHeader, fixt.Block1_Header.Number.Uint64()).Scan(
&header.CID, &header.BlockHash)
test.NoError(t, err)
headerNode, err := ipld.NewEthHeader(&fixt.Block1_Header)
test.NoError(t, err)
test.ExpectEqual(t, headerNode.Cid().String(), header.CID)
test.ExpectEqual(t, fixt.Block1_Header.Hash().String(), header.BlockHash)
}

View File

@@ -1,61 +0,0 @@
// Copyright © 2022 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package snapshot
import (
"github.com/jmoiron/sqlx"
"github.com/sirupsen/logrus"
. "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
)
const (
stateSnapShotPgStr = "SELECT state_snapshot($1, $2)"
storageSnapShotPgStr = "SELECT storage_snapshot($1, $2)"
)
type InPlaceSnapshotParams struct {
StartHeight uint64
EndHeight uint64
}
func CreateInPlaceSnapshot(config *Config, params InPlaceSnapshotParams) error {
db, err := sqlx.Connect("postgres", config.DB.ConnConfig.DbConnectionString())
if err != nil {
return err
}
tx, err := db.Begin()
if err != nil {
return err
}
defer func() {
err = CommitOrRollback(tx, err)
if err != nil {
logrus.Errorf("CommitOrRollback failed: %s", err)
}
}()
if _, err = tx.Exec(stateSnapShotPgStr, params.StartHeight, params.EndHeight); err != nil {
return err
}
if _, err = tx.Exec(storageSnapShotPgStr, params.StartHeight, params.EndHeight); err != nil {
return err
}
return nil
}
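
A minimal calling sketch (assuming a populated *Config); because both SQL functions run inside one transaction, a failure over the range leaves no partial snapshot behind:

```
// Sketch: build an in-place snapshot of state and storage for blocks 0..1000.
// The state_snapshot and storage_snapshot functions must already exist in the
// target database.
params := InPlaceSnapshotParams{StartHeight: 0, EndHeight: 1000}
if err := CreateInPlaceSnapshot(config, params); err != nil {
	logrus.Fatalf("in-place snapshot failed: %s", err)
}
```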

View File

@@ -1,160 +0,0 @@
// Copyright © 2022 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package snapshot
import (
"context"
"strconv"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/multiformats/go-multihash"
fixt "github.com/vulcanize/ipld-eth-state-snapshot/fixture"
"github.com/vulcanize/ipld-eth-state-snapshot/pkg/snapshot/pg"
snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
"github.com/vulcanize/ipld-eth-state-snapshot/test"
)
var (
pgConfig = test.DefaultPgConfig
nodeInfo = test.DefaultNodeInfo
snapshotHeight = 4
allTables = []*snapt.Table{
&snapt.TableIPLDBlock,
&snapt.TableNodeInfo,
&snapt.TableHeader,
&snapt.TableStateNode,
&snapt.TableStorageNode,
}
)
func writeData(t *testing.T, db *postgres.DB) snapt.Publisher {
pub := pg.NewPublisher(db)
tx, err := pub.BeginTx()
test.NoError(t, err)
for _, block := range fixt.InPlaceSnapshotBlocks {
headerID := block.Hash.String()
for _, stateNode := range block.StateNodes {
test.NoError(t, pub.PublishStateNode(&stateNode, headerID, block.Number, tx))
}
for index, stateStorageNodes := range block.StorageNodes {
stateNode := block.StateNodes[index]
for _, storageNode := range stateStorageNodes {
test.NoError(t, pub.PublishStorageNode(&storageNode, headerID, block.Number, stateNode.Path, tx))
}
}
}
test.NoError(t, tx.Commit())
test.NoError(t, pub.PublishHeader(&fixt.Block4_Header))
return pub
}
func TestCreateInPlaceSnapshot(t *testing.T) {
test.NeedsDB(t)
ctx := context.Background()
driver, err := postgres.NewSQLXDriver(ctx, pgConfig, nodeInfo)
test.NoError(t, err)
db := postgres.NewPostgresDB(driver)
sql.TearDownDB(t, db)
_ = writeData(t, db)
params := InPlaceSnapshotParams{StartHeight: uint64(0), EndHeight: uint64(snapshotHeight)}
config := &Config{
Eth: &EthConfig{
NodeInfo: test.DefaultNodeInfo,
},
DB: &DBConfig{
URI: pgConfig.DbConnectionString(),
ConnConfig: pgConfig,
},
}
err = CreateInPlaceSnapshot(config, params)
test.NoError(t, err)
// Check inplace snapshot was created for state_cids
stateNodes := make([]models.StateNodeModel, 0)
pgQueryStateCids := `SELECT cast(state_cids.block_number AS TEXT), state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id, state_cids.mh_key
FROM eth.state_cids
WHERE eth.state_cids.block_number = $1
ORDER BY state_cids.state_path`
err = db.Select(ctx, &stateNodes, pgQueryStateCids, snapshotHeight)
test.NoError(t, err)
test.ExpectEqual(t, 4, len(stateNodes))
expectedStateNodes := fixt.ExpectedStateNodes
pgIpfsGet := `SELECT data FROM public.blocks
WHERE key = $1 AND block_number = $2`
for index, stateNode := range stateNodes {
var data []byte
err = db.Get(ctx, &data, pgIpfsGet, stateNode.MhKey, snapshotHeight)
test.NoError(t, err)
expectedStateNode := expectedStateNodes[index]
expectedCID, _ := ipld.RawdataToCid(ipld.MEthStateTrie, expectedStateNode.Value, multihash.KECCAK_256)
test.ExpectEqual(t, strconv.Itoa(snapshotHeight), stateNode.BlockNumber)
test.ExpectEqual(t, fixt.Block4_Header.Hash().String(), stateNode.HeaderID)
test.ExpectEqual(t, expectedCID.String(), stateNode.CID)
test.ExpectEqual(t, int(expectedStateNode.NodeType), stateNode.NodeType)
test.ExpectEqual(t, expectedStateNode.Key, common.HexToHash(stateNode.StateKey))
test.ExpectEqual(t, false, stateNode.Diff)
test.ExpectEqualBytes(t, expectedStateNode.Path, stateNode.Path)
test.ExpectEqualBytes(t, expectedStateNode.Value, data)
}
// Check inplace snapshot was created for storage_cids
storageNodes := make([]models.StorageNodeModel, 0)
pgQueryStorageCids := `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, storage_cids.state_path, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path, storage_cids.mh_key, storage_cids.header_id
FROM eth.storage_cids
WHERE eth.storage_cids.block_number = $1
ORDER BY storage_cids.state_path, storage_cids.storage_path`
err = db.Select(ctx, &storageNodes, pgQueryStorageCids, snapshotHeight)
test.NoError(t, err)
for index, storageNode := range storageNodes {
expectedStorageNode := fixt.ExpectedStorageNodes[index]
expectedStorageCID, _ := ipld.RawdataToCid(ipld.MEthStorageTrie, expectedStorageNode.Value, multihash.KECCAK_256)
test.ExpectEqual(t, strconv.Itoa(snapshotHeight), storageNode.BlockNumber)
test.ExpectEqual(t, fixt.Block4_Header.Hash().String(), storageNode.HeaderID)
test.ExpectEqual(t, expectedStorageCID.String(), storageNode.CID)
test.ExpectEqual(t, int(expectedStorageNode.NodeType), storageNode.NodeType)
test.ExpectEqual(t, expectedStorageNode.Key, common.HexToHash(storageNode.StorageKey))
test.ExpectEqual(t, expectedStorageNode.StatePath, storageNode.StatePath)
test.ExpectEqual(t, expectedStorageNode.Path, storageNode.Path)
test.ExpectEqual(t, false, storageNode.Diff)
var data []byte
err = db.Get(ctx, &data, pgIpfsGet, storageNode.MhKey, snapshotHeight)
test.NoError(t, err)
test.ExpectEqualBytes(t, expectedStorageNode.Value, data)
}
}

View File

@@ -1,26 +0,0 @@
package mock
import (
"fmt"
"github.com/golang/mock/gomock"
)
type anyOfMatcher struct {
values []interface{}
}
func (m anyOfMatcher) Matches(x interface{}) bool {
for _, v := range m.values {
if gomock.Eq(v).Matches(x) {
return true
}
}
return false
}
func (m anyOfMatcher) String() string {
return fmt.Sprintf("is equal to any of %+v", m.values)
}
func AnyOf(xs ...interface{}) anyOfMatcher {
return anyOfMatcher{xs}
}
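
A usage sketch (the mock and arguments are hypothetical): AnyOf satisfies gomock.Matcher, so it drops into any expectation where a call may legitimately receive one of several values:

```
// Sketch: accept either header ID for the second argument.
mockPub.EXPECT().
	PublishStateNode(gomock.Any(), mock.AnyOf(headerA, headerB), gomock.Any(), gomock.Any()).
	AnyTimes()
```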

View File

@@ -1,251 +0,0 @@
// Copyright © 2020 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package pg
import (
"context"
"fmt"
"math/big"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ipfs/go-cid"
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
"github.com/multiformats/go-multihash"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
"github.com/vulcanize/ipld-eth-state-snapshot/pkg/prom"
snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
)
var _ snapt.Publisher = (*publisher)(nil)
const logInterval = 1 * time.Minute
// Publisher is a wrapper around the DB.
type publisher struct {
db *postgres.DB
currBatchSize uint
stateNodeCounter uint64
storageNodeCounter uint64
codeNodeCounter uint64
startTime time.Time
}
// NewPublisher creates Publisher
func NewPublisher(db *postgres.DB) *publisher {
return &publisher{
db: db,
startTime: time.Now(),
}
}
type pubTx struct {
sql.Tx
callback func()
}
func (tx pubTx) Rollback() error { return tx.Tx.Rollback(context.Background()) }
func (tx pubTx) Commit() error {
if tx.callback != nil {
defer tx.callback()
}
return tx.Tx.Commit(context.Background())
}
func (tx pubTx) Exec(sql string, args ...interface{}) (sql.Result, error) {
return tx.Tx.Exec(context.Background(), sql, args...)
}
func (p *publisher) BeginTx() (snapt.Tx, error) {
tx, err := p.db.Begin(context.Background())
if err != nil {
return nil, err
}
go p.logNodeCounters()
return pubTx{tx, func() {
p.printNodeCounters("final stats")
}}, nil
}
// publishRaw derives a CID from the raw bytes with the provided codec and multihash type, writes it to the db tx,
// and returns the CID and blockstore-prefixed multihash key
func (tx pubTx) publishRaw(codec uint64, raw []byte, height *big.Int) (cid, prefixedKey string, err error) {
c, err := ipld.RawdataToCid(codec, raw, multihash.KECCAK_256)
if err != nil {
return
}
cid = c.String()
prefixedKey, err = tx.publishIPLD(c, raw, height)
return
}
func (tx pubTx) publishIPLD(c cid.Cid, raw []byte, height *big.Int) (string, error) {
dbKey := dshelp.MultihashToDsKey(c.Hash())
prefixedKey := blockstore.BlockPrefix.String() + dbKey.String()
_, err := tx.Exec(snapt.TableIPLDBlock.ToInsertStatement(), height.Uint64(), prefixedKey, raw)
return prefixedKey, err
}
// PublishHeader writes the header to the ipfs backing pg datastore and adds secondary indexes in the header_cids table
func (p *publisher) PublishHeader(header *types.Header) (err error) {
headerNode, err := ipld.NewEthHeader(header)
if err != nil {
return err
}
snapTx, err := p.db.Begin(context.Background())
if err != nil {
return err
}
tx := pubTx{snapTx, nil}
defer func() {
err = snapt.CommitOrRollback(tx, err)
if err != nil {
logrus.Errorf("CommitOrRollback failed: %s", err)
}
}()
if _, err = tx.publishIPLD(headerNode.Cid(), headerNode.RawData(), header.Number); err != nil {
return err
}
mhKey := shared.MultihashKeyFromCID(headerNode.Cid())
_, err = tx.Exec(snapt.TableHeader.ToInsertStatement(), header.Number.Uint64(), header.Hash().Hex(),
header.ParentHash.Hex(), headerNode.Cid().String(), "0", p.db.NodeID(), "0",
header.Root.Hex(), header.TxHash.Hex(), header.ReceiptHash.Hex(), header.UncleHash.Hex(),
header.Bloom.Bytes(), header.Time, mhKey, 0, header.Coinbase.String())
return err
}
// PublishStateNode writes the state node to the ipfs backing datastore and adds secondary indexes in the state_cids table
func (p *publisher) PublishStateNode(node *snapt.Node, headerID string, height *big.Int, snapTx snapt.Tx) error {
var stateKey string
if !snapt.IsNullHash(node.Key) {
stateKey = node.Key.Hex()
}
tx := snapTx.(pubTx)
stateCIDStr, mhKey, err := tx.publishRaw(ipld.MEthStateTrie, node.Value, height)
if err != nil {
return err
}
_, err = tx.Exec(snapt.TableStateNode.ToInsertStatement(),
height.Uint64(), headerID, stateKey, stateCIDStr, node.Path, node.NodeType, false, mhKey)
if err != nil {
return err
}
// increment state node counter.
atomic.AddUint64(&p.stateNodeCounter, 1)
prom.IncStateNodeCount()
// increment current batch size counter
p.currBatchSize += 2
return err
}
// PublishStorageNode writes the storage node to the ipfs backing pg datastore and adds secondary indexes in the storage_cids table
func (p *publisher) PublishStorageNode(node *snapt.Node, headerID string, height *big.Int, statePath []byte, snapTx snapt.Tx) error {
var storageKey string
if !snapt.IsNullHash(node.Key) {
storageKey = node.Key.Hex()
}
tx := snapTx.(pubTx)
storageCIDStr, mhKey, err := tx.publishRaw(ipld.MEthStorageTrie, node.Value, height)
if err != nil {
return err
}
_, err = tx.Exec(snapt.TableStorageNode.ToInsertStatement(),
height.Uint64(), headerID, statePath, storageKey, storageCIDStr, node.Path, node.NodeType, false, mhKey)
if err != nil {
return err
}
// increment storage node counter.
atomic.AddUint64(&p.storageNodeCounter, 1)
prom.IncStorageNodeCount()
// increment current batch size counter
p.currBatchSize += 2
return err
}
// PublishCode writes code to the ipfs backing pg datastore
func (p *publisher) PublishCode(height *big.Int, codeHash common.Hash, codeBytes []byte, snapTx snapt.Tx) error {
// no codec for code, doesn't matter though since blockstore key is multihash-derived
mhKey, err := shared.MultihashKeyFromKeccak256(codeHash)
if err != nil {
return fmt.Errorf("error deriving multihash key from codehash: %v", err)
}
tx := snapTx.(pubTx)
if _, err = tx.Exec(snapt.TableIPLDBlock.ToInsertStatement(), height.Uint64(), mhKey, codeBytes); err != nil {
return fmt.Errorf("error publishing code IPLD: %v", err)
}
// increment code node counter.
atomic.AddUint64(&p.codeNodeCounter, 1)
prom.IncCodeNodeCount()
p.currBatchSize++
return nil
}
func (p *publisher) PrepareTxForBatch(tx snapt.Tx, maxBatchSize uint) (snapt.Tx, error) {
var err error
// maximum batch size reached, commit the current transaction and begin a new transaction.
if maxBatchSize <= p.currBatchSize {
if err = tx.Commit(); err != nil {
return nil, err
}
snapTx, err := p.db.Begin(context.Background())
tx = pubTx{Tx: snapTx}
if err != nil {
return nil, err
}
p.currBatchSize = 0
}
return tx, nil
}
// logNodeCounters periodically logs the number of nodes processed.
func (p *publisher) logNodeCounters() {
t := time.NewTicker(logInterval)
for range t.C {
p.printNodeCounters("progress")
}
}
func (p *publisher) printNodeCounters(msg string) {
log.WithFields(log.Fields{
"runtime": time.Now().Sub(p.startTime).String(),
"state nodes": atomic.LoadUint64(&p.stateNodeCounter),
"storage nodes": atomic.LoadUint64(&p.storageNodeCounter),
"code nodes": atomic.LoadUint64(&p.codeNodeCounter),
}).Info(msg)
}
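
The batching above is caller-driven; a sketch of the intended pattern (loop and variable names hypothetical): PrepareTxForBatch is consulted before each publish, so once currBatchSize crosses maxBatchSize the current transaction is committed and a fresh one begun:

```
// Sketch: keep write transactions bounded while publishing many nodes.
tx, err := pub.BeginTx()
if err != nil {
	return err
}
for _, node := range stateNodes { // hypothetical node source
	if tx, err = pub.PrepareTxForBatch(tx, maxBatchSize); err != nil {
		return err
	}
	if err = pub.PublishStateNode(&node, headerID, height, tx); err != nil {
		return err
	}
}
return tx.Commit()
```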

View File

@@ -1,72 +0,0 @@
package pg
import (
"context"
"testing"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
fixt "github.com/vulcanize/ipld-eth-state-snapshot/fixture"
snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
"github.com/vulcanize/ipld-eth-state-snapshot/test"
)
var (
pgConfig = test.DefaultPgConfig
nodeInfo = test.DefaultNodeInfo
// tables ordered according to fkey dependencies
allTables = []*snapt.Table{
&snapt.TableIPLDBlock,
&snapt.TableNodeInfo,
&snapt.TableHeader,
&snapt.TableStateNode,
&snapt.TableStorageNode,
}
)
func writeData(t *testing.T, db *postgres.DB) *publisher {
pub := NewPublisher(db)
test.NoError(t, pub.PublishHeader(&fixt.Block1_Header))
tx, err := pub.BeginTx()
test.NoError(t, err)
headerID := fixt.Block1_Header.Hash().String()
test.NoError(t, pub.PublishStateNode(&fixt.Block1_StateNode0, headerID, fixt.Block1_Header.Number, tx))
test.NoError(t, tx.Commit())
return pub
}
// Note: DB user requires role membership "pg_read_server_files"
func TestBasic(t *testing.T) {
test.NeedsDB(t)
ctx := context.Background()
driver, err := postgres.NewSQLXDriver(ctx, pgConfig, nodeInfo)
test.NoError(t, err)
db := postgres.NewPostgresDB(driver)
sql.TearDownDB(t, db)
_ = writeData(t, db)
// check header was successfully committed
pgQueryHeader := `SELECT cid, block_hash
FROM eth.header_cids
WHERE block_number = $1`
type res struct {
CID string
BlockHash string
}
var header res
err = db.QueryRow(ctx, pgQueryHeader, fixt.Block1_Header.Number.Uint64()).Scan(
&header.CID, &header.BlockHash)
test.NoError(t, err)
headerNode, err := ipld.NewEthHeader(&fixt.Block1_Header)
test.NoError(t, err)
test.ExpectEqual(t, headerNode.Cid().String(), header.CID)
test.ExpectEqual(t, fixt.Block1_Header.Hash().String(), header.BlockHash)
}

View File

@@ -16,25 +16,26 @@
 package snapshot

 import (
-	"bytes"
-	"errors"
+	"context"
 	"fmt"
 	"math/big"
+	"os"
+	"os/signal"
 	"sync"
+	"syscall"

+	"github.com/cerc-io/ipld-eth-state-snapshot/pkg/prom"
+	statediff "github.com/cerc-io/plugeth-statediff"
+	"github.com/cerc-io/plugeth-statediff/adapt"
+	"github.com/cerc-io/plugeth-statediff/indexer"
+	"github.com/cerc-io/plugeth-statediff/types"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
-	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/rlp"
-	"github.com/ethereum/go-ethereum/trie"
-	"github.com/sirupsen/logrus"
 	log "github.com/sirupsen/logrus"
-	iter "github.com/vulcanize/go-eth-state-node-iterator"
-	. "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
 )

 var (
@@ -48,308 +49,124 @@ var (
 // Service holds ethDB and stateDB to read data from lvldb and Publisher
 // to publish trie in postgres DB.
 type Service struct {
 	ethDB        ethdb.Database
 	stateDB      state.Database
-	ipfsPublisher Publisher
+	indexer      indexer.Indexer
 	maxBatchSize uint
-	tracker      iteratorTracker
 	recoveryFile string
 }

-func NewLevelDB(con *EthConfig) (ethdb.Database, error) {
-	edb, err := rawdb.NewLevelDBDatabaseWithFreezer(
-		con.LevelDBPath, 1024, 256, con.AncientDBPath, "ipld-eth-state-snapshot", true,
-	)
-	if err != nil {
-		return nil, fmt.Errorf("unable to create NewLevelDBDatabaseWithFreezer: %s", err)
-	}
-	return edb, nil
-}
+func NewEthDB(con *EthDBConfig) (ethdb.Database, error) {
+	return rawdb.Open(rawdb.OpenOptions{
+		Directory:         con.DBPath,
+		AncientsDirectory: con.AncientDBPath,
+		Namespace:         "ipld-eth-state-snapshot",
+		Cache:             1024,
+		Handles:           256,
+		ReadOnly:          true,
+	})
+}

 // NewSnapshotService creates Service.
-func NewSnapshotService(edb ethdb.Database, pub Publisher, recoveryFile string) (*Service, error) {
+func NewSnapshotService(edb ethdb.Database, indexer indexer.Indexer, recoveryFile string) (*Service, error) {
 	return &Service{
 		ethDB:        edb,
 		stateDB:      state.NewDatabase(edb),
-		ipfsPublisher: pub,
+		indexer:      indexer,
 		maxBatchSize: defaultBatchSize,
 		recoveryFile: recoveryFile,
 	}, nil
 }

 type SnapshotParams struct {
+	WatchedAddresses []common.Address
 	Height  uint64
 	Workers uint
 }

 func (s *Service) CreateSnapshot(params SnapshotParams) error {
 	// extract header from lvldb and publish to PG-IPFS
 	// hold onto the headerID so that we can link the state nodes to this header
-	log.Infof("Creating snapshot at height %d", params.Height)
 	hash := rawdb.ReadCanonicalHash(s.ethDB, params.Height)
 	header := rawdb.ReadHeader(s.ethDB, hash, params.Height)
 	if header == nil {
 		return fmt.Errorf("unable to read canonical header at height %d", params.Height)
 	}
-	log.Infof("head hash: %s head height: %d", hash.Hex(), params.Height)
+	log.WithField("height", params.Height).WithField("hash", hash).Info("Creating snapshot")

-	err := s.ipfsPublisher.PublishHeader(header)
+	// Context for snapshot work
+	ctx, cancelCtx := context.WithCancel(context.Background())
+	defer cancelCtx()
+	// Cancel context on receiving a signal. On cancellation, all tracked iterators complete
+	// processing of their current node before stopping.
+	captureSignal(cancelCtx)
+
+	var err error
+	tx := s.indexer.BeginTx(header.Number, ctx)
+	defer tx.RollbackOnFailure(err)
+
+	var headerid string
+	headerid, err = s.indexer.PushHeader(tx, header, big.NewInt(0), big.NewInt(0))
 	if err != nil {
 		return err
 	}
-	tree, err := s.stateDB.OpenTrie(header.Root)
-	if err != nil {
-		return err
-	}
-	headerID := header.Hash().String()
-	s.tracker = newTracker(s.recoveryFile, int(params.Workers))
-	s.tracker.captureSignal()
-	var iters []trie.NodeIterator
-	// attempt to restore from recovery file if it exists
-	iters, err = s.tracker.restore(tree)
-	if err != nil {
-		log.Errorf("restore error: %s", err.Error())
-		return err
-	}
-	if iters != nil {
-		log.Debugf("restored iterators; count: %d", len(iters))
-		if params.Workers < uint(len(iters)) {
-			return fmt.Errorf(
-				"number of recovered workers (%d) is greater than number configured (%d)",
-				len(iters), params.Workers,
-			)
-		}
-	} else { // nothing to restore
-		log.Debugf("no iterators to restore")
-		if params.Workers > 1 {
-			iters = iter.SubtrieIterators(tree, params.Workers)
-		} else {
-			iters = []trie.NodeIterator{tree.NodeIterator(nil)}
-		}
-		for i, it := range iters {
-			iters[i] = s.tracker.tracked(it)
-		}
-	}
+
+	tr := prom.NewTracker(s.recoveryFile, params.Workers)
 	defer func() {
-		err := s.tracker.haltAndDump()
+		err := tr.CloseAndSave()
 		if err != nil {
 			log.Errorf("failed to write recovery file: %v", err)
 		}
 	}()
-	if len(iters) > 0 {
-		return s.createSnapshotAsync(iters, headerID, new(big.Int).SetUint64(params.Height))
-	} else {
-		return s.createSnapshot(iters[0], headerID, new(big.Int).SetUint64(params.Height))
-	}
-}

-// Create snapshot up to head (ignores height param)
-func (s *Service) CreateLatestSnapshot(workers uint) error {
-	log.Info("Creating snapshot at head")
-	hash := rawdb.ReadHeadHeaderHash(s.ethDB)
-	height := rawdb.ReadHeaderNumber(s.ethDB, hash)
-	if height == nil {
-		return fmt.Errorf("unable to read header height for header hash %s", hash.String())
-	}
-	return s.CreateSnapshot(SnapshotParams{Height: *height, Workers: workers})
-}
+	var nodeMtx, ipldMtx sync.Mutex
+	nodeSink := func(node types.StateLeafNode) error {
+		nodeMtx.Lock()
+		defer nodeMtx.Unlock()
+		prom.IncStateNodeCount()
+		prom.AddStorageNodeCount(len(node.StorageDiff))
+		return s.indexer.PushStateNode(tx, node, headerid)
+	}
+	ipldSink := func(c types.IPLD) error {
+		ipldMtx.Lock()
+		defer ipldMtx.Unlock()
+		return s.indexer.PushIPLD(tx, c)
+	}

-type nodeResult struct {
-	node     Node
-	elements []interface{}
-}
+	sdparams := statediff.Params{
+		WatchedAddresses: params.WatchedAddresses,
+	}
+	sdparams.ComputeWatchedAddressesLeafPaths()
+	builder := statediff.NewBuilder(adapt.GethStateView(s.stateDB))
+	builder.SetSubtrieWorkers(params.Workers)
+	if err = builder.WriteStateSnapshot(ctx, header.Root, sdparams, nodeSink, ipldSink, tr); err != nil {
+		return err
+	}

-func resolveNode(it trie.NodeIterator, trieDB *trie.Database) (*nodeResult, error) {
-	// "leaf" nodes are actually "value" nodes, whose parents are the actual leaves
-	if it.Leaf() {
-		return nil, nil
-	}
-	if IsNullHash(it.Hash()) {
-		return nil, nil
-	}
-	path := make([]byte, len(it.Path()))
-	copy(path, it.Path())
-	n, err := trieDB.Node(it.Hash())
-	if err != nil {
-		return nil, err
-	}
-	var elements []interface{}
-	if err := rlp.DecodeBytes(n, &elements); err != nil {
-		return nil, err
-	}
-	ty, err := CheckKeyType(elements)
-	if err != nil {
-		return nil, err
-	}
-	return &nodeResult{
-		node: Node{
-			NodeType: ty,
-			Path:     path,
-			Value:    n,
-		},
-		elements: elements,
-	}, nil
-}
+	if err = tx.Submit(); err != nil {
+		return fmt.Errorf("batch transaction submission failed: %w", err)
+	}
+	return err
+}

-func (s *Service) createSnapshot(it trie.NodeIterator, headerID string, height *big.Int) error {
-	tx, err := s.ipfsPublisher.BeginTx()
-	if err != nil {
-		return err
-	}
-	defer func() {
-		err = CommitOrRollback(tx, err)
-		if err != nil {
-			logrus.Errorf("CommitOrRollback failed: %s", err)
-		}
-	}()
-	for it.Next(true) {
-		res, err := resolveNode(it, s.stateDB.TrieDB())
-		if err != nil {
-			return err
-		}
-		if res == nil {
-			continue
-		}
-		tx, err = s.ipfsPublisher.PrepareTxForBatch(tx, s.maxBatchSize)
-		if err != nil {
-			return err
-		}
-		switch res.node.NodeType {
-		case Leaf:
-			// if the node is a leaf, decode the account and publish the associated storage trie
-			// nodes if there are any
-			var account types.StateAccount
-			if err := rlp.DecodeBytes(res.elements[1].([]byte), &account); err != nil {
-				return fmt.Errorf(
-					"error decoding account for leaf node at path %x\nerror: %v", res.node.Path, err)
-			}
-			partialPath := trie.CompactToHex(res.elements[0].([]byte))
-			valueNodePath := append(res.node.Path, partialPath...)
-			encodedPath := trie.HexToCompact(valueNodePath)
-			leafKey := encodedPath[1:]
-			res.node.Key = common.BytesToHash(leafKey)
-			err := s.ipfsPublisher.PublishStateNode(&res.node, headerID, height, tx)
-			if err != nil {
-				return err
-			}
-			// publish any non-nil code referenced by codehash
-			if !bytes.Equal(account.CodeHash, emptyCodeHash) {
-				codeHash := common.BytesToHash(account.CodeHash)
-				codeBytes := rawdb.ReadCode(s.ethDB, codeHash)
-				if len(codeBytes) == 0 {
-					log.Error("Code is missing", "account", common.BytesToHash(it.LeafKey()))
-					return errors.New("missing code")
-				}
-				if err = s.ipfsPublisher.PublishCode(height, codeHash, codeBytes, tx); err != nil {
-					return err
-				}
-			}
-			if tx, err = s.storageSnapshot(account.Root, headerID, height, res.node.Path, tx); err != nil {
-				return fmt.Errorf("failed building storage snapshot for account %+v\r\nerror: %w", account, err)
-			}
-		case Extension, Branch:
-			res.node.Key = common.BytesToHash([]byte{})
-			if err := s.ipfsPublisher.PublishStateNode(&res.node, headerID, height, tx); err != nil {
-				return err
-			}
-		default:
-			return errors.New("unexpected node type")
-		}
-	}
-	return it.Error()
-}
-
-// Full-trie concurrent snapshot
-func (s *Service) createSnapshotAsync(iters []trie.NodeIterator, headerID string, height *big.Int) error {
-	errors := make(chan error)
-	var wg sync.WaitGroup
-	for _, it := range iters {
-		wg.Add(1)
-		go func(it trie.NodeIterator) {
-			defer wg.Done()
-			if err := s.createSnapshot(it, headerID, height); err != nil {
-				errors <- err
-			}
-		}(it)
-	}
-	done := make(chan struct{})
-	go func() {
-		wg.Wait()
-		done <- struct{}{}
-	}()
-	var err error
-	select {
-	case err = <-errors:
-	case <-done:
-		close(errors)
-	}
-	return err
-}
-
-func (s *Service) storageSnapshot(sr common.Hash, headerID string, height *big.Int, statePath []byte, tx Tx) (Tx, error) {
-	if bytes.Equal(sr.Bytes(), emptyContractRoot.Bytes()) {
-		return tx, nil
-	}
-	sTrie, err := s.stateDB.OpenTrie(sr)
-	if err != nil {
-		return nil, err
-	}
-	it := sTrie.NodeIterator(make([]byte, 0))
-	for it.Next(true) {
-		res, err := resolveNode(it, s.stateDB.TrieDB())
-		if err != nil {
-			return nil, err
-		}
-		if res == nil {
-			continue
-		}
-		tx, err = s.ipfsPublisher.PrepareTxForBatch(tx, s.maxBatchSize)
-		if err != nil {
-			return nil, err
-		}
-		var nodeData []byte
-		nodeData, err = s.stateDB.TrieDB().Node(it.Hash())
-		if err != nil {
-			return nil, err
-		}
-		res.node.Value = nodeData
-		switch res.node.NodeType {
-		case Leaf:
-			partialPath := trie.CompactToHex(res.elements[0].([]byte))
-			valueNodePath := append(res.node.Path, partialPath...)
-			encodedPath := trie.HexToCompact(valueNodePath)
-			leafKey := encodedPath[1:]
-			res.node.Key = common.BytesToHash(leafKey)
-		case Extension, Branch:
-			res.node.Key = common.BytesToHash([]byte{})
-		default:
-			return nil, errors.New("unexpected node type")
-		}
-		if err = s.ipfsPublisher.PublishStorageNode(&res.node, headerID, height, statePath, tx); err != nil {
-			return nil, err
-		}
-	}
-	return tx, it.Error()
-}
+// CreateLatestSnapshot creates a snapshot at head (ignores height param)
+func (s *Service) CreateLatestSnapshot(workers uint, watchedAddresses []common.Address) error {
+	log.Info("Creating snapshot at head")
+	hash := rawdb.ReadHeadHeaderHash(s.ethDB)
+	height := rawdb.ReadHeaderNumber(s.ethDB, hash)
+	if height == nil {
+		return fmt.Errorf("unable to read header height for header hash %s", hash)
+	}
+	return s.CreateSnapshot(SnapshotParams{Height: *height, Workers: workers, WatchedAddresses: watchedAddresses})
+}
+
+func captureSignal(cb func()) {
+	sigChan := make(chan os.Signal, 1)
+	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
+	go func() {
+		sig := <-sigChan
+		log.Errorf("Signal received (%v), stopping", sig)
+		cb()
+	}()
+}

View File

@@ -1,146 +1,269 @@
-package snapshot
+package snapshot_test

 import (
-	"errors"
-	"math/big"
-	"os"
+	"fmt"
+	"math/rand"
 	"path/filepath"
 	"testing"
 	"time"

-	"github.com/golang/mock/gomock"
+	"github.com/cerc-io/eth-testing/chains"
+	"github.com/cerc-io/plugeth-statediff/indexer/models"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/stretchr/testify/require"

-	fixt "github.com/vulcanize/ipld-eth-state-snapshot/fixture"
-	mock "github.com/vulcanize/ipld-eth-state-snapshot/mocks/snapshot"
-	snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
-	"github.com/vulcanize/ipld-eth-state-snapshot/test"
+	"github.com/cerc-io/ipld-eth-state-snapshot/internal/mocks"
+	. "github.com/cerc-io/ipld-eth-state-snapshot/pkg/snapshot"
+	fixture "github.com/cerc-io/ipld-eth-state-snapshot/test"
 )

+var (
+	rng = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+	// Note: block 1 doesn't have storage nodes. TODO: add fixtures with storage nodes
+	// chainAblock1StateKeys = sliceToSet(fixture.ChainA_Block1_StateNodeLeafKeys)
+	chainAblock1IpldCids = sliceToSet(fixture.ChainA_Block1_IpldCids)
+
+	subtrieWorkerCases = []uint{1, 4, 8, 16, 32}
+)
+
+type selectiveData struct {
+	StateNodes   map[string]*models.StateNodeModel
+	StorageNodes map[string]map[string]*models.StorageNodeModel
+}
+
-func testConfig(leveldbpath, ancientdbpath string) *Config {
+func testConfig(ethdbpath, ancientdbpath string) *Config {
 	return &Config{
-		Eth: &EthConfig{
-			LevelDBPath:   leveldbpath,
+		Eth: &EthDBConfig{
+			DBPath:        ethdbpath,
 			AncientDBPath: ancientdbpath,
-			NodeInfo:      test.DefaultNodeInfo,
+			NodeInfo:      DefaultNodeInfo,
 		},
-		DB: &DBConfig{
-			URI:        test.DefaultPgConfig.DbConnectionString(),
-			ConnConfig: test.DefaultPgConfig,
-		},
+		DB: &DefaultPgConfig,
 	}
 }

-func makeMocks(t *testing.T) (*mock.MockPublisher, *mock.MockTx) {
-	ctl := gomock.NewController(t)
-	pub := mock.NewMockPublisher(ctl)
-	tx := mock.NewMockTx(ctl)
-	return pub, tx
-}
-
-func TestCreateSnapshot(t *testing.T) {
-	runCase := func(t *testing.T, workers int) {
-		pub, tx := makeMocks(t)
-		pub.EXPECT().PublishHeader(gomock.Eq(&fixt.Block1_Header))
-		pub.EXPECT().BeginTx().Return(tx, nil).
-			Times(workers)
-		pub.EXPECT().PrepareTxForBatch(gomock.Any(), gomock.Any()).Return(tx, nil).
-			AnyTimes()
-		pub.EXPECT().PublishStateNode(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
-			// Use MinTimes as duplicate nodes are expected at boundaries
-			MinTimes(len(fixt.Block1_StateNodePaths))
-		// TODO: fixtures for storage node
-		// pub.EXPECT().PublishStorageNode(gomock.Eq(fixt.StorageNode), gomock.Eq(int64(0)), gomock.Any())
-		tx.EXPECT().Commit().
-			Times(workers)
-
-		config := testConfig(fixt.ChaindataPath, fixt.AncientdataPath)
-		edb, err := NewLevelDB(config.Eth)
-		if err != nil {
-			t.Fatal(err)
-		}
-		defer edb.Close()
-
-		recovery := filepath.Join(t.TempDir(), "recover.csv")
-		service, err := NewSnapshotService(edb, pub, recovery)
-		if err != nil {
-			t.Fatal(err)
-		}
-		params := SnapshotParams{Height: 1, Workers: uint(workers)}
-		err = service.CreateSnapshot(params)
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
-	testCases := []int{1, 4, 16, 32}
-	for _, tc := range testCases {
-		t.Run("case", func(t *testing.T) { runCase(t, tc) })
-	}
-}
-
-func failingPublishStateNode(_ *snapt.Node, _ string, _ *big.Int, _ snapt.Tx) error {
-	return errors.New("failingPublishStateNode")
-}
-
-func TestRecovery(t *testing.T) {
-	runCase := func(t *testing.T, workers int) {
-		pub, tx := makeMocks(t)
-		pub.EXPECT().PublishHeader(gomock.Any()).AnyTimes()
-		pub.EXPECT().BeginTx().Return(tx, nil).AnyTimes()
-		pub.EXPECT().PrepareTxForBatch(gomock.Any(), gomock.Any()).Return(tx, nil).AnyTimes()
-		pub.EXPECT().PublishStateNode(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
-			Times(workers).
-			DoAndReturn(failingPublishStateNode)
-		tx.EXPECT().Commit().AnyTimes()
-
-		config := testConfig(fixt.ChaindataPath, fixt.AncientdataPath)
-		edb, err := NewLevelDB(config.Eth)
-		if err != nil {
-			t.Fatal(err)
-		}
-		defer edb.Close()
-
-		recovery := filepath.Join(t.TempDir(), "recover.csv")
-		service, err := NewSnapshotService(edb, pub, recovery)
-		if err != nil {
-			t.Fatal(err)
-		}
-		params := SnapshotParams{Height: 1, Workers: uint(workers)}
-		err = service.CreateSnapshot(params)
-		if err == nil {
-			t.Fatal("expected an error")
-		}
-		if _, err = os.Stat(recovery); err != nil {
-			t.Fatal("cannot stat recovery file:", err)
-		}
-
-		// Wait for earlier snapshot process to complete
-		time.Sleep(5 * time.Second)
-		pub.EXPECT().PublishStateNode(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-		err = service.CreateSnapshot(params)
-		if err != nil {
-			t.Fatal(err)
-		}
-		_, err = os.Stat(recovery)
-		if err == nil {
-			t.Fatal("recovery file still present")
-		} else {
-			if !os.IsNotExist(err) {
-				t.Fatal(err)
-			}
-		}
-	}
-	testCases := []int{1, 4, 32}
-	for _, tc := range testCases {
-		t.Run("case", func(t *testing.T) { runCase(t, tc) })
-	}
-}
+func TestSnapshot(t *testing.T) {
+	runCase := func(t *testing.T, workers uint) {
+		params := SnapshotParams{Height: 1, Workers: workers}
+		data := doSnapshot(t, fixture.ChainA, params)
+		verify_chainAblock1(t, data)
+	}
+	for _, tc := range subtrieWorkerCases {
+		t.Run(fmt.Sprintf("with %d subtries", tc), func(t *testing.T) { runCase(t, tc) })
+	}
+}
+
+func TestAccountSelectiveSnapshot(t *testing.T) {
+	height := uint64(32)
+	watchedAddresses, expected := watchedAccountData_chainBblock32()
+	runCase := func(t *testing.T, workers uint) {
+		params := SnapshotParams{
+			Height:           height,
+			Workers:          workers,
+			WatchedAddresses: watchedAddresses,
+		}
+		data := doSnapshot(t, fixture.ChainB, params)
+		expected.verify(t, data)
+	}
+	for _, tc := range subtrieWorkerCases {
+		t.Run(fmt.Sprintf("with %d subtries", tc), func(t *testing.T) { runCase(t, tc) })
+	}
+}
func TestSnapshotRecovery(t *testing.T) {
runCase := func(t *testing.T, workers uint, interruptAt uint) {
params := SnapshotParams{Height: 1, Workers: workers}
data := doSnapshotWithRecovery(t, fixture.ChainA, params, interruptAt)
verify_chainAblock1(t, data)
} }
interrupts := make([]uint, 4)
for i := 0; i < len(interrupts); i++ {
N := len(fixture.ChainA_Block1_StateNodeLeafKeys)
interrupts[i] = uint(rand.Intn(N/2) + N/4)
}
for _, tc := range subtrieWorkerCases {
for i, interrupt := range interrupts {
t.Run(
fmt.Sprintf("with %d subtries %d", tc, i),
func(t *testing.T) { runCase(t, tc, interrupt) },
)
}
}
}
func TestAccountSelectiveSnapshotRecovery(t *testing.T) {
height := uint64(32)
watchedAddresses, expected := watchedAccountData_chainBblock32()
runCase := func(t *testing.T, workers uint, interruptAt uint) {
params := SnapshotParams{
Height: height,
Workers: workers,
WatchedAddresses: watchedAddresses,
}
data := doSnapshotWithRecovery(t, fixture.ChainB, params, interruptAt)
expected.verify(t, data)
}
for _, tc := range subtrieWorkerCases {
t.Run(
fmt.Sprintf("with %d subtries", tc),
func(t *testing.T) { runCase(t, tc, 1) },
)
}
}
func verify_chainAblock1(t *testing.T, data mocks.IndexerData) {
// Extract indexed keys and sort them for comparison
var indexedStateKeys []string
for _, stateNode := range data.StateNodes {
stateKey := common.BytesToHash(stateNode.AccountWrapper.LeafKey).String()
indexedStateKeys = append(indexedStateKeys, stateKey)
}
require.ElementsMatch(t, fixture.ChainA_Block1_StateNodeLeafKeys, indexedStateKeys)
ipldCids := make(map[string]struct{})
for _, ipld := range data.IPLDs {
ipldCids[ipld.CID] = struct{}{}
}
require.Equal(t, chainAblock1IpldCids, ipldCids)
}
func watchedAccountData_chainBblock32() ([]common.Address, selectiveData) {
watchedAddresses := []common.Address{
// hash 0xcabc5edb305583e33f66322ceee43088aa99277da772feb5053512d03a0a702b
common.HexToAddress("0x825a6eec09e44Cb0fa19b84353ad0f7858d7F61a"),
// hash 0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d
common.HexToAddress("0x0616F59D291a898e796a1FAD044C5926ed2103eC"),
}
var expected selectiveData
expected.StateNodes = make(map[string]*models.StateNodeModel)
for _, index := range []int{0, 4} {
node := &fixture.ChainB_Block32_StateNodes[index]
expected.StateNodes[node.StateKey] = node
}
// Map account leaf keys to corresponding storage
expectedStorageNodeIndexes := []struct {
address common.Address
indexes []int
}{
{watchedAddresses[0], []int{9, 11}},
{watchedAddresses[1], []int{0, 1, 2, 4, 6}},
}
expected.StorageNodes = make(map[string]map[string]*models.StorageNodeModel)
for _, account := range expectedStorageNodeIndexes {
leafKey := crypto.Keccak256Hash(account.address[:]).String()
storageNodes := make(map[string]*models.StorageNodeModel)
for _, index := range account.indexes {
node := &fixture.ChainB_Block32_StorageNodes[index]
storageNodes[node.StorageKey] = node
}
expected.StorageNodes[leafKey] = storageNodes
}
return watchedAddresses, expected
}
func (expected selectiveData) verify(t *testing.T, data mocks.IndexerData) {
// check that all indexed nodes are expected and correct
indexedStateKeys := make(map[string]struct{})
for _, stateNode := range data.StateNodes {
stateKey := common.BytesToHash(stateNode.AccountWrapper.LeafKey).String()
indexedStateKeys[stateKey] = struct{}{}
require.Contains(t, expected.StateNodes, stateKey, "unexpected state node")
model := expected.StateNodes[stateKey]
require.Equal(t, model.CID, stateNode.AccountWrapper.CID)
require.Equal(t, model.Balance, stateNode.AccountWrapper.Account.Balance.String())
require.Equal(t, model.StorageRoot, stateNode.AccountWrapper.Account.Root.String())
expectedStorage := expected.StorageNodes[stateKey]
indexedStorageKeys := make(map[string]struct{})
for _, storageNode := range stateNode.StorageDiff {
storageKey := common.BytesToHash(storageNode.LeafKey).String()
indexedStorageKeys[storageKey] = struct{}{}
require.Contains(t, expectedStorage, storageKey, "unexpected storage node")
require.Equal(t, expectedStorage[storageKey].CID, storageNode.CID)
require.Equal(t, expectedStorage[storageKey].Value, storageNode.Value)
}
// check for completeness
for storageNode := range expectedStorage {
require.Contains(t, indexedStorageKeys, storageNode, "missing storage node")
}
}
// check for completeness
for stateNode := range expected.StateNodes {
require.Contains(t, indexedStateKeys, stateNode, "missing state node")
}
}
func doSnapshot(t *testing.T, chain *chains.Paths, params SnapshotParams) mocks.IndexerData {
chainDataPath, ancientDataPath := chain.ChainData, chain.Ancient
config := testConfig(chainDataPath, ancientDataPath)
edb, err := NewEthDB(config.Eth)
require.NoError(t, err)
defer edb.Close()
idx := mocks.NewIndexer(t)
recovery := filepath.Join(t.TempDir(), "recover.csv")
service, err := NewSnapshotService(edb, idx, recovery)
require.NoError(t, err)
err = service.CreateSnapshot(params)
require.NoError(t, err)
return idx.IndexerData
}
func doSnapshotWithRecovery(
t *testing.T,
chain *chains.Paths,
params SnapshotParams,
failAfter uint,
) mocks.IndexerData {
chainDataPath, ancientDataPath := chain.ChainData, chain.Ancient
config := testConfig(chainDataPath, ancientDataPath)
edb, err := NewEthDB(config.Eth)
require.NoError(t, err)
defer edb.Close()
indexer := &mocks.InterruptingIndexer{
Indexer: mocks.NewIndexer(t),
InterruptAfter: failAfter,
}
t.Logf("Will interrupt after %d state nodes", failAfter)
recoveryFile := filepath.Join(t.TempDir(), "recover.csv")
service, err := NewSnapshotService(edb, indexer, recoveryFile)
require.NoError(t, err)
err = service.CreateSnapshot(params)
require.Error(t, err)
require.FileExists(t, recoveryFile)
// We should only have processed nodes up to the break, plus an extra node per worker
require.LessOrEqual(t, len(indexer.StateNodes), int(indexer.InterruptAfter+params.Workers))
// use the nested mock indexer, to continue where it left off
recoveryIndexer := indexer.Indexer
service, err = NewSnapshotService(edb, recoveryIndexer, recoveryFile)
require.NoError(t, err)
err = service.CreateSnapshot(params)
require.NoError(t, err)
return recoveryIndexer.IndexerData
}
func sliceToSet[T comparable](slice []T) map[T]struct{} {
set := make(map[T]struct{})
for _, v := range slice {
set[v] = struct{}{}
}
return set
} }

View File

@@ -1,163 +0,0 @@
package snapshot
import (
"encoding/csv"
"fmt"
"os"
"os/signal"
"syscall"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/trie"
log "github.com/sirupsen/logrus"
iter "github.com/vulcanize/go-eth-state-node-iterator"
)
type trackedIter struct {
trie.NodeIterator
tracker *iteratorTracker
}
func (it *trackedIter) Next(descend bool) bool {
ret := it.NodeIterator.Next(descend)
if !ret {
if it.tracker.running {
it.tracker.stopChan <- it
} else {
log.Errorf("iterator stopped after tracker halted: path=%x", it.Path())
}
}
return ret
}
type iteratorTracker struct {
recoveryFile string
startChan chan *trackedIter
stopChan chan *trackedIter
started map[*trackedIter]struct{}
stopped []*trackedIter
running bool
}
func newTracker(file string, buf int) iteratorTracker {
return iteratorTracker{
recoveryFile: file,
startChan: make(chan *trackedIter, buf),
stopChan: make(chan *trackedIter, buf),
started: map[*trackedIter]struct{}{},
running: true,
}
}
func (tr *iteratorTracker) captureSignal() {
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
go func() {
sig := <-sigChan
log.Errorf("Signal received (%v), stopping", sig)
tr.haltAndDump()
os.Exit(1)
}()
}
// Wraps an iterator in a trackedIter. This should not be called once halts are possible.
func (tr *iteratorTracker) tracked(it trie.NodeIterator) (ret *trackedIter) {
ret = &trackedIter{it, tr}
tr.startChan <- ret
return
}
// dumps iterator path and bounds to a text file so it can be restored later
func (tr *iteratorTracker) dump() error {
log.Debug("Dumping recovery state to: ", tr.recoveryFile)
var rows [][]string
for it, _ := range tr.started {
var endPath []byte
if impl, ok := it.NodeIterator.(*iter.PrefixBoundIterator); ok {
endPath = impl.EndPath
}
rows = append(rows, []string{
fmt.Sprintf("%x", it.Path()),
fmt.Sprintf("%x", endPath),
})
}
file, err := os.Create(tr.recoveryFile)
if err != nil {
return err
}
defer file.Close()
out := csv.NewWriter(file)
return out.WriteAll(rows)
}
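
For reference, a dumped recovery file is plain CSV: one row per in-flight iterator, holding the hex-encoded current path and the (possibly empty) end bound, e.g. (hypothetical values):

```
0500,0600
0a3f,0c00
```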
// attempts to read iterator state from file
// if file doesn't exist, returns an empty slice with no error
func (tr *iteratorTracker) restore(tree state.Trie) ([]trie.NodeIterator, error) {
file, err := os.Open(tr.recoveryFile)
if err != nil {
if os.IsNotExist(err) {
return nil, nil
}
return nil, err
}
log.Debug("Restoring recovery state from: ", tr.recoveryFile)
defer file.Close()
in := csv.NewReader(file)
in.FieldsPerRecord = 2
rows, err := in.ReadAll()
if err != nil {
return nil, err
}
var ret []trie.NodeIterator
for _, row := range rows {
// pick up where each interval left off
var paths [2][]byte
for i, val := range row {
if len(val) != 0 {
if _, err = fmt.Sscanf(val, "%x", &paths[i]); err != nil {
return nil, err
}
}
}
// Force the lower bound path to an even length
if len(paths[0])&0b1 == 1 {
decrementPath(paths[0]) // decrement first to avoid skipped nodes
paths[0] = append(paths[0], 0)
}
it := iter.NewPrefixBoundIterator(tree.NodeIterator(iter.HexToKeyBytes(paths[0])), paths[1])
ret = append(ret, tr.tracked(it))
}
return ret, nil
}
func (tr *iteratorTracker) haltAndDump() error {
tr.running = false
// drain any pending events
close(tr.startChan)
for start := range tr.startChan {
tr.started[start] = struct{}{}
}
close(tr.stopChan)
for stop := range tr.stopChan {
tr.stopped = append(tr.stopped, stop)
}
for _, stop := range tr.stopped {
delete(tr.started, stop)
}
if len(tr.started) == 0 {
// if the tracker state is empty, erase any existing recovery file
err := os.Remove(tr.recoveryFile)
if os.IsNotExist(err) {
err = nil
}
return err
}
return tr.dump()
}

View File

@ -1,53 +1,76 @@
 package snapshot

 import (
-	"context"
-	"fmt"
-
-	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
-
-	"github.com/vulcanize/ipld-eth-state-snapshot/pkg/prom"
-	file "github.com/vulcanize/ipld-eth-state-snapshot/pkg/snapshot/file"
-	pg "github.com/vulcanize/ipld-eth-state-snapshot/pkg/snapshot/pg"
-	snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
+	"bytes"
 )

-func NewPublisher(mode SnapshotMode, config *Config) (snapt.Publisher, error) {
-	switch mode {
-	case PgSnapshot:
-		driver, err := postgres.NewPGXDriver(context.Background(), config.DB.ConnConfig, config.Eth.NodeInfo)
-		if err != nil {
-			return nil, err
-		}
-		prom.RegisterDBCollector(config.DB.ConnConfig.DatabaseName, driver)
-		return pg.NewPublisher(postgres.NewPostgresDB(driver)), nil
-	case FileSnapshot:
-		return file.NewPublisher(config.File.OutputDir, config.Eth.NodeInfo)
-	}
-	return nil, fmt.Errorf("invalid snapshot mode: %s", mode)
-}
-
-// Subtracts 1 from the last byte in a path slice, carrying if needed.
-// Does nothing, returning false, for all-zero inputs.
-func decrementPath(path []byte) bool {
-	// check for all zeros
-	allzero := true
-	for i := 0; i < len(path); i++ {
-		allzero = allzero && path[i] == 0
-	}
-	if allzero {
-		return false
-	}
-	for i := len(path) - 1; i >= 0; i-- {
-		val := path[i]
-		path[i]--
-		if val == 0 {
-			path[i] = 0xf
-		} else {
-			return true
-		}
-	}
-	return true
-}
+// Estimate the number of iterations necessary to step from start to end.
+func estimateSteps(start []byte, end []byte, depth int) uint64 {
+	// We see paths in several forms (nil, 0600, 06, etc.). We need to adjust them to a comparable form.
+	// For nil, start and end indicate the extremes of 0x0 and 0x10. For differences in depth, we often see a
+	// start/end range on a bounded iterator specified like 0500:0600, while the value returned by it.Path() may
+	// be shorter, like 06. Since our goal is to estimate how many steps it would take to move from start to end,
+	// we want to perform the comparison at a stable depth, since to move from 05 to 06 is only 1 step, but
+	// to move from 0500 to 06 is 16.
+	normalizePathRange := func(start []byte, end []byte, depth int) ([]byte, []byte) {
+		if 0 == len(start) {
+			start = []byte{0x0}
+		}
+		if 0 == len(end) {
+			end = []byte{0x10}
+		}
+		normalizedStart := make([]byte, depth)
+		normalizedEnd := make([]byte, depth)
+		for i := 0; i < depth; i++ {
+			if i < len(start) {
+				normalizedStart[i] = start[i]
+			}
+			if i < len(end) {
+				normalizedEnd[i] = end[i]
+			}
+		}
+		return normalizedStart, normalizedEnd
+	}
+
+	// We have no need to handle negative exponents, so uints are fine.
+	pow := func(x uint64, y uint) uint64 {
+		if 0 == y {
+			return 1
+		}
+		ret := x
+		for i := uint(1); i < y; i++ {
+			ret *= x
+		}
+		return ret
+	}
+
+	// Fix the paths.
+	start, end = normalizePathRange(start, end, depth)
+
+	// No negative distances, if the start is already >= end, the distance is 0.
+	if bytes.Compare(start, end) >= 0 {
+		return 0
+	}
+
+	// Subtract each component, right to left, carrying over if necessary.
+	difference := make([]byte, len(start))
+	var carry byte = 0
+	for i := len(start) - 1; i >= 0; i-- {
+		result := end[i] - start[i] - carry
+		if result > 0xf && i > 0 {
+			result &= 0xf
+			carry = 1
+		} else {
+			carry = 0
+		}
+		difference[i] = result
+	}
+
+	// Calculate the result.
+	var ret uint64 = 0
+	for i := 0; i < len(difference); i++ {
+		ret += uint64(difference[i]) * pow(16, uint(len(difference)-i-1))
+	}
+
+	return ret
+}
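To sanity-check the arithmetic, here are two worked values as a test-style sketch (illustrative only, not part of the repo; it assumes `estimateSteps` as reconstructed above, in-package):

```go
package snapshot

import "testing"

// TestEstimateStepsExamples spells out the arithmetic of estimateSteps on two
// simple ranges.
func TestEstimateStepsExamples(t *testing.T) {
	// [5 0 0] -> [6 0 0]: difference [1 0 0] => 1*16^2 = 256 steps at depth 3.
	if got := estimateSteps([]byte{0x05}, []byte{0x06}, 3); got != 256 {
		t.Errorf("got %d, want 256", got)
	}
	// nil bounds normalize to 0x0..0x10: difference [16] => 16 steps at depth 1.
	if got := estimateSteps(nil, nil, 1); got != 16 {
		t.Errorf("got %d, want 16", got)
	}
}
```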


@ -1,63 +0,0 @@
// Copyright © 2020 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package types

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// node for holding trie node information
type Node struct {
	NodeType nodeType
	Path     []byte
	Key      common.Hash
	Value    []byte
}

// nodeType for explicitly setting type of node
type nodeType int

const (
	Branch nodeType = iota
	Extension
	Leaf
	Removed
	Unknown
)

// CheckKeyType checks what type of key we have
func CheckKeyType(elements []interface{}) (nodeType, error) {
	if len(elements) > 2 {
		return Branch, nil
	}
	if len(elements) < 2 {
		return Unknown, fmt.Errorf("node cannot be less than two elements in length")
	}
	switch elements[0].([]byte)[0] / 16 {
	case '\x00':
		return Extension, nil
	case '\x01':
		return Extension, nil
	case '\x02':
		return Leaf, nil
	case '\x03':
		return Leaf, nil
	default:
		return Unknown, fmt.Errorf("unknown hex prefix")
	}
}
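The switch above keys off the high nibble of the node's hex-prefix (HP) byte: 0 and 1 mark extensions, 2 and 3 mark leaves. A test-style sketch (illustrative only, in-package):

```go
package types

import "fmt"

// ExampleCheckKeyType: a compact leaf node's first element begins with an HP
// byte whose high nibble is 2 or 3, so CheckKeyType reports Leaf.
func ExampleCheckKeyType() {
	leafKey := []byte{0x20, 0xab} // 0x20 / 16 == 2 => even-length leaf
	typ, err := CheckKeyType([]interface{}{leafKey, []byte{0x01}})
	fmt.Println(typ == Leaf, err)
	// Output: true <nil>
}
```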


@ -1,22 +0,0 @@
package types

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

type Publisher interface {
	PublishHeader(header *types.Header) error
	PublishStateNode(node *Node, headerID string, height *big.Int, tx Tx) error
	PublishStorageNode(node *Node, headerID string, height *big.Int, statePath []byte, tx Tx) error
	PublishCode(height *big.Int, codeHash common.Hash, codeBytes []byte, tx Tx) error
	BeginTx() (Tx, error)
	PrepareTxForBatch(tx Tx, batchSize uint) (Tx, error)
}

type Tx interface {
	Rollback() error
	Commit() error
}


@ -1,76 +0,0 @@
package types

var TableIPLDBlock = Table{
	`public.blocks`,
	[]column{
		{"block_number", bigint},
		{"key", text},
		{"data", bytea},
	},
	"ON CONFLICT (key, block_number) DO NOTHING",
}

var TableNodeInfo = Table{
	Name: `public.nodes`,
	Columns: []column{
		{"genesis_block", varchar},
		{"network_id", varchar},
		{"node_id", varchar},
		{"client_name", varchar},
		{"chain_id", integer},
	},
}

var TableHeader = Table{
	"eth.header_cids",
	[]column{
		{"block_number", bigint},
		{"block_hash", varchar},
		{"parent_hash", varchar},
		{"cid", text},
		{"td", numeric},
		{"node_id", varchar},
		{"reward", numeric},
		{"state_root", varchar},
		{"tx_root", varchar},
		{"receipt_root", varchar},
		{"uncle_root", varchar},
		{"bloom", bytea},
		{"timestamp", numeric},
		{"mh_key", text},
		{"times_validated", integer},
		{"coinbase", varchar},
	},
	"ON CONFLICT (block_hash, block_number) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = (EXCLUDED.parent_hash, EXCLUDED.cid, EXCLUDED.td, EXCLUDED.node_id, EXCLUDED.reward, EXCLUDED.state_root, EXCLUDED.tx_root, EXCLUDED.receipt_root, EXCLUDED.uncle_root, EXCLUDED.bloom, EXCLUDED.timestamp, EXCLUDED.mh_key, eth.header_cids.times_validated + 1, EXCLUDED.coinbase)",
}

var TableStateNode = Table{
	"eth.state_cids",
	[]column{
		{"block_number", bigint},
		{"header_id", varchar},
		{"state_leaf_key", varchar},
		{"cid", text},
		{"state_path", bytea},
		{"node_type", integer},
		{"diff", boolean},
		{"mh_key", text},
	},
	"ON CONFLICT (header_id, state_path, block_number) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = (EXCLUDED.state_leaf_key, EXCLUDED.cid, EXCLUDED.node_type, EXCLUDED.diff, EXCLUDED.mh_key)",
}

var TableStorageNode = Table{
	"eth.storage_cids",
	[]column{
		{"block_number", bigint},
		{"header_id", varchar},
		{"state_path", bytea},
		{"storage_leaf_key", varchar},
		{"cid", text},
		{"storage_path", bytea},
		{"node_type", integer},
		{"diff", boolean},
		{"mh_key", text},
	},
	"ON CONFLICT (header_id, state_path, storage_path, block_number) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = (EXCLUDED.storage_leaf_key, EXCLUDED.cid, EXCLUDED.node_type, EXCLUDED.diff, EXCLUDED.mh_key)",
}


@ -1,79 +0,0 @@
package types

import (
	"fmt"
	"strings"
)

type colType int

const (
	integer colType = iota
	boolean
	bigint
	numeric
	bytea
	varchar
	text
)

type column struct {
	name string
	typ  colType
}

type Table struct {
	Name           string
	Columns        []column
	conflictClause string
}

func (tbl *Table) ToCsvRow(args ...interface{}) []string {
	var row []string
	for i, col := range tbl.Columns {
		row = append(row, col.typ.formatter()(args[i]))
	}
	return row
}

func (tbl *Table) ToInsertStatement() string {
	var colnames, placeholders []string
	for i, col := range tbl.Columns {
		colnames = append(colnames, col.name)
		placeholders = append(placeholders, fmt.Sprintf("$%d", i+1))
	}
	return fmt.Sprintf(
		"INSERT INTO %s (%s) VALUES (%s) %s",
		tbl.Name, strings.Join(colnames, ", "), strings.Join(placeholders, ", "), tbl.conflictClause,
	)
}

type colfmt = func(interface{}) string

func sprintf(f string) colfmt {
	return func(x interface{}) string { return fmt.Sprintf(f, x) }
}

func (typ colType) formatter() colfmt {
	switch typ {
	case integer:
		return sprintf("%d")
	case boolean:
		return func(x interface{}) string {
			if x.(bool) {
				return "t"
			}
			return "f"
		}
	case bigint:
		return sprintf("%s")
	case numeric:
		return sprintf("%d")
	case bytea:
		return sprintf(`\x%x`)
	case varchar:
		return sprintf("%s")
	case text:
		return sprintf("%s")
	}
	panic("unreachable")
}
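For instance, combined with the `TableStateNode` definition above, `ToInsertStatement` renders the prepared statement below. A test-style sketch (illustrative only, in-package; the Output line is just the expected string):

```go
package types

import "fmt"

// ExampleTable_ToInsertStatement renders the statement for eth.state_cids.
func ExampleTable_ToInsertStatement() {
	fmt.Println(TableStateNode.ToInsertStatement())
	// Output: INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) ON CONFLICT (header_id, state_path, block_number) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = (EXCLUDED.state_leaf_key, EXCLUDED.cid, EXCLUDED.node_type, EXCLUDED.diff, EXCLUDED.mh_key)
}
```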


@ -1,33 +0,0 @@
package types

import (
	"bytes"

	"github.com/ethereum/go-ethereum/common"
	"github.com/sirupsen/logrus"
)

var nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")

func IsNullHash(hash common.Hash) bool {
	return bytes.Equal(hash.Bytes(), nullHash.Bytes())
}

func CommitOrRollback(tx Tx, err error) error {
	var rberr error
	defer func() {
		if rberr != nil {
			logrus.Errorf("rollback failed: %s", rberr)
		}
	}()

	if rec := recover(); rec != nil {
		rberr = tx.Rollback()
		panic(rec)
	} else if err != nil {
		rberr = tx.Rollback()
	} else {
		err = tx.Commit()
	}
	return err
}
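A plausible call pattern (a sketch, not a repository call site): defer `CommitOrRollback` through a closure over a named error return, so a failed publish rolls the Tx back and a clean one commits:

```go
package types

// publishWithTx is an illustrative sketch of wiring CommitOrRollback into a
// publish routine. The deferred closure reassigns err, so the caller sees the
// commit/rollback outcome rather than only the publish outcome.
func publishWithTx(pub Publisher) (err error) {
	tx, err := pub.BeginTx()
	if err != nil {
		return err
	}
	defer func() {
		err = CommitOrRollback(tx, err)
	}()

	// ... publish header/state/storage nodes within tx ...
	return nil
}
```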

scripts/README.md Normal file

@ -0,0 +1,73 @@
## Data Validation
* For a given table in the `ipld-eth-db` schema, we know how many columns to expect in each row of the data dump:
| Table | Expected columns |
|--------------------|:----------------:|
| `public.nodes` | 5 |
| `ipld.blocks` | 3 |
| `eth.header_cids` | 16 |
| `eth.state_cids` | 8 |
| `eth.storage_cids` | 9 |
### Find Bad Data
* Run the following command to find any rows having an unexpected number of columns:
```bash
./scripts/find-bad-rows.sh -i <input-file> -c <expected-columns> -o [output-file] -d [include-data]
```
* `input-file` `-i`: Input data file path
* `expected-columns` `-c`: Expected number of columns in each row of the input file
* `output-file` `-o`: Output destination file path (default: `STDOUT`)
* `include-data` `-d`: Whether to include the data row in the output (`true | false`) (default: `false`)
* The output format is: row number, number of columns, the data row
Eg:
```bash
./scripts/find-bad-rows.sh -i eth.state_cids.csv -c 8 -o res.txt -d true
```
Output:
```
1 9 1500000,xxxxxxxx,0x83952d392f9b0059eea94b10d1a095eefb1943ea91595a16c6698757127d4e1c,,baglacgzasvqcntdahkxhufdnkm7a22s2eetj6mx6nzkarwxtkvy4x3bubdgq,\x0f,0,f,/blocks/,DMQJKYBGZRQDVLT2CRWVGPQNNJNCCJU7GL7G4VAI3LZVK4OL5Q2ARTI
```
Eg:
```bash
./scripts/find-bad-rows.sh -i public.nodes.csv -c 5 -o res.txt -d true
./scripts/find-bad-rows.sh -i ipld.blocks.csv -c 3 -o res.txt -d true
./scripts/find-bad-rows.sh -i eth.header_cids.csv -c 16 -o res.txt -d true
./scripts/find-bad-rows.sh -i eth.state_cids.csv -c 8 -o res.txt -d true
./scripts/find-bad-rows.sh -i eth.storage_cids.csv -c 9 -o res.txt -d true
```
## Data Cleanup
* In case of a column count mismatch, data from `file` mode dumps can't readily be imported into `ipld-eth-db`.
### Filter Bad Data
* Run the following command to filter out rows having an unexpected number of columns:
```bash
./scripts/filter-bad-rows.sh -i <input-file> -c <expected-columns> -o <output-file>
```
* `input-file` `-i`: Input data file path
* `expected-columns` `-c`: Expected number of columns in each row of the input file
* `output-file` `-o`: Output destination file path
Eg:
```bash
./scripts/filter-bad-rows.sh -i public.nodes.csv -c 5 -o cleaned-public.nodes.csv
./scripts/filter-bad-rows.sh -i ipld.blocks.csv -c 3 -o cleaned-ipld.blocks.csv
./scripts/filter-bad-rows.sh -i eth.header_cids.csv -c 16 -o cleaned-eth.header_cids.csv
./scripts/filter-bad-rows.sh -i eth.state_cids.csv -c 8 -o cleaned-eth.state_cids.csv
./scripts/filter-bad-rows.sh -i eth.storage_cids.csv -c 9 -o cleaned-eth.storage_cids.csv
```

scripts/compare-snapshots.sh Executable file

@ -0,0 +1,87 @@
#!/bin/bash
# Compare the full snapshot output from two versions of the service
#
# Usage: compare-snapshots.sh [-d <output-dir>] <binary-A> <binary-B>
# Configure the input data using environment vars.

(
    set -u
    : $SNAPSHOT_BLOCK_HEIGHT
    : $ETHDB_PATH
    : $ETHDB_ANCIENT
    : $ETH_GENESIS_BLOCK
)

while getopts d: opt; do
    case $opt in
        d) output_dir="$OPTARG"
    esac
done
shift $((OPTIND - 1))

binary_A=$1
binary_B=$2
shift 2

if [[ -z $output_dir ]]; then
    output_dir=$(mktemp -d)
fi

export SNAPSHOT_MODE=postgres
export SNAPSHOT_WORKERS=32
export SNAPSHOT_RECOVERY_FILE='compare-snapshots-recovery.txt'

export DATABASE_NAME="cerc_testing"
export DATABASE_HOSTNAME="localhost"
export DATABASE_PORT=8077
export DATABASE_USER="vdbm"
export DATABASE_PASSWORD="password"

export ETH_CLIENT_NAME=test-client
export ETH_NODE_ID=test-node
export ETH_NETWORK_ID=test-network
export ETH_CHAIN_ID=4242

dump_table() {
    statement="copy (select * from $1) to stdout with csv"
    docker exec -e PGPASSWORD=password test-ipld-eth-db-1 \
        psql -q cerc_testing -U vdbm -c "$statement" | sort -u > "$2/$1.csv"
}

clear_table() {
    docker exec -e PGPASSWORD=password test-ipld-eth-db-1 \
        psql -q cerc_testing -U vdbm -c "truncate $1"
}

tables=(
    eth.log_cids
    eth.receipt_cids
    eth.state_cids
    eth.storage_cids
    eth.transaction_cids
    eth.uncle_cids
    ipld.blocks
    public.nodes
)

for table in "${tables[@]}"; do
    clear_table $table
done

$binary_A stateSnapshot

mkdir -p $output_dir/A
for table in "${tables[@]}"; do
    dump_table $table $output_dir/A
    clear_table $table
done

$binary_B stateSnapshot

mkdir -p $output_dir/B
for table in "${tables[@]}"; do
    dump_table $table $output_dir/B
    clear_table $table
done

diff -rs $output_dir/A $output_dir/B

scripts/filter-bad-rows.sh Executable file

@ -0,0 +1,29 @@
#!/bin/bash

# flags
# -i <input-file>:       Input data file path
# -c <expected-columns>: Expected number of columns in each row of the input file
# -o [output-file]:      Output destination file path
# eg: ./scripts/filter-bad-rows.sh -i eth.state_cids.csv -c 8 -o cleaned-eth.state_cids.csv

while getopts i:c:o: OPTION; do
    case "${OPTION}" in
        i) inputFile=${OPTARG};;
        c) expectedColumns=${OPTARG};;
        o) outputFile=${OPTARG};;
    esac
done

timestamp=$(date +%s)

# select only rows having the expected number of columns
if [ -z "${outputFile}" ]; then
    echo "Invalid destination file arg (-o) ${outputFile}"
else
    awk -F"," "NF==${expectedColumns}" ${inputFile} > ${outputFile}
fi

difference=$(($(date +%s)-timestamp))
echo Time taken: $(date -d@${difference} -u +%H:%M:%S)

scripts/find-bad-rows.sh Executable file

@ -0,0 +1,43 @@
#!/bin/bash

# flags
# -i <input-file>:       Input data file path
# -c <expected-columns>: Expected number of columns in each row of the input file
# -o [output-file]:      Output destination file path (default: STDOUT)
# -d [include-data]:     Whether to include the data row in output (true | false) (default: false)
# eg: ./scripts/find-bad-rows.sh -i eth.state_cids.csv -c 8 -o res.txt -d true
# output: 1 9 1500000,xxxxxxxx,0x83952d392f9b0059eea94b10d1a095eefb1943ea91595a16c6698757127d4e1c,,
#         baglacgzasvqcntdahkxhufdnkm7a22s2eetj6mx6nzkarwxtkvy4x3bubdgq,\x0f,0,f,/blocks/,
#         DMQJKYBGZRQDVLT2CRWVGPQNNJNCCJU7GL7G4VAI3LZVK4OL5Q2ARTI

while getopts i:c:o:d: OPTION; do
    case "${OPTION}" in
        i) inputFile=${OPTARG};;
        c) expectedColumns=${OPTARG};;
        o) outputFile=${OPTARG};;
        d) data=${OPTARG};;
    esac
done

timestamp=$(date +%s)

# if data requested, dump row number, number of columns and the row
if [ "${data}" = true ] ; then
    if [ -z "${outputFile}" ]; then
        awk -F"," "NF!=${expectedColumns} {print NR, NF, \$0}" < ${inputFile}
    else
        awk -F"," "NF!=${expectedColumns} {print NR, NF, \$0}" < ${inputFile} > ${outputFile}
    fi
# else, dump only row number, number of columns
else
    if [ -z "${outputFile}" ]; then
        awk -F"," "NF!=${expectedColumns} {print NR, NF}" < ${inputFile}
    else
        awk -F"," "NF!=${expectedColumns} {print NR, NF}" < ${inputFile} > ${outputFile}
    fi
fi

difference=$(($(date +%s)-timestamp))
echo Time taken: $(date -d@${difference} -u +%H:%M:%S)

startup_script.sh Executable file

@ -0,0 +1,63 @@
#!/bin/bash
# Exit if the variable tests fail
set -e
set -o pipefail

if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
    env
    set -x
fi

# Check the required variables are set
test "$VDB_COMMAND"

# docker must be run in privileged mode for mounts to work
echo "Setting up /app/geth-rw overlayed /app/geth-ro"
mkdir -p /tmp/overlay
mount -t tmpfs tmpfs /tmp/overlay
mkdir -p /tmp/overlay/upper
mkdir -p /tmp/overlay/work
mkdir -p /app/geth-rw

mount -t overlay overlay -o lowerdir=/app/geth-ro,upperdir=/tmp/overlay/upper,workdir=/tmp/overlay/work /app/geth-rw

mkdir /var/run/statediff
cd /var/run/statediff

SETUID=""
if [[ -n "$TARGET_UID" ]] && [[ -n "$TARGET_GID" ]]; then
    SETUID="su-exec $TARGET_UID:$TARGET_GID"
    chown -R $TARGET_UID:$TARGET_GID /var/run/statediff
fi

START_TIME=`date -u +"%Y-%m-%dT%H:%M:%SZ"`
echo "Running the snapshot service" && \
if [[ -n "$LOG_FILE" ]]; then
    $SETUID /app/ipld-eth-state-snapshot "$VDB_COMMAND" $* |& $SETUID tee ${LOG_FILE}.console
    rc=$?
else
    $SETUID /app/ipld-eth-state-snapshot "$VDB_COMMAND" $*
    rc=$?
fi
STOP_TIME=`date -u +"%Y-%m-%dT%H:%M:%SZ"`

if [ $rc -eq 0 ] && [ "$VDB_COMMAND" == "stateSnapshot" ] && [ -n "$SNAPSHOT_BLOCK_HEIGHT" ]; then
    cat >metadata.json <<EOF
{
  "type": "snapshot",
  "range": { "start": $SNAPSHOT_BLOCK_HEIGHT, "stop": $SNAPSHOT_BLOCK_HEIGHT },
  "nodeId": "$ETH_NODE_ID",
  "genesisBlock": "$ETH_GENESIS_BLOCK",
  "networkId": "$ETH_NETWORK_ID",
  "chainId": "$ETH_CHAIN_ID",
  "time": { "start": "$START_TIME", "stop": "$STOP_TIME" }
}
EOF
    if [[ -n "$TARGET_UID" ]] && [[ -n "$TARGET_GID" ]]; then
        echo 'metadata.json' | cpio -p --owner $TARGET_UID:$TARGET_GID $FILE_OUTPUT_DIR
    else
        cp metadata.json $FILE_OUTPUT_DIR
    fi
fi

exit $rc

test/ci-config.toml Normal file

@ -0,0 +1,23 @@
[database]
name = "cerc_testing"
hostname = "127.0.0.1"
port = 8077
user = "vdbm"
password = "password"
[log]
level = "debug"
[snapshot]
workers = 4
recoveryFile = "snapshot_recovery_file"
# Note: these are overridden in the workflow step
# mode = "postgres"
# blockHeight = 0
[ethereum]
clientName = "test-client"
nodeID = "test-node"
networkID = "test-network"
chainID = 1
genesisBlock = ""


@ -1,14 +1,12 @@
-version: '3.2'
 services:
   migrations:
     restart: on-failure
     depends_on:
       - ipld-eth-db
-    image: vulcanize/ipld-eth-db:v4.2.0-alpha
+    image: git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:v5.3.0-alpha
     environment:
       DATABASE_USER: "vdbm"
-      DATABASE_NAME: "vulcanize_testing"
+      DATABASE_NAME: "cerc_testing"
       DATABASE_PASSWORD: "password"
       DATABASE_HOSTNAME: "ipld-eth-db"
       DATABASE_PORT: 5432
@ -19,9 +17,14 @@ services:
     command: ["postgres", "-c", "log_statement=all"]
     environment:
       POSTGRES_USER: "vdbm"
-      POSTGRES_DB: "vulcanize_testing"
+      POSTGRES_DB: "cerc_testing"
       POSTGRES_PASSWORD: "password"
     ports:
-      - "127.0.0.1:8077:5432"
+      - 0.0.0.0:8077:5432
     volumes:
       - /tmp:/tmp
+    healthcheck:
+      test: ["CMD", "pg_isready", "-U", "vdbm"]
+      interval: 2s
+      timeout: 1s
+      retries: 3

test/fixture_chain_A.go Normal file

@ -0,0 +1,438 @@
package test

import (
	"sort"

	"github.com/cerc-io/eth-testing/chains/premerge2"
	"github.com/ethereum/go-ethereum/common"
)

func init() {
	for _, path := range premerge2.Block1_StateNodeLeafKeys {
		hex := common.BytesToHash(path).String()
		ChainA_Block1_StateNodeLeafKeys = append(ChainA_Block1_StateNodeLeafKeys, hex)
	}
	// sort it
	sort.Slice(ChainA_Block1_StateNodeLeafKeys, func(i, j int) bool {
		return ChainA_Block1_StateNodeLeafKeys[i] < ChainA_Block1_StateNodeLeafKeys[j]
	})
}
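Because init sorts the keys, test code can locate a key by binary search rather than a linear scan. A small sketch assuming that use (the helper name is hypothetical):

```go
package test

import "sort"

// hasLeafKey is an illustrative helper: with the slice sorted (see init),
// membership checks can use sort.SearchStrings instead of scanning.
func hasLeafKey(key string) bool {
	i := sort.SearchStrings(ChainA_Block1_StateNodeLeafKeys, key)
	return i < len(ChainA_Block1_StateNodeLeafKeys) && ChainA_Block1_StateNodeLeafKeys[i] == key
}
```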
var (
ChainA = premerge2.ChainData
ChainA_Block1_StateNodeLeafKeys []string
// ChainA_Block1_StateNodeLeafKeys = small2.Block1_StateNodeLeafKeys
ChainA_Block1_IpldCids = []string{
"baglacgzamidvfvv6vdpeagumkeadfy4sek3fwba5wnuegt6mcsrcl2y3qxfq",
"baglacgzakk2zjdmtcwpduxyzd5accfkyebufm3j3eldwon6e3gosyps4nmia",
"baglacgzaxt5p24gzgsgqqpd5fyheuufvaex4gfojqntngvewfdhe54poe7jq",
"baglacgzapngkev2hcarm7bmcwdrvagxu27mgu5tp25y76kzkvjmrggrora4a",
"baglacgza5fhbdiu6o3ibtl7jahjwagqs27knhtmehxvoyt6qg7wuodaek2qq",
"baglacgzakho5pd5qpbxs7mo3ujd7ejcjyhstznb3xx3fluukdjyybxn4aexa",
"baglacgza2dbonmaqxik2vhbnfzd4dhcpyjm47rlbuz35cha3jy7jyxvrsoxa",
"baglacgza5gn7vz4ksy4go5joxn3zn2hgzf7sudxlq7fthztqhj2ikql3spva",
"baglacgzas6yxvcp5fqb65gglmrm4bd2rwju5uxhoizsq5bchb5rl7a5uh37a",
"baglacgzamzsn226lwcfyh6cdetnyzoxsz2zcdze6m2lrg2o5ejl6sr5dwe6q",
"baglacgzasogvybtxh67x26ob42m56mlgnxwdelfb24oobk3po3te6yysmmca",
"baglacgzab7rmzczswht4isr63gea5uoww4pmqsxrvgzn74wheqwopl36mela",
"baglacgza2ovtxz2bp6yccm56iacbpp4kgthyz4k6evyp5lq4rzmp2c23mnhq",
"baglacgzajf3sy2bvf2vu2d4hqvj3rvq5lblzp4qptxfb4ulcyayhrrdszghq",
"baglacgza4wczwxeuvdhklly5renpmti4x34ilhhmgdlcro5jjpyhowgvdwpa",
"baglacgzazikph4bqhr7vgs2xiqpebvoyazj27mftysmy6mzoigkutxdxt7ma",
"baglacgzasvwqbzd4k6hoheken36oszbb6b6dvfc46acsyhfqssajcqd4xzcq",
"baglacgzaui2r4k54xxqxadyjt25kzovmlelw4obn3fpda6gecswheklvrhia",
"baglacgzacq4j5rfibfkuxvwa5ui6zpeq7h6edgmquy3oguz6zxxbdkfw6upa",
"baglacgzalihtntqwaqxyc5z3olm3odzztqlq6d27rx5mdt4gu2bdxgwwp7xa",
"baglacgzat5btacphq4ie5kecajgxjfgvooqza4zb47w24ibv5yvz2dy7zyea",
"baglacgzaet376qv35issfdnd44lpe3xxtmzycg56mibqh3ehd6pxbxj6bpda",
"baglacgzafkeckix5qfiuuorchl6xdg2o6vis2qknjirq63vryuqcyl24kwxa",
"baglacgzayesgx5kytkdemwcwmhxd435ka4aqqpwm6qugtirlnpyoyjexg2ka",
"baglacgzamknqvkqe37lskybr6dimt5ngmihfsmnoe5mi4yvtu7dq7tylh5ua",
"baglacgzaniotnde2dyyjhdnud5batwqnq3njuh2gotx6hivafivq4qtt22oq",
"baglacgzaov7f7oz4onncim5hhnlbjlz7ozpom26kfh66vjow3w2s2cok6ska",
"baglacgzai2u7cil4gzmzbas3pulb7qr4vzlirt5wwiyh57slomwhepqdpfma",
"baglacgza6twdmxbxie5v7ht5hdb4mqezel5cuwjxk7xwc5vxfepn4wxcwllq",
"baglacgzanax447kk5lah6ed5gqzg2eefwyygfn3l3w6n7eio3w5ohhluo7ca",
"baglacgzawxgpzpbsbi43icxcrchpoxxcaugcsvh6eusiswwjrtkdlugveana",
"baglacgzajshfqz2lgrejfi37nhstsxmjeh7c2jfok4znn4fezhmr2mlwpzhq",
"baglacgza3ask2jt3sjqfdiuxxx3fjipnxzp2u3in6z5d3qflo5fxh7ihmf6a",
"baglacgzavtfwj5dsgw4vpplzv3zsw6fwiykcpz2lpclspzq55u42vij2g2pq",
"baglacgzaelxcuf3wfrqavkk2uunaqjwp3wiuisjreuarwnbiqtdbrq5kwkuq",
"baglacgzajieha4wgbglqnmt4wbooug3ffnvayz2lqkqpop36elnocsvprkeq",
"baglacgza424ea7tewjqbcwi5fwcticsbiinwh7ffdf2jeqrmjzrpv7xpo75q",
"baglacgzajg3cp7yoxohz7luw4hzvg5cnzcduabrogcqy7ilhwhp64nmsn72a",
"baglacgza6ogjls57pq4k35agbzpeydujoq65lpoimp4iv2d6cegrdjk4frwa",
"baglacgzaqr6cfr453mxviwkqsjfz3riq3nw3lrh7lmev2nuwoop34mjmgjta",
"baglacgza5wvocvjvd6bdjteyzt3y7sdimlfxra6c4ndihqlk3oewgwclny3q",
"baglacgzamxpcef5svw5bshjcmx5dtw3jvdnsqxyqdoystvutgpk3dbxaddsa",
"baglacgzaihrnrw2zuaucifxzmpyg5kz2evaagrybgq2nm4sif3jhr7mljnka",
"baglacgzaydqlktfraw5nig2lsjmigudumpo7vzy4mgn2fza5nvl5ukri577a",
"baglacgzab2orhwmiw5gxfsqb3bwckhf3tf5jztbbdn2i5eyk2kvd3zfi7hlq",
"baglacgzamfflp7uex2uddjuoly44nywthhnugk4u3tjjvr2542km7rtecsla",
"baglacgzasfy3a6qvsisuwzgjm3w7vukbubffxx7ei3eqh7f3v2ftrqrfhiwa",
"baglacgzayrdorxqktwlfykcpqo3uhyfds3rlsjy6rcapz42x2lsc64otdonq",
"baglacgzajwya3t5k5mqyvipqqlahodjmmsljwe4df42igrc7pdgqzbc725sa",
"baglacgzalc6y4rmk42q6ix5cxhpinwyhlbnjobwb4knsqr3xe6qv7m6qkibq",
"baglacgzaidbvljbgsc2tpdyjwzcsqpszjotijnbls37ropeazffsoi2wamkq",
"baglacgzacuyuir4l6vee5vuf5elh7tvnwzymf44c4qpzu2ipo2tbbyp4e3oq",
"baglacgza6coc33lehemkv73byblozayqgaclz6xko4kla5pcptbgwhkyoibq",
"baglacgza7uco7rtze752545y336slgt7pczgdpmkb6j65x3yydfsprerba5a",
"baglacgza4eanzp6ludjfoqr4h67bzlsxjartrqqeq5t4pv2q3b4x2padxbiq",
"baglacgzaoocvbederlpqaufwkwso5pl7qkfnrpd76zj6zbwgj5f4qcygis3a",
"baglacgzavx7pxqr4m7pfzcn6tcc7o5pq4g5tp6qvsykkhe6rugqat4a2kuxq",
"baglacgzaljiw3say55ek5m3x64e66wcifr5na7vbutyuu3m74gimlh47g44q",
"baglacgzaqrzyy5uetfwsqgfvv624scsdw7dx7z42pf47p2m3xuhqwuei27ha",
"baglacgzayxrz3npxgaz2byd4onx5phnjyfwxfovjbztg6ddrhwew7pvynq7q",
"baglacgzac2cndcn3vq5mnjfoz7kdnboebmshmdmvnb6aatzkwnegyfug3cqq",
"baglacgza66vjwzsh6wgfv72zygbwgh2vufhfuagmf36q6r3ycnwxx7yaxqnq",
"baglacgzac5uhfzgshqvvqme5iw5rx4n3g5lij4eapzaejzpgm6njrec45qaa",
"baglacgza6ta2auxqjgh7o2oj6x3ogcrx4cgfxlupdccrq4j3p5zjnahnq7mq",
"baglacgzaaokqnkj6sgq57ikquob6w6uhvo6v7ni6uy677pqzr24f3nyll5eq",
"baglacgzavwymwhn2owqnbm43vvqtxgd3ab5caqalvs4sz2tzc4cs74b43q5q",
"baglacgzahlzt3rfhisvv5xkcyxc73sm6ijh54n42zfsq76ysi3jisro646fa",
"baglacgzaqhglxiq5ptweegtm64wuezj7spc2u2g5prw6zdgnwmjwfxdbn5nq",
"baglacgzadztftc3rxrphupebphkbwuzdtnthtyl4pfxga7wghxthe463ncya",
"baglacgzaz6agggjviebqoyw3sdos6z3jprjr5fe5vprt7dlarq5gxm2swdvq",
"baglacgzasdc5a3pa4mtp46bpsru56aojakeucvy57654mq5o2bjp5mop6l3a",
"baglacgzaqwwwnlav6alcw7r2umugzbxppixu6mqp6w6qyriffo27mummjmca",
"baglacgzabmrd6yhbgxhmghn5nguatwnzhrondlaxmagzxyzqdm24gooneucq",
"baglacgzajblmw25dyrzwsfymo74y5h67v4nrfgxs35eevemvqfui3y7rkszq",
"baglacgzaivgvcrgjwicuf4aremv2hbggrnzntrddmydzud6rkbpb3xrbpdia",
"baglacgzagpnopg2w6cmfzi3avh7c7ovd6rlwmnpu45kkb3wmlx3etchiggkq",
"baglacgzaom4zyvyb6kn2hoiyvwg2ywrwgr7o5fe5c3p42z4vuhfzuxmlaoaa",
"baglacgzawj7icprvylimlisn2p2626vxy7ukwps4t67gvrhduz5hlk4aecyq",
"baglacgzatnjb6dg7fsz4pesso63i63c3t2agwybbgd3i5u4ezthjuvddspea",
"baglacgza5oahzgmmqeqqszmqsfbwaq36gbirizq6aii3zm3jyud3pgndchlq",
"baglacgzaxyyowwmsdsveoyjw7ywj67krm3x77iqyy3gzj7fdc4xnzjyirsfa",
"baglacgzaew7pv5vcxev3udk3dh4eaezwpjgi2pxwqa3umwmtoiw25q5foqwq",
"baglacgzapexdm6koz42fosvv4qjbqhnhsuevh7oqmqwonspl63t2vpjqitha",
"baglacgzaixcais2z6gwyafi6bpptra65xswthhpd5g26yr3d6ahn3bl2uvca",
"baglacgzaimssao3zceshkgh6gltjqqqh2x5qiodirixcvjqutgvdphog7dma",
"baglacgzacgrm2zlg4dogiza57lwcti5r7ga6ucswdsp3mp2277jfa7yx77fa",
"baglacgzapsts4gledg5dyjaileaqdcffv5zcw6qooifqxgl26bxsoi2n4waq",
"baglacgzagz2qudg5ucppkpoeu5iq5nu6q7527mltt5i5kldaeffx4djhnxoq",
"baglacgzao3ht5gq4vbud5g5wbwsx5wejlbvgecqqadditqhk5yhbgw4tkbna",
"baglacgzacuetfnthnppfxkfzgfza3exvy7gselbqv2s5b6czidll5exmqwza",
"baglacgzaqbgeg6rmbd2zxpucpdd73kb5bmmo6p2p6eonafojtqkwi563ycoq",
"baglacgzape6j3mhckl4plr42twds57ctqwvwgku5ymjboy33gue7z5xqwaia",
"baglacgzazy26zckarnz3jfpcwpqo6rwr5r4wy7bonmc3rljbkr77uoiyoxca",
"baglacgzabadhauzo4lxjpslyal3fb5gfrs55hsycsd5r2mj4mkvcgypcvs4q",
"baglacgzao7aftivtmdu4sz3inijqfjajstgwhka2vafiigmr3dz5on43ndvq",
"baglacgzahtfb5mhojo7zknjhyhnf6o6d65wkz22ellgvxvz2cf32dhrno35q",
"baglacgzasx2czupgncbldxwxkqkxez6tt2oldw4iocqrhc7gk6bgp26g2slq",
"baglacgzaqeijuarx6vrtycc5267h5g3xzgskgaylrftmyjq7vjouxvkb5cvq",
"baglacgzalc42jtx44sibtcvjjhz6drbt54y6lcxy6ucmngi7cvdbajiebndq",
"baglacgzahbvb5fbnx2ddikyx4lulfcrftvw3mxpy4bpziskruce3xhz5tcpq",
"baglacgzafgf6pv43422ibuujti24hazwtn3ohwylzgo3mt6qu7dven4zlqdq",
"baglacgzamet5xv7ury7dnkqy5yltgbvalcl4ricsvdduy7hskmyxslvsa5sa",
"baglacgzakxelvpgmk3loheqewteco3z4pusavgv3cjj4xzylahmsiqkwovxq",
"baglacgzacqbsc6t7cqligdehacd4kjg2xlpdtbjhd5xtngqswaiiqpdrsj5a",
"baglacgza72em77piwedfycox3l4y7qbskqlptpcy7r725im2tpsj23si57ga",
"baglacgza636axkok5ao37hjupoeksmk73f3rpimd745avfcoxzwz53bp3xiq",
"baglacgza5n7yqni36tyi7clfxxfqciib6j4e3fru6ye3eticdb4b5i6k4m4q",
"baglacgzanbkitjrv36vsbyxc2fazsncuapltoqi5yxyntfjtp52dfmw5z64a",
"baglacgzazswo2typlq7izwoll6w4xnd3dszwktreiszh3b7w2kt2ucll5okq",
"baglacgza44bydaixin7ymaidhsaawjsemc2wkds62ahiaqrtctpvzo6xitaq",
"baglacgzay2b7jkphp4kufkhmwiriduyg5kgmqyzjojikd6hvib4bycl6fkga",
"baglacgza245jp2gg7wvxvbuvdxxynbsfzynj767o5dv6tkgsaghgsfsmvfya",
"baglacgza7hvenpvtima4lqksljjfeiou2lwhy6h7qvmdaxrvp6iglprd5ecq",
"baglacgzarrbzhd34po574cixc6tk2wd4escxarqzoqnlmplqkirhq2ms6wla",
"baglacgza6wjkyvgipgaxhclghpthoftpkarjiprp4g2smf5b2foc6nj7e7oq",
"baglacgzavtod2r5swzrok7fapkssy4mufrtid37trvz2jxzhnifxh7rdgxdq",
"baglacgzaaju4hfbrfcsgxp2dqrqdjrrfdjwjhbcubmmum3wsveqgsisv5sjq",
"baglacgzagfnw4qkfwuqlrd7v7nryxergohxb5s6lmw2xxgsl4zikwh6odu4q",
"baglacgza3ieihinvg2srwi7dupigwsahksvrlhninkyxt4ewb426uqmqtjnq",
"baglacgzaapcyag7sitbiyxcdbbj5m6l64vhx4gt4hbhvdwgjuhoezwlmw5hq",
"baglacgzam3qbvtektatlypk7kkdidh6fra67umeugmy7dz77fful7rl6ulia",
"baglacgzaeifznjadvk52cuv3qvbitazdkkavu4q3detg7xqhmsuykaemme3q",
"baglacgzaqdcmhkhjwdwatfshq4axfenrhggqceqrz47yiupwweqknnrvqfya",
"baglacgzanr74m4zutwqp4ybkpgdyborqoccfnigwlv6ze3hyou5jlrrnxchq",
"baglacgza5zaewwegrxjtaezosakyqpplolmav35eqfdyjju5okk3tmogbtkq",
"baglacgzavsgqcwu6m2hvq574yoi7vyzzqhaak5yjn4cflnbn6t4oqce6zysa",
"baglacgzafnsgu5ksxa4sv2kcmn2x62m2e7losf3ljqdlt7akoixyso4wi6kq",
"baglacgzatcbgkfcnzesrtyfe5hxe2yuqek2hvgmwvla2zjo3i4rvhnb2k7yq",
"baglacgzavzdzgv2mihwc6qop5hkv37hhx26dmnq75sfg3jf4nkq5vd4pjvja",
"baglacgza3oids2arkgomy6bblcggrwooaqyj3foxbxiawhckxhyc5phxqzgq",
"baglacgzaj2yfzqrtpjd6luyv7spcs4xyrmrifsxm663zznegzt2omto7ktgq",
"baglacgzaegino24jsful2fjnpe3haf3hhztdzzm626rdtmksxauccfzv335a",
"baglacgzazvm5p6m3ynh74glcwhuxtw7b3hv47ml5y6mtif2whmklebfd2mka",
"baglacgzak7v5o37lheriih5julg5c37gc3wpxmxudysjo6fttnju65efl4ma",
"baglacgzafkusmmr2rw7vijysdeldocemzrvwszho6nbvxakcy3buf3ytk4oq",
"baglacgzafiiwa2wygo4qm76xt3tekscp4ioub4u34vz2aqptp56frudzgjkq",
"baglacgza5vqm4jugxseggsbniznupli2bivz4drwupzzyfubqmt2cggrk7wa",
"baglacgzae27ionu7mlu3ojudqd4a2ywhyrenxw7zrshr4jhy4ld2fqpgkkia",
"baglacgzajdmyteoo6aovcp4w2wfnqlwp7hhncrgkajtqm3fzbxo3zhoko5na",
"baglacgzaan3c7frug6yo5tyyv7kzn6bzrxtwkwy35bmuvikkq3v4i6suovpa",
"baglacgza7p3a62673mtcsidsps3ep3atul26nzldgscxv66rvkmqj2gjdejq",
"baglacgza37tily665vel2tvvcavpqtj7n3qot3zxvpsog63iqkxmfldastva",
"baglacgzaeuvjvxxqf42qg44zjlnpje3ls7kpu2hx36uho45n27jjikys2jiq",
"baglacgzab5yedqfwm3pczaqnqfvsondxhdyorywu27q6strjbc4ixq3glizq",
"baglacgzanynqqlgddfsdtm27kvidm35d75yocvndtsdeijt7z64xkilxin4a",
"baglacgzai5bxsipie422mzr6u2itm3wgfyg7p425rcqn2hg4453fxnepaa2q",
"baglacgzaarg23ok2cd5nr6jc4ocetujiqb7nnrft42xvfuh2vbs35dfyqr2a",
"baglacgza4ztanbjvytkd7462vy5jbgwoqypahkw6gzi6a2h3ktsisf4wajla",
"baglacgzaqp33qaf7bfj5w6e4k63cbrc3oqemubyxgjmv7wjcroatsqflba3q",
"baglacgzamwsrbjbo7pyf4ftaizzj2lsqdqhivh7pu2evcgraenjg6sx573oa",
"baglacgzagf4zu7uebnql22h7pmuxotzjcs2y7y7o3dz3nsogfou4dqxa7pja",
"baglacgzaaqveulltjfdqenhsig3nzfwdwxso3ndbgovg2gnczkqop7vpbbvq",
"baglacgza22ifq7h6bot66tpn5xudjfcqtydvk7bcang7lxosyfum4ifhd4cq",
"baglacgzarr6a6fovyug5em3cqkzmggna2nvjohihdin5ffn4f7k3cm2qc5gq",
"baglacgzaao5djij6f4x3jp3qszkawqwusvofe2mhloopb55yoyzfqxkezgsq",
"baglacgzavcbrgucanfxqhbshrz2hv62vfkrtrhlv5qx6swbc3zavqvcn6zta",
"baglacgzark7ier7445klswjg5eqx5qxoiibq5mrmbctybd2ffu4gwffqkwyq",
"baglacgzacahqtmufgqhyzdgynhxsezldqc4merrerrf3y4jw5d64umjg24oa",
"baglacgzasfdhsvcjbujhmmosulzzu3w2xvyccu66qf76rwrkxgrqke7fy3oq",
"baglacgzast2lxo3sgtk5qtnp64mwxyjuozwyt5v3rg4ytrnleporcqmb62ua",
"baglacgzauwwnb3h5pxhm2h3tmcxrc3t52jlbpibalnpywnu34p74pbge6wuq",
"baglacgzasb5vgdsv56jygtmspwoswmezrfnp2kray7xhuszshqa2dfrs3ypa",
"baglacgzabhaasbte4bwnubvxduslb4am2dotafbel5lxvzki3wn5rs4dl24q",
"baglacgzaqm53klhsbyfek6wnzmzsah7iz2km2euk75yapvez7fyl73gfxhxa",
"baglacgzawaf7gawvue34nkiksyyrpizlmtkuu275e2xxhaxiirhsmmoeo5zq",
"baglacgzaaqtskzrmoaoexhra66tmvdxne353oxcxuzq2dca75ldjrqqhoiaq",
"baglacgzao4txzget4reg6nj6uwptwdu2n6sohzyfeivkdwdzvziouna2uvua",
"baglacgzanm2vfedt2eqsljbb3iwri7hu73bnb3rqgrurkmrsacfzejju2nda",
"baglacgzavxzbb6zhtlf42msx27zozxk4a6twphs4qsxchlrt2ny6t5we2t3q",
"baglacgza267mwypnyml7gmua2bifcmpndmtwzzw2dfjox3dfixo25uopnmda",
"baglacgzat2wiom6pryjqdoptciek3ckt3ctgdeujprivuey6ypgfsjypr65a",
"baglacgzavz4xq4u5fosiyz7ldtzluikmtco4k3mv4xsrnppjz5omgutz6abq",
"baglacgzacj4uv2ru2opsecdduklxkbxl4vkvyk3ercuunh7nsgfxit3h23mq",
"baglacgzav3o4q33y7amd7bgpfs5xc3kog57nnhbruh2s36pziymkmv32dpgq",
"baglacgza7hx5cpakzowq2h26ocionl2t2p6ifhui6pju5xug6wgifi2xkv7a",
"baglacgzaty5w2ykcxoxf2zfdcr742hzezg32vyanvv2qz6hbox7atjqknqrq",
"baglacgzaoyoxana7gxkhxwj47iiqjv76y3ktnk3kootf3pzfpxcpmzp6ptma",
"baglacgza4x65ftjd3telo3eyyzrgosshvnlu7kj7enzezkwiowxsentq2twa",
"baglacgza2u7imlxl3apzarjovwuegtp52a5h546qnvw3hzumxr6qlx7yd3aa",
"baglacgzay2imkpytg6m7kmq7oloogxzgfc6t7sm77spappsm2iajkdsqif7a",
"baglacgza2gxxoee4k2cxdf24whfylc7x2eb6eshvrunugemjp766sxhbx6qq",
"baglacgzaz6sqay6zefbflfsyrt43nsszivnrywlokmridmcox45ehavr2bxq",
"baglacgzawx34khb3fvi5s7yxduvtrjg7dj6avtc6wdpenpxp6tih6xwsbymq",
"baglacgzaxh6czvlet4gmuorror6l6m7qrr4ymkolyr4lzofbme763w2peijq",
"baglacgzaw7it5iumtdpxyfxvlizcwsthfsemmyjqmb5cq24hemei6dftsjtq",
"baglacgzapevdnthqwueqltoge7dt2cuxvijmhep7rw6cnp44pemp6sluitka",
"baglacgzaesu7doagjxn3mknma6nifhvfjoznwlgjqomq6jpxlcejioxu2upq",
"baglacgzahojkgpcys6csj4cos62mt6fwb32xsoca3l42qci34zqjmtyvd7gq",
"baglacgzauefudv2ingzufqe36jloewm3xketyjvnc4e4djtpbathwjm66a2a",
"baglacgza6z2kpaqbk2lezgrkqrznv3c7uaomvab6646z7qo6n3rsbz3qpbka",
"baglacgzaeqh6atyhyht4qqqvcyuxdg3uqfu5x2mujowput5bjcuor4vnzrla",
"baglacgzatwt5s5k74dcvrm6d32p5zx47fcxgihzyzf4hwbnxhkzcvzj26pra",
"baglacgzaszpquuoaaaq3auktxvag6h3fuwpnnrv3chfrymdwb5khdqwfxa7q",
"baglacgzaf2bu6l5bt57gstxyudjbbrj6jddfac3qmr5jnkt6tgwbj3qpfavq",
"baglacgzaeph54ay7tbgyox3437nbngzluz2k4kkqmjh6ymgbuakg2c3mf2da",
"baglacgza2wso6cd6qxxk7kwtcgcx6gg3nztqk7h3kepb7if653mn7magazfq",
"baglacgzax6ioorxkqyls3kmv2ntmfhsbptavrrtit2vy6zmgbnltjjbyogpa",
"baglacgzawf46giyla7nssrdtvzl7afycmj4y7dcvdr2vwvtfvtqscxhocdfa",
"baglacgzamyk5sdzyg2vnuzaqmbwwzqbbh2xxgfcouukhmcjcudy2jdw2dy7q",
"baglacgzaizfqoqu2aubz4iutcsjnnrrfdkdayamouoiaixkznmnmcg24pktq",
"baglacgzazcudtwhvet6q264rgjonf6nt2a3omigym5wpabkq23kdeyvxqr6a",
"baglacgzatymnlewdcj7uqohfdcrcszva7nzezhgib6risqpenllqdfch3i3q",
"baglacgzat2pxiuhdayqh4ma4ss3wxk2uyipuciqonxig3z6jitc5kdmrozha",
"baglacgzafokb5hx5vy5ltj4ee6ndad7c5fbak3j34ap2j4u2i3mbt5oeqkzq",
"baglacgzakuwsijjghgtk4522uhpxad73slbechnou4ug6fmniqebzals2bza",
"baglacgzaxl62rn4xijbrpvuzkbb5awzhuasuihynltlwwau4lij3rn64rb3a",
"baglacgzairaleq3xeadqowm7ec7kvxmbjsmqrltobjcqjso545a3zdcge72a",
"baglacgzao4vipuem6ogey2f73z3qs2cxdk6rn7jygxfzajegxuxfcxktyewq",
"baglacgzafufkadgo6qcmddvnavloopfzmozwxi3p4h3mjn5jw2xmj5ws2ipq",
"baglacgzai3dvv53agiud47vx3fs6gpqg5gvjze5xsecatnh5l34e6pgocbia",
"baglacgzawug56abirtemcm2skgyexstfmmrvivru3xjcgdyxqtj7ef3jxnjq",
"baglacgzau4tmywowb37dv47edd7pl5af222ba23pfrlukvkbersc6vrv4qwa",
"baglacgzabqzaabcpgd4pnucu3izbykoognju5kc5qwtfkualy5r6todywowq",
"baglacgza2g5mo2mblvbfjjrm6xk2ppf6jplupamowaqb4j67szvaytx3wfra",
"baglacgzaw7ftkn6xzbnwyvievvi5xuoqeodvbdwirel2cvx4a6kracedtiza",
"baglacgza6anvax7pis7sukuzo6t27drgmckh2ahdork3wmzhqquidlakjpqq",
"baglacgzaywc4cisesa54dmxrzulfzvg37ldoe3vebiqoncqtrhdxaypepf6q",
"baglacgza5ndtrasv47fgrnbpuvqyaam4mhrn2ma37yqce3lkotlzl5vqc2ta",
"baglacgzargpxdk5rrrwjkyiyx5lh7ldctn27p2ksnbz6ikot3cv3nw5vqaqq",
"baglacgza4rw4nllzvg5j3kvvrsisd3jcwgq7htdege42ris6ddkpiti65ala",
"baglacgzaoao7i2mmwuopg2gfx5m3xn34fayjdrov2yolscqtz7vi5emdqdna",
"baglacgzavwgvvyakic262434m7kigrzlmqautwbknymr4fyngjkobh3cyl7a",
"baglacgza6gta5cebz7fs3riluwgde3gmtjw2qkd4dzpvnuqbovr344aaldca",
"baglacgzao6ru6zkgi7lknzzc4xogdvi5bkoux6gaoj4rejbazar7yavge5ta",
"baglacgza2lsx6yk2i5iiy3tasnjvgqult7a4y5lhpi7lr5pxhvq52cvp6x2q",
"baglacgzatou7j5blylumwrr5hfsck3hqrasegy55ewwgldtwew3uykaszcmq",
"baglacgzaqi5dqutwokxefveag2nibmfzylw6szglsntiybeh4e2bmb6f2xxa",
"baglacgzaovkdfxjerufbq24zzqm767juiyt4hcu4ivlpvxh447w66rpfvtka",
"baglacgzawez7iipzfpgi2jirdwusmbvhdjporhu77ejvoam7duwmequa4isa",
"baglacgzazlnsvtqu4zd5tjtz5bct7d2aqiotmfsfg4eg62bki6qiti6fdl4q",
"baglacgzagfqonr7vtlbdofwm34pkoz325axn2v4pxyxbdly5enjbfnwo6eyq",
"baglacgzaljokkpwqxdoaoyrmsml6b7b7zfiqefbhwxlmexxepy2d5wuyekya",
"baglacgzabu6rq7xkdr5uoe2eunlx773yg2kk2h2lho53ef3c4adky2jhs6fq",
"baglacgzab2hdhand5g57pqt4uslpy2mz6rqnkwlvw27bczvsc2tj2m3pr3ba",
"baglacgzaugsxw7cthfl3fg2rlhemgut2hhitktn3bovkjd5hawrvi5ss7gsa",
"baglacgza6wtl5yiy32ruo22c75ysjtnxrghptmimp6fp2pq3ilpaxqyn6c2q",
"baglacgzauokbnjmp7gn4sz7e247j7ift5hrueq4zzq577m557j3bmqnwfixq",
"baglacgzac2lofvuakrf675xzz6hh2ahgbd3z77gxc3ofrjolqjqj7dqhzopa",
"baglacgzabsc4xuh7rbvblytwkhn4swzctyu43ba36xoehvuc7cpmbnkd3ska",
"baglacgzayunrwjhott4rnqk7fniizbsv55apaqalgup2fnf66qip6aartkcq",
"baglacgza3zbafsnpvwa5xw4xpjmx3ndhmuhynaoxxrzwcnfxi6o4rbwpu2hq",
"baglacgzaqm4ijihatant626rqycd33xaerqj77zivb5iwmgyaqwgysc3zf6q",
"baglacgzal6llyltmvocfvqgxq5ltwunaus5ntfhl5ze5f35kd67oj6y5lq6q",
"baglacgzauyqu2gqzcc2xtmahbe4bnlubzp2thteevnp6bfd3kxpnxozq74rq",
"baglacgzazklwtf65v4dpdcms6yqh4t3kawlz2b5m5lmwk2afq6eqc7gg2bvq",
"baglacgzaoyn5xje7zjq52lswouegf3w64k4zhyqp6iclfsyj7wgjfjwyvicq",
"baglacgzanrcxybniprkx7bhw3ggpwn2uuigb33ifkdxuavbt2niu6mzmo7pq",
"baglacgzaxxsmknpbqxei7ffyjb7fhqtvfrwxr4t6zloyavtkt3jygvsldlra",
"baglacgzaaiqagvbyp2jrclsjllilvba5ajksvpj6rsygtcx5suskigolta4q",
"baglacgzatghruydgf4lodn6vmjtvfpvf755goj3jkeusdwia5pixldcqjmtq",
"baglacgzamfrwerukgoisehrxqlnefyww7ohkihngxxjnm6pcbpydoxagcwda",
"baglacgza4ypfm4rxwsoejwhza3housicojqliaimccsupm4nrmjrxhj3n6ca",
"baglacgzagp3wukeubt7wqrdq5okknvbyh6rueyo5t2np5rg2whot573jq2qq",
"baglacgzaxjrq5medoijedijmlrkevn32vsthf6vhgtojvtlttxo2ze5brbja",
"baglacgzarwmkoc2al7nxgjxdysbzdiq4yfcbthxhbs4hkquxxnevsoxnwc7a",
"baglacgza2jleouo2qqbrfv7uc73q6aw4svm74ltjhzhsqhpmqdcsxmvjxurq",
"baglacgzajno3x77dsi7inf4voolwgevuslix7ays2u6oh3z5mq2klkwbj6hq",
"baglacgzar2p263trvudcq3qwjppcpfgzmxc4taacjtekhkfzsqtatl2wp27q",
"baglacgza5efjepjsmz2y65dfccco56i5jvrkn3wochllzfze6k3o54qkvlaq",
"baglacgzaxrwu73uyvnvmbfuepvcxeryunic3ozbn6t5uxwypoy4puej6z52a",
"baglacgza5ux3uey7vxvn5miif5lf77ywz2yar5utavxdcqbai4lma4446hqa",
"baglacgzaufpcg6e6rm62ybb2a35vwtk2ptqt4z74pj3zmii6rx3a3dwnnw7a",
"baglacgzabnitw6kehgnmpyrjdk343qnzt4cekjlmypymhnvvylkq5k2ptcdq",
"baglacgzauckhnf4srmqecrryxiflfpf6kavfhm3d4qmjzkxg27f5dj3546cq",
"baglacgzapxzpwc5xrysx6y74fs6pybyqlfly3olnv5zaazqsbuztbopuc6jq",
"baglacgzaqtea7gzv2h3jroibscowoifdm64hvqievgvxg4v6kymat7e22ncq",
"baglacgzantxg5ciyqddbw2tjz5kwrbh2lmxikruq5ifa4xcfsiwfgs2fheja",
"baglacgzajv4bm22iarh5ykhneljp2ooi35xyvkqezny5hilsq2cw62et76bq",
"baglacgzajiyfhc7uqabfypgpvip6dildryb7c4epz3tzxsoejbliwozlbphq",
"baglacgzahsh7cceh3en65fkgjesotsxs3pqbhflxzv5kdkxnz67jd7c4pczq",
"baglacgzaz7hm3bnvwozlapazmwe5hu5zxtin37ab6aam32p6hsvudxdkbila",
"baglacgzaz5yvtye7y27sz7oitmxfgt5yvqdzcn6z6x2vxar7rvluzqoh6dfa",
"baglacgzafelbojewhho2qlzz2d7txvh7ycbjntfmqkwdxkiw6raesraqfznq",
"baglacgzawat7pexa2n2lq74lyoq6axky2qzzyf3h6sa6hrucjc3z45elm6zq",
"baglacgzahwk3er5cckpklgmlw57cna2p5hkwwekjkkh4iz62pm5ybievfqta",
"baglacgzabi63cfckdctmkqdhbcdwszzatr3bfcyyuaocrgnypedvjmjog2za",
"baglacgza4fxgurqdgfxs7ja427ikr7e2rxfhzi3hmov6hg4z55l3qow7kaiq",
"baglacgzaxq3k23qmqsllx7iz2ymhliqz2jewob2nckhdd2wkxtf3rb5drpwq",
"baglacgza5nzqr7e7b3h2gmbxz24vdcmfcoadnzbie6nbtvigpyfigqerrxja",
"bagmacgzakvveqidigvmttsk2gqjl3mqscorqcsb63mnwiqbpwzvmt42ygwmq",
"baglacgzalodtjmdplb7dy2p5arsxk7nyszh6lhsyzxe4lgkdgrp6rymxzela",
"baglacgzauzvc7x64vjf6wlwaisddf4vf6hjsfmtlypnadtb5i7kbbasizmma",
"baglacgzaixlti7he2ffvgp6raqotxkdsekh5qy4duv3tmtn6kvn4n6sjuu2a",
"baglacgzathtbu757wgovtxofbnlsnsyad662vbnn6aqk3oyyx6xixtxsw3oq",
"baglacgzaz6ajmdnij27zbfrxugyesam5i6m6cezxfveoxjadnolwjelszw4a",
"baglacgzaxzceixddm72q4dlup2gwlsoxfykcejxavmskrbravtwa5xcvnktq",
"bagmacgzavl6vwffg5wwncspbcc5go5vgktznx76kgqeqfputhuarce7soubq",
"baglacgzawksvmxhdtwfx7k5silyip4c3ojz255cast2bmycgzxozpb2rys7a",
"baglacgzaywze5wn2o5cvdrdekjdjeet3tt36r3wfzwpcop54iumbvrex6zpa",
"baglacgzakbsr5nin4suyz7r3xxzcxkuel6fghs6zrbw2yi5ez2xo7nloerpa",
"baglacgzay5ujimrt4qi2ksavtfjysqjsn5m6ysxizi6hg3gqhpnuj362d7nq",
"baglacgza7q5xdqz6fzvxprpesta5w763wrduopyahwxtpdd2mo5jx47qasoq",
"baglacgzaisv2zdtclyzxlffct55zevsfb6wxmu462ft7et5qahpdqrnmcsba",
"baglacgza5yyio2rxxtbrkpk7vvv2iyp7pfp4bkismdma3mk6qkxlhsiy4f2a",
"bagmacgzaugn6dwvyjeqblgmuhrlxoerqgrzpev6uhsmi5f752q7kfsdiuqxa",
"baglacgzaq4oyzbuduaeeg3ww6bzspstpbtcb7tiyswmaaymfpvao2hqwxcva",
"baglacgzabqho5affvmsfef3cnd4xsw66l42d6ena4g2xedujct6qsd7o4a2q",
"baglacgzapohhuiobc6gsqb2pcv5vb7fil3rfyeswr74os4dnzpg2zn337bka",
"baglacgzaovc4t2yesyqvzvdsybtp5k2y4tb6xy676gwnwsr5qoztogehxj4q",
"baglacgzami2ovudshhpsyi6vbuq5fycfgmv3hyx3bjacvlsxqc4chz6vgcda",
"bagmacgzafb27j6ni6j5vwm7kfxfwfuqau7m4raff5v44ulu77z5wwp2bpnaq",
"baglacgzaqw7dbrzdyxhjsdn22orpgfzxxwdqcf7hn7ugy4hl665cckc5oxja",
"baglacgza5psrwfh6u2vklqex6jigq5hjscatynwnge4z5y6xeztn4lo6h7ga",
"baglacgzauiscf2uzdir25zlogw4qpzwriy6mtgsyzl7omehok3jpmskk3knq",
"baglacgzas4zhiutice4t5if7jai4vedxkmo3adigxbrdpixm22b7kw5exsya",
"baglacgza3tax6aemhf6t2lqknaazzsksu2c4fjllgjx2izlkv47qmhzfgtwq",
"baglacgzakncmprlqvhlj4nfejd7odbude6hmeykm6wspwqpm7bg3xoqi5dxq",
"baglacgzaa5igkis4qk25v4ko6eryts6watdot3ark5uzlxm3o7j3izolxala",
"bagmacgzaomwzsxiv5cwrrjquk4ryb6z4u4xhuu5xhpznph2oyb53ixrsvvca",
"baglacgzafjhvq54vejfj2vrvtidr6nlt3e4azkw5jg6kdnr2dot6edm6mzsa",
"baglacgzasvs7p7bsxtnrb5fz25cx5gyh43tqja74ywrhwpmt27gnni4z3qda",
"baglacgzagrolvdnsflcwzcmqnbbyon3enber2hlamdf77kvhwousoyznwika",
"baglacgzahkj5ojwxjb4hjzi3klmnkngghkrknco7ddr3gb6a23fquoeladzq",
"baglacgza2zihxbb2gl2daaft5miumsjqbps3xgmip2r52ubrpii5zkpshpvq",
"baglacgzakhvmbzxior7nsroicglbhkbvts3weihhcrqqz54dhcgosaavgiea",
"baglacgzaqlswzpybvsbc3fqkr4iekizldlug3ak6qsuthtu5qtybmtij2lia",
"baglacgzaajspycacn5bhe4dpspprjoayo72z54wmrxz5n7m2g7of3eazijqq",
"baglacgzax7i3elt7nndzjenb5xkogpgelmcmmtn6lqp5v6kvyfqe7m5k5sya",
"bagmacgzauubmsoyzddcmmu2niwj24a5fui72cdv4gd73ocalff576jcg4qwq",
"baglacgzasqqcuuppbzjikphak2gz56fnuysk4vnlq6andul7yvwolmswisiq",
"baglacgzam2xbzezi7l6vlyicgx6i3kpiqceh5veonhmpa4pjny3eibaeolwq",
"baglacgzabirgkutruwdjfcpl6bkujicvpsixkwfjh5hmuy7xoamdysl23dsq",
"bagmacgzayktazfgfoa6a7g5ijetwofgbp4aphqxbok53sqoc7pfydslq2moa",
"baglacgzalvkdmoxvvqpflgq235nahqiw4xofhxzhuio2eljusr7uhrch7nnq",
"baglacgzazsxzdrr4wtg24th2crzvzt66fhg7dy3zppagpy2nn5eesdrsaq5a",
"baglacgza2vpmjbvshqsmj3qfuh2qfcx5kg654uhqbknb3ok25ppmhnfd35sa",
"baglacgzadcjenr5pr6xnfr6t7b64rnnfdv4h634k2zm2y34roiuuwpp75vga",
"bagmacgzau7hv4cknn43r7hxusbijdicen3yvpftldneg5zc2xmstgvhft2ra",
"baglacgza4fxgo45wl7zhyqula5ahuljoi6lreftfcwskipwmhrcejv35j42a",
"baglacgzasoghibkt6mikv6sjvnvv6zci47gjmnkumjzxhlei4tvq53e4jstq",
"baglacgzaivd7643lhy6s535ukinqa24onqywzkfnfhhi5r7uvawxtiw7urza",
"baglacgzaqwe44wrh2zpa7ogoka44yx6hox6w55jnndhymz4nerazqjgxedua",
"bagmacgzaha7rcryssphnazakbiunmc42bokxd5sgzrbo5cnilp3g2zt3vnxq",
"baglacgzab7lroi2stb2cmi6awpfpwpsl3bwawwvr64ijpng5dhz5nes5owgq",
"baglacgza6l4kyy7nsrg2lahabyhvclpuncic2sqtzvmefqofpuq5lnsdhmra",
"baglacgzacsbz24qw6iy2vviclvzaegksg22ryng66bhuxpj4dl6pcg32wzxq",
"baglacgzazrli3jvfluavjdjwgkt3qktktnuh6set2t7ib7hzhanobmwxwvla",
"baglacgzankthcaoqchi4el7hhhxyhmclkikkhyxy4grgexml7wyrnnch5bxq",
"bagmacgzaf2zl6rp5iq55dx4ln6oas4tkjrrffihxrfvbggqidy42p5sewoeq",
"baglacgzav7vn47ouq6zebmg3img7nmada6ag4hx25uouzqxttyptyudr46bq",
"bagmacgzasc5m55cldco577of6ixny4h6fggfrzpfeptodx67pw6g2zl7punq",
"baglacgzaerhefaw75qz4to3wkfrm53spfzrzaaz2ss3cbvikf7djipv5ql6a",
"baglacgzahax3xfs4df4ywelodmzk2zgztppqt6hu5vgihyntrd722dxixrra",
"baglacgzaeqyhcnkoumzym36selclrief3po2p4yj62juga6r7ueszzq7fsaq",
"baglacgza6oydtjhtene6qxdyfuiwjqmjbzn7c25nzhxez6bh3nvp2irj3xta",
"bagmacgzae3xnnb2gakf4g2plivvx2pxeowvbn42ol2vazgh55w44lhv4koya",
"baglacgza3esavhjnlbi5awux74zqkm2n7wybahq6gip4e6osxm6k22x2r7ea",
"baglacgzatxyuvssxlehlznynti47jiaoyj5kqevfdmu7yj4npmjr6l6uyhfq",
"bagmacgzattugdfyxhykoayz5xbgor3vdfrkfj3v6svdxsjkwis2fw4l6rbaq",
"baglacgzaf4sjbg7ya3pq737z7im3pmp5vubrly25hfkvea6n7pfapib63kyq",
"bagmacgzagkghv6zmldxt7dcbc6uoxuzw6gtb2jczcbt63hc2v2khs3fmtb6q",
"baglacgzavy2t2fxjdf7pgnx6dzz46eczpnjdwveeiihq5ev42guggtnivpxa",
"bagmacgzajkxbxnhzvomtm3vz3rtsokavrzinenk3anvvqwog6tg6byve76nq",
"baglacgzahkjgb63xoh6ke37ztl4npobu2gkyh3ae3jjii4daodh7utnujiqa",
"baglacgzacthcbn5p3sqfzmpzrndyhbcmneuptrfwr7s5disl54oz5nxm5s2q",
"baglacgzam24ldzjqb3puomhwshglrtjcyrcpkpva2wybbkltfws6tor5tp7a",
"baglacgzaqkecamlmyav757mjtk5ecnaglh6qnxy6bidzmkd6yksbcarz63ja",
"bagmacgzaquqfnzlnbsk5idejdyvjpchlahovlbrt3degno72rl4dc6htsymq",
"baglacgzaecczvtf4q7l2mhitw2tn4y26ysaolmicnoc542wkyvvrs47o7a3a",
"baglacgzavs7qjikqvxuxkpz5liqdyqrzaonkllqw6kd4lf2cxjltxxlgz2gq",
"baglacgzawwi2ftqcgz7numopfulozj6cp7ke3pyims3e5kbftljwnfxlfica",
"bagmacgzavhhx6zz2bphhn7kagmvp5bqbkqurbnen5jcosojtups6smg2lumq",
"bagmacgzao5vkivv2triaryb3qk4edkopf7a6qv4m7lgvzeavqbhk4mk7c75q",
"bagmacgzaolr6fbgupow3wcs4ufbb4elz2pvjbtaqpbnsnn2pxcub6d46qqma",
"bagmacgza3x3z3mfdnugicnf2cq54wva42r4vvgrlv2fmuc5cjogysy6cu56q",
"bagmacgzagatdibfm73qqhufragifh7zsid6oim6gtnyjqmlhgkc7uwehzzga",
"bagmacgzamsaplavqsdtlvhzyovqewgkyk26azgp6tfdbzz5ux3423eajsita",
"bagmacgzarsrnwni34m76ucixyqhwmzjzdoj4xyqzcepbbxzzg5kim7edr7dq",
"bagmacgza7dy7xmpxwsbntbqeqd7oxob76vfiw3wb5llbzr6s6joxyalft6oa",
"bagmacgzaxfz6yd2i64il66pwg2eeqv2vzpuh7hkmnazgxob4e2xwecacvaha",
"bagmacgzaxrdsjyn4vafqvzadwgre564iakz2owgrueiyjr7nh7evfwksnizq",
"bagmacgzaxqrzefztg4772fnaxzrwhela4py4iybnsucowa2ybg3jolflfdba",
"bagmacgza6ccvgsnpnp4ev7elzixnumoi56gfcon6deu65m62jotlncubrsya",
"bagmacgzayjy6dcno5mo3lvm5p7uh27lde656pt5drfqzafsfsgles7pdztpa",
"bagmacgza2ved5k3y3gr3yqiixnhlzwelsmbxmyknsvg4ci4jiltww5alcxma",
"bagmacgzamq3lujnpelx5hm2l6heowtohkwhuliyq6r34yty4hrurctkscnla",
"bagmacgza45idxjlztz32umn34eyqymjmuf5syw6mr6ry6jtgoxupcvgckfvq",
"bagmacgzafi3v5u4p4fgckxsrbf4u3zz64gfszz7pyihxhqio7ztn77yjwcqq",
"bagmacgzatjwpysdg24pamvqso3g4tjchz72pdxsqweyuubc2jrdeusscvmra",
"bagmacgzasj4lqrtjnu3scovz2iff5nblapntc46ojefc545s6ozwablz7rrq",
"bagmacgzas7lcbavos6lvsurhbzlpekgh35dgarm7nye26e7wwrooolwfbpnq",
"bagmacgzasmhzm736xpvahwm6jogaqeuieqsteffkfxfsq4gm6eb4q35a5d5a",
"bagmacgzaw4bsyt4rnl5koaclh3bkzwk6ez72sj6j5ghsks5a2r675l3tyytq",
"bagmacgzacmg7rh342shchhjofzwlwxrej2psqkf43jurovkweqpniytdzvha",
"bagmacgzacy2ji662bc7ppplvkyxlvjxqiwyo4j2ie4xtck6l2zwtbf2w3i7a",
"bagmacgza5ecbawirj6ojccw6zijnxoq75543fywirgps24qtzurn7zbravqq",
"bagmacgza2vdmjsrcpith2klzmzbqgjbcg5dcj3iqtm6zjbemlxagxlhk5z3a",
"bagmacgzae7ci4iimzrxac2dl4lkkdgotl4hb5dpwesunhil4cy56rbq2zvta",
"bagmacgzai7cz3jllwk7tjde52kror5ktrkjlsbfwmhh6kssctc4fq2f34scq",
"bagmacgzabu4xfmjm7dg6rf2fjjn62f57ilrchh3v4gbf62erabtzu5wm2gxq",
"bagmacgzanjgius6avm37j2fq46oahss3cw4g5ntlfjzf5sbtguzppyai6pta",
"bafkrwibagt3z4drtwcxgx34uquzaeg5m5miwvxzgczdyoa56y2yxgkprzq",
"baglacgza5n2ivltmbqypzfjptsvbzvlvhpbcbzlr7xj6xb7zaallj3q3bu4a",
"baglacgzal5gkbdbs4srzs7iostmji3r5gypmlubclwonqxdn5dkxfoyktheq",
"baglacgzaeggi6pqszfefbd2or7verp6bbz6b7ctkszxi6yalsypnivkrc47a",
"baglacgzawxfq5gj2pt53idroosz6eahmfmrwxuz5fpciiwmiuts7l4a6k2eq",
"baglacgzaj46wxqbpstd5eicctecpdxhffmbuenzqmd3bt5jdjykdr7aeo3aa",
"baglacgza7lwpiwksommncl7ofw4nqxcu7qse2aqhxizwuapds5mtxaa24ypq",
"baglacgza7wkyigp25224rkrivwellawayv3y3r4mobbqc6xxmgscxgiq3gea",
"baglacgzazrwcvecxj5bq6pyshnxvp35apsxcdtfzacvfbvsrnaa2vag4wnza",
"baglacgzabchzwz3pjqtrnx35rjav3gmxeh6sbw3l7mjpwrb6gbiz5r4ltcgq",
"baglacgzaokokv2ioov6fjlkgkufj4yrplnxdw47r4rqhighqnb354ea4jaaq",
"baglacgza5gcozkl7fbpnys3d7uqzmawqsuvic5lrti4hznllfferepgxojja",
"baglacgza34suygwx22xxdd2fynck4x6fjrrhoaxloeni45znn5ewpk3g7lea",
"baglacgzasrizkrumchv6zypcuhr5fmtz66ej5cnup5sjbapxpj27ttj3u5xq",
"baglacgzad3w24kle2itl3jm2kxq6cysoj4xoflsrhrw55msc6meagt6laetq",
"baglacgzazixckhuckariike5abthcbdjgmgz5rcysbuaucijz5d7a3avqvpa",
"baglacgzapdoq2uowvqcis3dlzxug57bwzas2dyhefu3f4frrqdz3yknzdxtq",
"baglacgzabbcknaso72duwyoeqd4i2gyghf4avilk565nkzduap6h5jwcosza",
}
)

test/fixture_chain_B.go Normal file

@ -0,0 +1,525 @@
package test

import (
	"math/big"

	"github.com/cerc-io/eth-testing/chains/premerge1"
	"github.com/cerc-io/plugeth-statediff/indexer/ipld"
	"github.com/cerc-io/plugeth-statediff/indexer/models"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)
var ChainB = premerge1.ChainData
var ChainB_block1_Header = types.Header{
ParentHash: common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177"),
UncleHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
Coinbase: common.HexToAddress("0x0000000000000000000000000000000000000000"),
Root: common.HexToHash("0x53580584816f617295ea26c0e17641e0120cab2f0a8ffb53a866fd53aa8e8c2d"),
TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
Bloom: types.Bloom{},
Difficulty: big.NewInt(+2),
Number: big.NewInt(+1),
GasLimit: 4704588,
GasUsed: 0,
Time: 1492010458,
Extra: []byte{215, 131, 1, 6, 0, 132, 103, 101, 116, 104, 135, 103, 111, 49, 46, 55, 46, 51, 133, 108, 105, 110, 117, 120, 0, 0, 0, 0, 0, 0, 0, 0, 159, 30, 250, 30, 250, 114, 175, 19, 140, 145, 89, 102, 198, 57, 84, 74, 2, 85, 230, 40, 142, 24, 140, 34, 206, 145, 104, 193, 13, 190, 70, 218, 61, 136, 180, 170, 6, 89, 48, 17, 159, 184, 134, 33, 11, 240, 26, 8, 79, 222, 93, 59, 196, 141, 138, 163, 139, 202, 146, 228, 252, 197, 33, 81, 0},
MixDigest: common.Hash{},
Nonce: types.BlockNonce{},
BaseFee: nil,
}
var chainB_block1_stateNodeRLP = []byte{248, 113, 160, 147, 141, 92, 6, 119, 63, 191, 125, 121, 193, 230, 153, 223, 49, 102, 109, 236, 50, 44, 161, 215, 28, 224, 171, 111, 118, 230, 79, 99, 18, 99, 4, 160, 117, 126, 95, 187, 60, 115, 90, 36, 51, 167, 59, 86, 20, 175, 63, 118, 94, 230, 107, 202, 41, 253, 234, 165, 214, 221, 181, 45, 9, 202, 244, 148, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 160, 247, 170, 155, 102, 71, 245, 140, 90, 255, 89, 193, 131, 99, 31, 85, 161, 78, 90, 0, 204, 46, 253, 15, 71, 120, 19, 109, 123, 255, 0, 188, 27, 128}
var chainB_block1_stateNodeCID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(chainB_block1_stateNodeRLP))
var block_stateNodeLeafKey = "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb"
var ChainB_block1_StateNodeIPLD = models.IPLDModel{
BlockNumber: ChainB_block1_Header.Number.String(),
Key: chainB_block1_stateNodeCID.String(),
Data: chainB_block1_stateNodeRLP,
}
var ChainB_block1_EmptyRootNodeRLP, _ = rlp.EncodeToBytes([]byte{})
var ChainB_block1_StateNode0 = models.StateNodeModel{
BlockNumber: ChainB_block1_Header.Number.String(),
HeaderID: ChainB_block1_Header.Hash().Hex(),
CID: chainB_block1_stateNodeCID.String(),
Diff: false,
Balance: "1000",
Nonce: 1,
CodeHash: crypto.Keccak256Hash([]byte{}).Hex(),
StorageRoot: crypto.Keccak256Hash(ChainB_block1_EmptyRootNodeRLP).Hex(),
Removed: false,
StateKey: block_stateNodeLeafKey,
}
var chainB_block1_storageNodeRLP = []byte{3, 111, 15, 5, 141, 92, 6, 120, 63, 191, 125, 121, 193, 230, 153, 7, 49, 102, 109, 236, 50, 44, 161, 215, 28, 224, 171, 111, 118, 230, 79, 99, 18, 99, 4, 160, 117, 126, 95, 187, 60, 115, 90, 36, 51, 167, 59, 86, 20, 175, 63, 118, 94, 2, 107, 202, 41, 253, 234, 165, 214, 221, 181, 45, 9, 202, 244, 148, 128, 128, 32, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 160, 247, 170, 155, 102, 245, 71, 140, 90, 255, 89, 131, 99, 99, 31, 85, 161, 78, 90, 0, 204, 46, 253, 15, 71, 120, 19, 109, 123, 255, 0, 188, 27, 128}
var chainB_block1_storageNodeCID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_block1_storageNodeRLP))
var ChainB_block1_StorageNodeIPLD = models.IPLDModel{
BlockNumber: ChainB_block1_Header.Number.String(),
Key: chainB_block1_storageNodeCID.String(),
Data: chainB_block1_storageNodeRLP,
}
var ChainB_block1_StorageNode0 = models.StorageNodeModel{
BlockNumber: ChainB_block1_Header.Number.String(),
HeaderID: ChainB_block1_Header.Hash().Hex(),
StateKey: block_stateNodeLeafKey,
StorageKey: "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
Removed: false,
CID: chainB_block1_storageNodeCID.String(),
Diff: false,
Value: []byte{1},
}
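Each fixture CID above is derived by hashing the node RLP and wrapping the digest in the matching multicodec. A quick self-check sketch (illustrative only, in-package, mirroring the var definitions; the function name is hypothetical):

```go
package test

import (
	"fmt"

	"github.com/cerc-io/plugeth-statediff/indexer/ipld"
	"github.com/ethereum/go-ethereum/crypto"
)

// checkStateNodeCID re-derives the block 1 state node CID from its RLP and
// confirms it matches the fixture key above.
func checkStateNodeCID() error {
	cid := ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(chainB_block1_stateNodeRLP))
	if cid.String() != ChainB_block1_StateNodeIPLD.Key {
		return fmt.Errorf("CID mismatch: %s != %s", cid, ChainB_block1_StateNodeIPLD.Key)
	}
	return nil
}
```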
// Header for last block at height 32
var ChainB_Block32_Header = types.Header{
ParentHash: common.HexToHash("0x6983c921c053d1f637449191379f61ba844013c71e5ebfacaff77f8a8bd97042"),
UncleHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
Coinbase: common.HexToAddress("0x0000000000000000000000000000000000000000"),
Root: common.HexToHash("0xeaa5866eb37e33fc3cfe1376b2ad7f465e7213c14e6834e1cfcef9552b2e5d5d"),
TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
Bloom: types.Bloom{},
Difficulty: big.NewInt(2),
Number: big.NewInt(32),
GasLimit: 8253773,
GasUsed: 0,
Time: 1658408469,
Extra: []byte{216, 131, 1, 10, 19, 132, 103, 101, 116, 104, 136, 103, 111, 49, 46, 49, 56, 46, 50, 133, 108, 105, 110, 117, 120, 0, 0, 0, 0, 0, 0, 0, 113, 250, 240, 25, 148, 32, 193, 94, 196, 10, 99, 63, 251, 130, 170, 0, 176, 201, 149, 55, 230, 58, 218, 112, 84, 153, 122, 83, 134, 52, 176, 99, 53, 54, 63, 12, 226, 81, 38, 176, 57, 117, 92, 205, 237, 81, 203, 232, 220, 228, 166, 254, 206, 136, 7, 253, 2, 61, 47, 217, 235, 24, 140, 92, 1},
MixDigest: common.Hash{},
Nonce: types.BlockNonce{},
BaseFee: nil,
}
// State nodes for all paths at height 32
// Total 7
var ChainB_Block32_stateNode0RLP = []byte{248, 145, 128, 128, 128, 160, 151, 6, 152, 177, 246, 151, 39, 79, 71, 219, 192, 153, 253, 0, 46, 66, 56, 238, 116, 176, 237, 244, 79, 132, 49, 29, 30, 82, 108, 53, 191, 204, 128, 128, 160, 46, 224, 200, 157, 30, 24, 225, 92, 222, 131, 123, 169, 124, 86, 228, 124, 79, 136, 236, 83, 185, 22, 67, 136, 5, 73, 46, 110, 136, 138, 101, 63, 128, 128, 160, 104, 220, 31, 84, 240, 26, 100, 148, 110, 49, 52, 120, 81, 119, 30, 251, 196, 107, 11, 134, 124, 238, 93, 61, 109, 109, 181, 208, 10, 189, 17, 92, 128, 128, 160, 171, 149, 11, 254, 75, 39, 224, 164, 133, 151, 153, 47, 109, 134, 15, 169, 139, 206, 132, 93, 220, 210, 0, 225, 235, 118, 121, 247, 173, 12, 135, 133, 128, 128, 128, 128}
var ChainB_Block32_stateNode0CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode0RLP))
var ChainB_Block32_stateNode1RLP = []byte{248, 81, 128, 128, 128, 160, 209, 34, 171, 171, 30, 147, 168, 199, 137, 152, 249, 118, 14, 166, 1, 169, 116, 224, 82, 196, 237, 83, 255, 188, 228, 197, 7, 178, 144, 137, 77, 55, 128, 128, 128, 128, 128, 160, 135, 96, 108, 173, 177, 63, 201, 196, 26, 204, 72, 118, 17, 30, 76, 117, 155, 63, 68, 187, 4, 249, 78, 69, 161, 82, 178, 234, 164, 48, 158, 173, 128, 128, 128, 128, 128, 128, 128}
var ChainB_Block32_stateNode1CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode1RLP))
var ChainB_Block32_stateNode2RLP = []byte{248, 105, 160, 32, 21, 58, 188, 102, 126, 135, 59, 96, 54, 200, 164, 107, 221, 132, 126, 42, 222, 63, 137, 185, 51, 28, 120, 239, 37, 83, 254, 161, 148, 197, 13, 184, 70, 248, 68, 1, 128, 160, 168, 127, 48, 6, 204, 116, 51, 247, 216, 182, 191, 182, 185, 124, 223, 202, 239, 15, 67, 91, 253, 165, 42, 2, 54, 10, 211, 250, 242, 149, 205, 139, 160, 224, 22, 140, 8, 116, 27, 79, 113, 64, 185, 215, 180, 38, 38, 236, 164, 5, 87, 211, 15, 88, 153, 138, 185, 94, 186, 125, 137, 164, 198, 141, 192}
var ChainB_Block32_stateNode2CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode2RLP))
var ChainB_Block32_stateNode3RLP = []byte{248, 105, 160, 32, 252, 41, 63, 199, 2, 228, 43, 156, 2, 63, 9, 72, 38, 84, 93, 180, 47, 192, 253, 242, 186, 3, 27, 181, 34, 213, 239, 145, 122, 110, 219, 184, 70, 248, 68, 1, 128, 160, 25, 80, 158, 144, 166, 222, 32, 247, 189, 42, 34, 60, 40, 240, 56, 105, 251, 184, 132, 209, 219, 59, 60, 16, 221, 204, 228, 74, 76, 113, 37, 226, 160, 224, 22, 140, 8, 116, 27, 79, 113, 64, 185, 215, 180, 38, 38, 236, 164, 5, 87, 211, 15, 88, 153, 138, 185, 94, 186, 125, 137, 164, 198, 141, 192}
var ChainB_Block32_stateNode3CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode3RLP))
var ChainB_Block32_stateNode4RLP = []byte{248, 118, 160, 55, 171, 60, 13, 215, 117, 244, 72, 175, 127, 180, 18, 67, 65, 94, 214, 251, 151, 93, 21, 48, 162, 216, 40, 246, 155, 234, 115, 70, 35, 26, 215, 184, 83, 248, 81, 10, 141, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}
var ChainB_Block32_stateNode4CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode4RLP))
var ChainB_Block32_stateNode5RLP = []byte{248, 105, 160, 51, 151, 227, 61, 237, 218, 71, 99, 174, 161, 67, 252, 97, 81, 235, 205, 154, 147, 246, 45, 183, 166, 165, 86, 212, 108, 88, 93, 130, 173, 42, 252, 184, 70, 248, 68, 1, 128, 160, 54, 174, 96, 33, 243, 186, 113, 120, 188, 222, 254, 210, 63, 40, 4, 130, 154, 156, 66, 247, 130, 93, 88, 113, 144, 78, 47, 252, 174, 140, 130, 45, 160, 29, 80, 58, 104, 206, 141, 36, 93, 124, 217, 67, 93, 183, 43, 71, 98, 114, 126, 124, 105, 229, 48, 218, 194, 109, 83, 20, 76, 13, 102, 156, 130}
var ChainB_Block32_stateNode5CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode5RLP))
var ChainB_Block32_stateNode6RLP = []byte{248, 105, 160, 58, 188, 94, 219, 48, 85, 131, 227, 63, 102, 50, 44, 238, 228, 48, 136, 170, 153, 39, 125, 167, 114, 254, 181, 5, 53, 18, 208, 58, 10, 112, 43, 184, 70, 248, 68, 1, 128, 160, 54, 174, 96, 33, 243, 186, 113, 120, 188, 222, 254, 210, 63, 40, 4, 130, 154, 156, 66, 247, 130, 93, 88, 113, 144, 78, 47, 252, 174, 140, 130, 45, 160, 29, 80, 58, 104, 206, 141, 36, 93, 124, 217, 67, 93, 183, 43, 71, 98, 114, 126, 124, 105, 229, 48, 218, 194, 109, 83, 20, 76, 13, 102, 156, 130}
var ChainB_Block32_stateNode6CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode6RLP))
var ChainB_Block32_StateIPLDs = []models.IPLDModel{
{
BlockNumber: ChainB_Block32_Header.Number.String(),
Key: ChainB_Block32_stateNode0CID.String(),
Data: ChainB_Block32_stateNode0RLP,
},
{
BlockNumber: ChainB_Block32_Header.Number.String(),
Key: ChainB_Block32_stateNode1CID.String(),
Data: ChainB_Block32_stateNode1RLP,
},
{
BlockNumber: ChainB_Block32_Header.Number.String(),
Key: ChainB_Block32_stateNode2CID.String(),
Data: ChainB_Block32_stateNode2RLP,
},
{
BlockNumber: ChainB_Block32_Header.Number.String(),
Key: ChainB_Block32_stateNode3CID.String(),
Data: ChainB_Block32_stateNode3RLP,
},
{
BlockNumber: ChainB_Block32_Header.Number.String(),
Key: ChainB_Block32_stateNode4CID.String(),
Data: ChainB_Block32_stateNode4RLP,
},
{
BlockNumber: ChainB_Block32_Header.Number.String(),
Key: ChainB_Block32_stateNode5CID.String(),
Data: ChainB_Block32_stateNode5RLP,
},
{
BlockNumber: ChainB_Block32_Header.Number.String(),
Key: ChainB_Block32_stateNode6CID.String(),
Data: ChainB_Block32_stateNode6RLP,
},
}
var ChainB_Block32_StateNodes = []models.StateNodeModel{
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		CID:         ChainB_Block32_stateNode2CID.String(),
		Diff:        false,
		Balance:     "0",
		Nonce:       1,
		CodeHash:    common.HexToHash("0xe0168c08741b4f7140b9d7b42626eca40557d30f58998ab95eba7d89a4c68dc0").Hex(),
		StorageRoot: common.HexToHash("0xa87f3006cc7433f7d8b6bfb6b97cdfcaef0f435bfda52a02360ad3faf295cd8b").Hex(),
		Removed:     false,
		StateKey:    "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		CID:         ChainB_Block32_stateNode3CID.String(),
		Diff:        false,
		Balance:     "1000",
		Nonce:       1,
		CodeHash:    crypto.Keccak256Hash([]byte{}).Hex(),
		StorageRoot: crypto.Keccak256Hash(ChainB_block1_EmptyRootNodeRLP).Hex(),
		Removed:     false,
		StateKey:    "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb",
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		CID:         ChainB_Block32_stateNode4CID.String(),
		Diff:        false,
		Balance:     "1000",
		Nonce:       1,
		CodeHash:    crypto.Keccak256Hash([]byte{}).Hex(),
		StorageRoot: crypto.Keccak256Hash(ChainB_block1_EmptyRootNodeRLP).Hex(),
		Removed:     false,
		StateKey:    "0x67ab3c0dd775f448af7fb41243415ed6fb975d1530a2d828f69bea7346231ad7",
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		CID:         ChainB_Block32_stateNode5CID.String(),
		Diff:        false,
		Balance:     "1000",
		Nonce:       1,
		CodeHash:    crypto.Keccak256Hash([]byte{}).Hex(),
		StorageRoot: crypto.Keccak256Hash(ChainB_block1_EmptyRootNodeRLP).Hex(),
		Removed:     false,
		StateKey:    "0x9397e33dedda4763aea143fc6151ebcd9a93f62db7a6a556d46c585d82ad2afc",
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		CID:         ChainB_Block32_stateNode6CID.String(),
		Diff:        false,
		Balance:     "0",
		Nonce:       1,
		CodeHash:    common.HexToHash("0x1d503a68ce8d245d7cd9435db72b4762727e7c69e530dac26d53144c0d669c82").Hex(),
		StorageRoot: common.HexToHash("0x36ae6021f3ba7178bcdefed23f2804829a9c42f7825d5871904e2ffcae8c822d").Hex(),
		Removed:     false,
		StateKey:    "0xcabc5edb305583e33f66322ceee43088aa99277da772feb5053512d03a0a702b",
	},
}
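// A minimal sketch (not part of the fixtures) relating the StateNodeModel rows
// above to the leaf-node RLP they are built from: a state leaf is a two-item RLP
// list [compact-encoded path, rlp(account)], and the inner payload carries the
// Nonce, Balance, StorageRoot, and CodeHash recorded in each model. The local
// structs here are assumptions standing in for geth's StateAccount (whose Balance
// type varies across geth versions); assumes "math/big" and
// "github.com/ethereum/go-ethereum/rlp" are imported.
type rlpLeaf struct {
	Path    []byte
	Payload []byte // nested RLP encoding of the account
}

type rlpAccount struct {
	Nonce       uint64
	Balance     *big.Int
	StorageRoot common.Hash
	CodeHash    []byte
}

func decodeLeafAccount(leafRLP []byte) (*rlpAccount, error) {
	var leaf rlpLeaf
	if err := rlp.DecodeBytes(leafRLP, &leaf); err != nil {
		return nil, err
	}
	acct := new(rlpAccount)
	if err := rlp.DecodeBytes(leaf.Payload, acct); err != nil {
		return nil, err
	}
	return acct, nil
}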
// Storage nodes for all paths at height 32
// Total 18
var chainB_Block32_storageNode0RLP = []byte{248, 145, 128, 128, 128, 128, 160, 46, 77, 227, 140, 57, 224, 108, 238, 40, 82, 145, 79, 210, 174, 54, 248, 0, 145, 137, 64, 229, 230, 148, 145, 250, 132, 89, 198, 8, 249, 245, 133, 128, 160, 146, 250, 117, 217, 106, 75, 51, 124, 196, 244, 29, 16, 47, 173, 5, 90, 86, 19, 15, 48, 179, 174, 60, 171, 112, 154, 92, 70, 232, 164, 141, 165, 128, 160, 107, 250, 27, 137, 190, 180, 7, 172, 62, 97, 13, 157, 215, 114, 55, 219, 14, 244, 163, 155, 192, 255, 34, 143, 154, 149, 33, 227, 166, 135, 164, 93, 128, 128, 128, 160, 173, 131, 221, 2, 30, 147, 11, 230, 58, 166, 18, 25, 90, 56, 198, 126, 196, 130, 131, 1, 213, 112, 129, 155, 96, 143, 121, 231, 218, 97, 216, 200, 128, 128, 128, 128}
var chainB_Block32_storageNode0CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode0RLP))
var chainB_Block32_storageNode1RLP = []byte{248, 81, 160, 167, 145, 134, 15, 219, 140, 96, 62, 101, 242, 176, 129, 164, 160, 200, 221, 13, 1, 246, 167, 156, 45, 205, 192, 88, 236, 235, 80, 105, 178, 123, 2, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 160, 18, 136, 22, 150, 26, 170, 67, 152, 182, 246, 95, 49, 193, 199, 219, 163, 97, 25, 243, 70, 126, 235, 163, 59, 44, 16, 37, 37, 247, 50, 229, 70, 128, 128}
var chainB_Block32_storageNode1CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode1RLP))
var chainB_Block32_storageNode2RLP = []byte{236, 160, 32, 87, 135, 250, 18, 168, 35, 224, 242, 183, 99, 28, 196, 27, 59, 168, 130, 139, 51, 33, 202, 129, 17, 17, 250, 117, 205, 58, 163, 187, 90, 206, 138, 137, 54, 53, 201, 173, 197, 222, 160, 0, 0}
var chainB_Block32_storageNode2CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode2RLP))
var chainB_Block32_storageNode3RLP = []byte{226, 160, 32, 44, 236, 111, 71, 132, 84, 126, 80, 66, 161, 99, 128, 134, 227, 24, 137, 41, 243, 79, 60, 0, 5, 248, 222, 195, 102, 201, 110, 129, 149, 172, 100}
var chainB_Block32_storageNode3CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode3RLP))
var chainB_Block32_storageNode4RLP = []byte{236, 160, 58, 160, 42, 17, 221, 77, 37, 151, 49, 139, 113, 212, 147, 177, 69, 221, 246, 174, 8, 23, 169, 211, 148, 127, 69, 213, 41, 166, 167, 95, 43, 239, 138, 137, 54, 53, 201, 173, 197, 222, 159, 255, 156}
var chainB_Block32_storageNode4CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode4RLP))
var chainB_Block32_storageNode5RLP = []byte{248, 67, 160, 58, 53, 172, 251, 193, 95, 248, 26, 57, 174, 125, 52, 79, 215, 9, 242, 142, 134, 0, 180, 170, 140, 101, 198, 182, 75, 254, 127, 227, 107, 209, 155, 161, 160, 71, 76, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6}
var chainB_Block32_storageNode5CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode5RLP))
var chainB_Block32_storageNode6RLP = []byte{248, 67, 160, 58, 53, 172, 251, 193, 95, 248, 26, 57, 174, 125, 52, 79, 215, 9, 242, 142, 134, 0, 180, 170, 140, 101, 198, 182, 75, 254, 127, 227, 107, 209, 155, 161, 160, 71, 76, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6}
var chainB_Block32_storageNode6CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode6RLP))
var chainB_Block32_storageNode7RLP = []byte{248, 67, 160, 50, 87, 90, 14, 158, 89, 60, 0, 249, 89, 248, 201, 47, 18, 219, 40, 105, 195, 57, 90, 59, 5, 2, 208, 94, 37, 22, 68, 111, 113, 248, 91, 161, 160, 71, 111, 108, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8}
var chainB_Block32_storageNode7CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode7RLP))
var chainB_Block32_storageNode8RLP = []byte{248, 67, 160, 50, 87, 90, 14, 158, 89, 60, 0, 249, 89, 248, 201, 47, 18, 219, 40, 105, 195, 57, 90, 59, 5, 2, 208, 94, 37, 22, 68, 111, 113, 248, 91, 161, 160, 71, 111, 108, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8}
var chainB_Block32_storageNode8CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode8RLP))
var chainB_Block32_storageNode9RLP = []byte{248, 145, 128, 128, 128, 128, 160, 145, 86, 15, 219, 52, 36, 164, 68, 160, 227, 156, 111, 1, 245, 112, 184, 187, 242, 26, 138, 8, 98, 129, 35, 57, 212, 165, 21, 204, 151, 229, 43, 128, 160, 250, 205, 84, 126, 141, 108, 126, 228, 162, 8, 238, 234, 141, 159, 232, 175, 70, 112, 207, 55, 165, 209, 107, 153, 54, 183, 60, 172, 194, 251, 66, 61, 128, 160, 107, 250, 27, 137, 190, 180, 7, 172, 62, 97, 13, 157, 215, 114, 55, 219, 14, 244, 163, 155, 192, 255, 34, 143, 154, 149, 33, 227, 166, 135, 164, 93, 128, 128, 128, 160, 173, 131, 221, 2, 30, 147, 11, 230, 58, 166, 18, 25, 90, 56, 198, 126, 196, 130, 131, 1, 213, 112, 129, 155, 96, 143, 121, 231, 218, 97, 216, 200, 128, 128, 128, 128}
var chainB_Block32_storageNode9CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode9RLP))
var chainB_Block32_storageNode10RLP = []byte{236, 160, 48, 87, 135, 250, 18, 168, 35, 224, 242, 183, 99, 28, 196, 27, 59, 168, 130, 139, 51, 33, 202, 129, 17, 17, 250, 117, 205, 58, 163, 187, 90, 206, 138, 137, 54, 53, 201, 173, 197, 222, 160, 0, 0}
var chainB_Block32_storageNode10CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode10RLP))
var chainB_Block32_storageNode11RLP = []byte{236, 160, 58, 160, 42, 17, 221, 77, 37, 151, 49, 139, 113, 212, 147, 177, 69, 221, 246, 174, 8, 23, 169, 211, 148, 127, 69, 213, 41, 166, 167, 95, 43, 239, 138, 137, 54, 53, 201, 173, 197, 222, 160, 0, 0}
var chainB_Block32_storageNode11CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode11RLP))
var chainB_Block32_storageNode12RLP = []byte{248, 81, 128, 128, 160, 79, 197, 241, 58, 178, 249, 186, 12, 45, 168, 139, 1, 81, 171, 14, 124, 244, 216, 93, 8, 204, 164, 92, 205, 146, 60, 106, 183, 99, 35, 235, 40, 128, 128, 128, 128, 128, 128, 128, 128, 160, 82, 154, 228, 80, 107, 126, 132, 72, 3, 170, 88, 197, 100, 216, 50, 21, 226, 183, 86, 42, 208, 239, 184, 183, 152, 93, 188, 113, 224, 234, 218, 43, 128, 128, 128, 128, 128}
var chainB_Block32_storageNode12CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode12RLP))
var chainB_Block32_storageNode13RLP = []byte{248, 81, 128, 128, 160, 79, 197, 241, 58, 178, 249, 186, 12, 45, 168, 139, 1, 81, 171, 14, 124, 244, 216, 93, 8, 204, 164, 92, 205, 146, 60, 106, 183, 99, 35, 235, 40, 128, 128, 128, 128, 128, 128, 128, 128, 160, 82, 154, 228, 80, 107, 126, 132, 72, 3, 170, 88, 197, 100, 216, 50, 21, 226, 183, 86, 42, 208, 239, 184, 183, 152, 93, 188, 113, 224, 234, 218, 43, 128, 128, 128, 128, 128}
var chainB_Block32_storageNode13CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode13RLP))
var chainB_Block32_storageNode14RLP = []byte{226, 160, 57, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1}
var chainB_Block32_storageNode14CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode14RLP))
var chainB_Block32_storageNode15RLP = []byte{226, 160, 57, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1}
var chainB_Block32_storageNode15CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode15RLP))
var chainB_Block32_storageNode16RLP = []byte{226, 160, 49, 14, 45, 82, 118, 18, 7, 59, 38, 238, 205, 253, 113, 126, 106, 50, 12, 244, 75, 74, 250, 194, 176, 115, 45, 159, 203, 226, 183, 250, 12, 246, 4}
var chainB_Block32_storageNode16CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode16RLP))
var chainB_Block32_storageNode17RLP = []byte{226, 160, 49, 14, 45, 82, 118, 18, 7, 59, 38, 238, 205, 253, 113, 126, 106, 50, 12, 244, 75, 74, 250, 194, 176, 115, 45, 159, 203, 226, 183, 250, 12, 246, 4}
var chainB_Block32_storageNode17CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode17RLP))
var ChainB_Block32_StorageIPLDs = []models.IPLDModel{
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode0CID.String(),
		Data:        chainB_Block32_storageNode0RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode1CID.String(),
		Data:        chainB_Block32_storageNode1RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode2CID.String(),
		Data:        chainB_Block32_storageNode2RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode3CID.String(),
		Data:        chainB_Block32_storageNode3RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode4CID.String(),
		Data:        chainB_Block32_storageNode4RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode5CID.String(),
		Data:        chainB_Block32_storageNode5RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode6CID.String(),
		Data:        chainB_Block32_storageNode6RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode7CID.String(),
		Data:        chainB_Block32_storageNode7RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode8CID.String(),
		Data:        chainB_Block32_storageNode8RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode9CID.String(),
		Data:        chainB_Block32_storageNode9RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode10CID.String(),
		Data:        chainB_Block32_storageNode10RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode11CID.String(),
		Data:        chainB_Block32_storageNode11RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode12CID.String(),
		Data:        chainB_Block32_storageNode12RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode13CID.String(),
		Data:        chainB_Block32_storageNode13RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode14CID.String(),
		Data:        chainB_Block32_storageNode14RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode15CID.String(),
		Data:        chainB_Block32_storageNode15RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode16CID.String(),
		Data:        chainB_Block32_storageNode16RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode17CID.String(),
		Data:        chainB_Block32_storageNode17RLP,
	},
}
var ChainB_Block32_StorageNodes = []models.StorageNodeModel{
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		Diff:        false,
		Removed:     false,
		StorageKey:  "0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace",
		CID:         chainB_Block32_storageNode2CID.String(),
		Value:       []byte{137, 54, 53, 201, 173, 197, 222, 160, 0, 0},
		StateKey:    "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
	}, // 0
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		Diff:        false,
		Removed:     false,
		StorageKey:  "0x4e2cec6f4784547e5042a1638086e3188929f34f3c0005f8dec366c96e8195ac",
		CID:         chainB_Block32_storageNode3CID.String(),
		Value:       []byte{100},
		StateKey:    "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
	}, // 1
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		Diff:        false,
		Removed:     false,
		StorageKey:  "0x6aa02a11dd4d2597318b71d493b145ddf6ae0817a9d3947f45d529a6a75f2bef",
		CID:         chainB_Block32_storageNode4CID.String(),
		Value:       []byte{137, 54, 53, 201, 173, 197, 222, 159, 255, 156},
		StateKey:    "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
	}, // 2
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		Diff:        false,
		Removed:     false,
		StorageKey:  "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b",
		CID:         chainB_Block32_storageNode5CID.String(),
		Value:       []byte{},
		StateKey:    "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb",
	}, // 3
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		Diff:        false,
		Removed:     false,
		StorageKey:  "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b",
		CID:         chainB_Block32_storageNode6CID.String(),
		Value:       []byte{160, 71, 76, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6},
		StateKey:    "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
	}, // 4
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		Diff:        false,
		Removed:     false,
		StorageKey:  "0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b",
		CID:         chainB_Block32_storageNode7CID.String(),
		Value:       []byte{},
		StateKey:    "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb",
	}, // 5
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		Diff:        false,
		Removed:     false,
		StorageKey:  "0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b",
		CID:         chainB_Block32_storageNode8CID.String(),
		Value:       []byte{160, 71, 111, 108, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8},
		StateKey:    "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
	}, // 6
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		Diff:        false,
		Removed:     false,
		StorageKey:  "0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace",
		CID:         chainB_Block32_storageNode10CID.String(),
		Value:       []byte{},
		StateKey:    "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb",
	}, // 7
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		Diff:        false,
		Removed:     false,
		StorageKey:  "0x6aa02a11dd4d2597318b71d493b145ddf6ae0817a9d3947f45d529a6a75f2bef",
		CID:         chainB_Block32_storageNode11CID.String(),
		Value:       []byte{},
		StateKey:    "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb",
	}, // 8
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		Diff:        false,
		Removed:     false,
		StorageKey:  "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563",
		CID:         chainB_Block32_storageNode14CID.String(),
		Value:       []byte{'\x01'},
		StateKey:    "0xcabc5edb305583e33f66322ceee43088aa99277da772feb5053512d03a0a702b",
	}, // 9
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		Diff:        false,
		Removed:     false,
		StorageKey:  "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563",
		CID:         chainB_Block32_storageNode15CID.String(),
		Value:       []byte{},
		StateKey:    "0x9397e33dedda4763aea143fc6151ebcd9a93f62db7a6a556d46c585d82ad2afc",
	}, // 10
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		Diff:        false,
		Removed:     false,
		StorageKey:  "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6",
		CID:         chainB_Block32_storageNode16CID.String(),
		Value:       []byte{'\x04'},
		StateKey:    "0xcabc5edb305583e33f66322ceee43088aa99277da772feb5053512d03a0a702b",
	}, // 11
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		Diff:        false,
		Removed:     false,
		StorageKey:  "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6",
		CID:         chainB_Block32_storageNode17CID.String(),
		Value:       []byte{},
		StateKey:    "0x9397e33dedda4763aea143fc6151ebcd9a93f62db7a6a556d46c585d82ad2afc",
	}, // 12
}
// Contracts used in chainB
/*
pragma solidity ^0.8.0;

contract Test {
    uint256 private count;
    uint256 private count2;

    event Increment(uint256 count);

    constructor() {
        count2 = 4;
    }

    function incrementCount() public returns (uint256) {
        count = count + 1;
        emit Increment(count);
        return count;
    }

    function destroy() public {
        selfdestruct(payable(msg.sender));
    }

    function deleteCount2() public {
        count2 = 0;
    }
}
*/
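// A minimal sketch (not part of the fixtures) of where the Test contract's
// StorageKey values come from: the secure trie keys storage by the keccak-256
// hash of the 32-byte, left-padded slot index. count sits in slot 0 and count2
// in slot 1, which hash to two of the keys used in ChainB_Block32_StorageNodes:
//
//	slot 0 -> 0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563
//	slot 1 -> 0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6
func storageTrieKey(slot uint64) common.Hash {
	return crypto.Keccak256Hash(common.BigToHash(new(big.Int).SetUint64(slot)).Bytes())
}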
/*
pragma solidity ^0.8.0;

import "@openzeppelin/contracts/token/ERC20/ERC20.sol";

contract GLDToken is ERC20 {
    constructor(uint256 initialSupply) ERC20("Gold", "GLD") {
        _mint(msg.sender, initialSupply);
    }
}
*/
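// A hedged sketch (not part of the fixtures; assumes OpenZeppelin's _balances
// mapping occupies slot 0, per Solidity storage-layout rules) of how the
// GLDToken balance StorageKeys above are derived: a mapping entry lives at
// keccak256(pad32(key) ++ pad32(slot)), and the secure trie hashes that slot
// once more to yield the recorded StorageKey. Assumes "encoding/binary" is
// imported.
func erc20BalanceTrieKey(holder common.Address, balancesSlot uint64) common.Hash {
	preimage := make([]byte, 64)
	copy(preimage[12:32], holder.Bytes())                   // left-padded holder address
	binary.BigEndian.PutUint64(preimage[56:], balancesSlot) // left-padded slot index
	storageSlot := crypto.Keccak256(preimage)               // slot of _balances[holder]
	return crypto.Keccak256Hash(storageSlot)                // secure-trie key (StorageKey)
}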


@@ -1,59 +0,0 @@
package test

import (
	"bytes"
	"os"
	"reflect"
	"testing"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
	ethnode "github.com/ethereum/go-ethereum/statediff/indexer/node"
)

var (
	DefaultNodeInfo = ethnode.Info{
		ID:           "test_nodeid",
		ClientName:   "test_client",
		GenesisBlock: "TEST_GENESIS",
		NetworkID:    "test_network",
		ChainID:      0,
	}
	DefaultPgConfig = postgres.Config{
		Hostname:        "localhost",
		Port:            8077,
		DatabaseName:    "vulcanize_testing",
		Username:        "vdbm",
		Password:        "password",
		MaxIdle:         0,
		MaxConnLifetime: 0,
		MaxConns:        4,
	}
)

func NeedsDB(t *testing.T) {
	t.Helper()
	if os.Getenv("TEST_WITH_DB") == "" {
		t.Skip("set TEST_WITH_DB to enable test")
	}
}

func NoError(t *testing.T, err error) {
	t.Helper()
	if err != nil {
		t.Fatal(err)
	}
}

// ExpectEqual asserts the provided interfaces are deep equal
func ExpectEqual(t *testing.T, want, got interface{}) {
	if !reflect.DeepEqual(want, got) {
		t.Fatalf("Values not equal:\nExpected:\t%v\nActual:\t\t%v", want, got)
	}
}

func ExpectEqualBytes(t *testing.T, want, got []byte) {
	if !bytes.Equal(want, got) {
		t.Fatalf("Bytes not equal:\nExpected:\t%v\nActual:\t\t%v", want, got)
	}
}
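// A minimal usage sketch (hypothetical test, not part of this changeset)
// showing how the helpers above compose: NeedsDB gates a test on the
// TEST_WITH_DB env var, NoError fails fast on setup errors, and ExpectEqual
// asserts deep equality.
func TestHelpersExample(t *testing.T) {
	NeedsDB(t) // skipped unless TEST_WITH_DB is set
	ExpectEqual(t, "localhost", DefaultPgConfig.Hostname)
	NoError(t, nil) // a fallible call's error would normally go here
}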