Compare commits
49 Commits (rebase-1.1 ... v5)

SHA1
9e483fc9f7
434c9f6b48
bb49906860
0c323433af
b4367dff3b
00141776bf
4e0b481ea5
da02e5ac12
fe88e90181
768357293c
dd86f02997
891fca89bd
0cab03b98e
81e286399b
7e426b2ca5
98015c4c87
6de5b9e96c
2dd9221467
ea4c1042c4
7af17b1851
4081787b03
3d8064ccbb
fdb105b769
32b637671d
4ee75a3371
82176ea41c
0a04baab17
4245b80a4a
22ecd4065a
382ad92701
14b1180161
ead007f159
f1a980f37c
f83ab82424
fb5a95d874
97ee99a449
b6d7695536
3f3e77cbac
e63ffbf0ad
6ede522ae0
5c13d59515
6fe54aa8b4
c9dd85488b
ac85fe29eb
c270f39de9
9ea78c4ecf
989bd1c0f1
3f93a989dc
be544a3424
143 .gitea/workflows/test.yml (new file)
@@ -0,0 +1,143 @@
name: Test

on:
  pull_request:
    branches: '*'
  push:
    branches:
      - main

env:
  CANONICAL_VERSION: v5.0.4-alpha
  ETH_TESTING_REF: v0.5.1

jobs:
  build:
    name: Build Docker image
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Build docker image
        run: docker build .

  unit-test:
    name: Run unit tests
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v3
        with:
          go-version-file: go.mod
          check-latest: true
      - name: Install test fixtures
        uses: actions/checkout@v3
        with:
          repository: cerc-io/eth-testing
          path: ./fixtures
          ref: ${{ env.ETH_TESTING_REF }}
      - name: Run unit tests
        run: make test

  integration-test:
    name: Run integration tests
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v3
        with:
          go-version-file: go.mod
          check-latest: true
      - name: Install test fixtures
        uses: actions/checkout@v3
        with:
          repository: cerc-io/eth-testing
          path: ./fixtures
          ref: ${{ env.ETH_TESTING_REF }}
      - name: Build package
        run: go build .
      - name: Run DB container
        run: docker compose -f test/compose.yml up --wait

      # Run a sanity test against the fixture data
      # Complete integration tests are TODO
      - name: Run basic integration test
        env:
          SNAPSHOT_MODE: postgres
          ETHDB_PATH: ./fixtures/chains/data/postmerge1/geth/chaindata
          ETH_GENESIS_BLOCK: 0x66ef6002e201cfdb23bd3f615fcf41e59d8382055e5a836f8d4c2af0d484647c
          SNAPSHOT_BLOCK_HEIGHT: 170
        run: |
          until
            ready_query='select max(version_id) from goose_db_version;'
            version=$(docker exec -e PGPASSWORD=password test-ipld-eth-db-1 \
              psql -tA cerc_testing -U vdbm -c "$ready_query")
            [[ "$version" -ge 21 ]]
          do
            echo "Waiting for ipld-eth-db..."
            sleep 3
          done

          ./ipld-eth-state-snapshot --config test/ci-config.toml stateSnapshot

          count_results() {
            query="select count(*) from $1;"
            docker exec -e PGPASSWORD=password test-ipld-eth-db-1 \
              psql -tA cerc_testing -U vdbm -c "$query"
          }
          set -x
          [[ "$(count_results eth.header_cids)" = 1 ]]
          [[ "$(count_results eth.state_cids)" = 264 ]]
          [[ "$(count_results eth.storage_cids)" = 371 ]]

  compliance-test:
    name: Run compliance tests (disabled)
    # Schema has been updated, so compliance tests are disabled until we have a meaningful way to
    # compare to previous results.
    if: false
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          path: ./ipld-eth-state-snapshot
      - uses: actions/setup-go@v3
        with:
          go-version-file: ./ipld-eth-state-snapshot/go.mod
          check-latest: true
      - name: Install test fixtures
        uses: actions/checkout@v3
        with:
          repository: cerc-io/eth-testing
          path: ./fixtures
          ref: ${{ env.ETH_TESTING_REF }}
      - name: Build current version
        working-directory: ./ipld-eth-state-snapshot
        run: go build -o ../snapshot-current .

      - name: Checkout canonical version
        uses: actions/checkout@v3
        with:
          path: ./ipld-eth-state-snapshot-canonical
          ref: ${{ env.CANONICAL_VERSION }}
      - name: Build canonical version
        working-directory: ./ipld-eth-state-snapshot-canonical
        run: go build -o ../snapshot-canonical .

      - name: Run DB container
        working-directory: ./ipld-eth-state-snapshot
        run: docker compose -f test/compose.yml up --wait
      - name: Compare snapshot output
        env:
          SNAPSHOT_BLOCK_HEIGHT: 200
          ETHDB_PATH: ./fixtures/chains/data/premerge2/geth/chaindata
          ETHDB_ANCIENT: ./fixtures/chains/data/premerge2/geth/chaindata/ancient
          ETH_GENESIS_BLOCK: "0x8a3c7cddacbd1ab4ec1b03805fa2a287f3a75e43d87f4f987fcc399f5c042614"
        run: |
          until
            ready_query='select max(version_id) from goose_db_version;'
            version=$(docker exec -e PGPASSWORD=password test-ipld-eth-db-1 \
              psql -tA cerc_testing -U vdbm -c "$ready_query")
            [[ "$version" -ge 21 ]]
          do sleep 1; done

          ./ipld-eth-state-snapshot/scripts/compare-snapshots.sh \
            ./snapshot-canonical ./snapshot-current
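The sanity check in the integration job can be reproduced locally; a minimal sketch, assuming the binary is built with `go build .`, the `cerc-io/eth-testing` fixtures are checked out at `./fixtures`, and the `test/compose.yml` stack provides the database:

```bash
# Bring up the test database and wait for it to report healthy
docker compose -f test/compose.yml up --wait

# Same parameters the workflow sets for the basic integration test
export SNAPSHOT_MODE=postgres
export ETHDB_PATH=./fixtures/chains/data/postmerge1/geth/chaindata
export ETH_GENESIS_BLOCK=0x66ef6002e201cfdb23bd3f615fcf41e59d8382055e5a836f8d4c2af0d484647c
export SNAPSHOT_BLOCK_HEIGHT=170

# Take the snapshot; row counts can then be spot-checked as the workflow does
./ipld-eth-state-snapshot --config test/ci-config.toml stateSnapshot
```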
30 .github/workflows/on-pr.yaml (vendored, deleted)
@@ -1,30 +0,0 @@
name: Docker Build

on: [pull_request]

jobs:
  test:
    name: Run unit tests
    runs-on: ubuntu-latest
    env:
      GOPATH: /tmp/go
      GO111MODULE: on
    steps:
      - name: Create GOPATH
        run: mkdir -p /tmp/go

      - uses: actions/setup-go@v3
        with:
          go-version: ">=1.18.0"
          check-latest: true

      - name: Checkout code
        uses: actions/checkout@v2

      - name: Run database
        run: docker-compose up -d

      - name: Run unit tests
        run: |
          sleep 45
          make dbtest
4 .gitignore (vendored)
@@ -1,6 +1,6 @@
 .idea/
 .vscode/
 ipld-eth-state-snapshot
-mocks/
-.vscode
 output_dir*/
+log_file
+recovery_file
31 Dockerfile (new file)
@@ -0,0 +1,31 @@
FROM golang:1.21-alpine AS builder

RUN apk add --no-cache git gcc musl-dev binutils-gold
# DEBUG
RUN apk add busybox-extras

WORKDIR /ipld-eth-state-snapshot

ARG GIT_VDBTO_TOKEN

COPY go.mod go.sum ./
RUN if [ -n "$GIT_VDBTO_TOKEN" ]; then git config --global url."https://$GIT_VDBTO_TOKEN:@git.vdb.to/".insteadOf "https://git.vdb.to/"; fi && \
    go mod download && \
    rm -f ~/.gitconfig
COPY . .

RUN go build -ldflags '-extldflags "-static"' -o ipld-eth-state-snapshot .

FROM alpine

RUN apk --no-cache add su-exec bash

WORKDIR /app

COPY --from=builder /ipld-eth-state-snapshot/startup_script.sh .
COPY --from=builder /ipld-eth-state-snapshot/environments environments

# keep binaries immutable
COPY --from=builder /ipld-eth-state-snapshot/ipld-eth-state-snapshot ipld-eth-state-snapshot

ENTRYPOINT ["/app/startup_script.sh"]
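A build sketch for the image above; the tag is illustrative, and `GIT_VDBTO_TOKEN` only needs to be set when modules must be fetched from git.vdb.to with credentials:

```bash
# The build arg is optional; when unset, the git config rewrite is skipped
docker build --build-arg GIT_VDBTO_TOKEN="$GIT_VDBTO_TOKEN" \
  -t ipld-eth-state-snapshot:local .

# The entrypoint is /app/startup_script.sh; configuration is supplied at run time
docker run --rm ipld-eth-state-snapshot:local
```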
31 Makefile
@@ -1,28 +1,13 @@
-BIN = $(GOPATH)/bin
-
-## Mockgen tool
-MOCKGEN = $(BIN)/mockgen
-$(BIN)/mockgen:
-	go install github.com/golang/mock/mockgen@v1.6.0
-
-MOCKS_DIR = $(CURDIR)/mocks
-
-.PHONY: mocks test
-
-mocks: $(MOCKGEN) mocks/snapshot/publisher.go
-
-mocks/snapshot/publisher.go: pkg/types/publisher.go
-	$(MOCKGEN) -package snapshot_mock -destination $@ -source $< Publisher Tx
-
-clean:
-	rm -f mocks/snapshot/publisher.go
-
-build:
-	go fmt ./...
-	go build
+MOCKGEN ?= mockgen
+MOCKS_DIR := $(CURDIR)/internal/mocks
+
+mocks: $(MOCKS_DIR)/gen_indexer.go
+.PHONY: mocks
+
+$(MOCKS_DIR)/gen_indexer.go:
+	$(MOCKGEN) --package mocks --destination $@ \
+		--mock_names Indexer=MockgenIndexer \
+		github.com/cerc-io/plugeth-statediff/indexer Indexer
 
 test: mocks
 	go clean -testcache && go test -p 1 -v ./...
-
-dbtest: mocks
-	go clean -testcache && TEST_WITH_DB=true go test -p 1 -v ./...
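Typical usage of the new targets; a sketch assuming `mockgen` is on `PATH` (the `?=` default) or installed with `go install github.com/golang/mock/mockgen@v1.6.0`:

```bash
# Generate the indexer mock, then run unit tests (test depends on mocks)
make mocks
make test

# Or point the mocks rule at an explicit mockgen binary
make MOCKGEN="$(go env GOPATH)/bin/mockgen" mocks
```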
56 README.md
@@ -1,6 +1,6 @@
 # ipld-eth-state-snapshot
 
-> Tool for extracting the entire Ethereum state at a particular block height from leveldb into Postgres-backed IPFS
+> Tool for extracting the entire Ethereum state at a particular block height from a cold database into Postgres-backed IPFS
 
 [![Go Report Card](https://goreportcard.com/badge/github.com/vulcanize/ipld-eth-state-snapshot)](https://goreportcard.com/report/github.com/vulcanize/ipld-eth-state-snapshot)
 
@@ -19,16 +19,16 @@ Config format:
 ```toml
 [snapshot]
 mode = "file" # indicates output mode <postgres | file>
-workers = 4 # degree of concurrency, the state trie is subdivided into sections that are traversed and processed concurrently
-blockHeight = -1 # blockheight to perform the snapshot at (-1 indicates to use the latest blockheight found in leveldb)
+workers = 4 # degree of concurrency: the state trie is subdivided into sections that are traversed and processed concurrently
+blockHeight = -1 # blockheight to perform the snapshot at (-1 indicates to use the latest blockheight found in ethdb)
 recoveryFile = "recovery_file" # specifies a file to output recovery information on error or premature closure
 accounts = [] # list of accounts (addresses) to take the snapshot for # SNAPSHOT_ACCOUNTS
 
-[leveldb]
-# path to geth leveldb
-path = "/Users/user/Library/Ethereum/geth/chaindata" # LVL_DB_PATH
+[ethdb]
+# path to geth ethdb
+path = "/Users/user/Library/Ethereum/geth/chaindata" # ETHDB_PATH
 # path to geth ancient database
-ancient = "/Users/user/Library/Ethereum/geth/chaindata/ancient" # ANCIENT_DB_PATH
+ancient = "/Users/user/Library/Ethereum/geth/chaindata/ancient" # ETHDB_ANCIENT
 
 [database]
 # when operating in 'postgres' output mode
@@ -65,9 +65,15 @@ Config format:
 genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" # ETH_GENESIS_BLOCK
 ```
 
+> **Note:** previous versions of this service used different variable names. To update, change the following:
+> * `LVL_DB_PATH`, `LEVELDB_PATH` => `ETHDB_PATH`
+> * `ANCIENT_DB_PATH`, `LEVELDB_ANCIENT` => `ETHDB_ANCIENT`
+> * `LOGRUS_LEVEL`, `LOGRUS_FILE` => `LOG_LEVEL`, `LOG_FILE`, etc.
+
 
 ## Usage
 
-* For state snapshot from LevelDB:
+* For state snapshot from EthDB:
 
 ```bash
 ./ipld-eth-state-snapshot stateSnapshot --config={path to toml config file}
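For an existing deployment, the renames in the note above amount to re-exporting the old values under the new names; a sketch (the values are whatever the old deployment used):

```bash
export ETHDB_PATH="$LVL_DB_PATH"         # was LVL_DB_PATH / LEVELDB_PATH
export ETHDB_ANCIENT="$ANCIENT_DB_PATH"  # was ANCIENT_DB_PATH / LEVELDB_ANCIENT
export LOG_LEVEL="$LOGRUS_LEVEL"         # was LOGRUS_LEVEL
export LOG_FILE="$LOGRUS_FILE"           # was LOGRUS_FILE
```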
@@ -84,12 +90,6 @@ Config format:
 ]
 ```
 
-* For in-place snapshot in the database:
-
-```bash
-./ipld-eth-state-snapshot inPlaceStateSnapshot --config={path to toml config file}
-```
-
 ## Monitoring
 
 * Enable metrics using config parameters `prom.metrics` and `prom.http`.
|
|||||||
* Combine output from multiple workers and copy to post-processed output directory:
|
* Combine output from multiple workers and copy to post-processed output directory:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# public.blocks
|
# ipld.blocks
|
||||||
cat {output_dir,output_dir/*}/public.blocks.csv > output_dir/processed_output/combined-public.blocks.csv
|
cat {output_dir,output_dir/*}/ipld.blocks.csv > output_dir/processed_output/combined-ipld.blocks.csv
|
||||||
|
|
||||||
# eth.state_cids
|
# eth.state_cids
|
||||||
cat output_dir/*/eth.state_cids.csv > output_dir/processed_output/combined-eth.state_cids.csv
|
cat output_dir/*/eth.state_cids.csv > output_dir/processed_output/combined-eth.state_cids.csv
|
||||||
@ -150,8 +150,8 @@ Config format:
|
|||||||
* De-duplicate data:
|
* De-duplicate data:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# public.blocks
|
# ipld.blocks
|
||||||
sort -u output_dir/processed_output/combined-public.blocks.csv -o output_dir/processed_output/deduped-combined-public.blocks.csv
|
sort -u output_dir/processed_output/combined-ipld.blocks.csv -o output_dir/processed_output/deduped-combined-ipld.blocks.csv
|
||||||
|
|
||||||
# eth.header_cids
|
# eth.header_cids
|
||||||
sort -u output_dir/processed_output/eth.header_cids.csv -o output_dir/processed_output/deduped-eth.header_cids.csv
|
sort -u output_dir/processed_output/eth.header_cids.csv -o output_dir/processed_output/deduped-eth.header_cids.csv
|
||||||
@ -177,8 +177,8 @@ Config format:
|
|||||||
# public.nodes
|
# public.nodes
|
||||||
COPY public.nodes FROM '/output_dir/processed_output/public.nodes.csv' CSV;
|
COPY public.nodes FROM '/output_dir/processed_output/public.nodes.csv' CSV;
|
||||||
|
|
||||||
# public.blocks
|
# ipld.blocks
|
||||||
COPY public.blocks FROM '/output_dir/processed_output/deduped-combined-public.blocks.csv' CSV;
|
COPY ipld.blocks FROM '/output_dir/processed_output/deduped-combined-ipld.blocks.csv' CSV;
|
||||||
|
|
||||||
# eth.header_cids
|
# eth.header_cids
|
||||||
COPY eth.header_cids FROM '/output_dir/processed_output/deduped-eth.header_cids.csv' CSV;
|
COPY eth.header_cids FROM '/output_dir/processed_output/deduped-eth.header_cids.csv' CSV;
|
||||||
@ -191,3 +191,19 @@ Config format:
|
|||||||
```
|
```
|
||||||
|
|
||||||
* NOTE: `COPY` command on CSVs inserts empty strings as `NULL` in the DB. Passing `FORCE_NOT_NULL <COLUMN_NAME>` forces it to insert empty strings instead. This is required to maintain compatibility of the imported snapshot data with the data generated by statediffing. Reference: https://www.postgresql.org/docs/14/sql-copy.html
|
* NOTE: `COPY` command on CSVs inserts empty strings as `NULL` in the DB. Passing `FORCE_NOT_NULL <COLUMN_NAME>` forces it to insert empty strings instead. This is required to maintain compatibility of the imported snapshot data with the data generated by statediffing. Reference: https://www.postgresql.org/docs/14/sql-copy.html
|
||||||
|
|
||||||
|
### Troubleshooting
|
||||||
|
|
||||||
|
* Run the following command to find any rows (in data dumps in `file` mode) having unexpected number of columns:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./scripts/find-bad-rows.sh -i <input-file> -c <expected-columns> -o [output-file] -d true
|
||||||
|
```
|
||||||
|
|
||||||
|
* Run the following command to select rows (from data dumps in `file` mode) other than the ones having unexpected number of columns:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./scripts/filter-bad-rows.sh -i <input-file> -c <expected-columns> -o <output-file>
|
||||||
|
```
|
||||||
|
|
||||||
|
* See [scripts](./scripts) for more details.
|
||||||
|
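A sketch of the `FORCE_NOT_NULL` usage the NOTE above refers to; the table and column names here are illustrative, not a prescribed import list:

```bash
# Keep empty CSV fields as empty strings rather than NULLs on import
psql -U vdbm cerc_testing -c "COPY eth.state_cids \
  FROM '/output_dir/processed_output/combined-eth.state_cids.csv' \
  WITH (FORMAT csv, FORCE_NOT_NULL (state_leaf_key));"
```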
@@ -1,63 +0,0 @@ (deleted file)
// VulcanizeDB
// Copyright © 2022 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	"github.com/vulcanize/ipld-eth-state-snapshot/pkg/snapshot"
)

// inPlaceStateSnapshotCmd represents the inPlaceStateSnapshot command
var inPlaceStateSnapshotCmd = &cobra.Command{
	Use:   "inPlaceStateSnapshot",
	Short: "Take an in-place state snapshot in the database",
	Long: `Usage:

./ipld-eth-state-snapshot inPlaceStateSnapshot --config={path to toml config file}`,
	Run: func(cmd *cobra.Command, args []string) {
		subCommand = cmd.CalledAs()
		logWithCommand = *logrus.WithField("SubCommand", subCommand)
		inPlaceStateSnapshot()
	},
}

func inPlaceStateSnapshot() {
	config := snapshot.NewInPlaceSnapshotConfig()

	startHeight := viper.GetUint64(snapshot.SNAPSHOT_START_HEIGHT_TOML)
	endHeight := viper.GetUint64(snapshot.SNAPSHOT_END_HEIGHT_TOML)

	params := snapshot.InPlaceSnapshotParams{StartHeight: uint64(startHeight), EndHeight: uint64(endHeight)}
	if err := snapshot.CreateInPlaceSnapshot(config, params); err != nil {
		logWithCommand.Fatal(err)
	}

	logWithCommand.Infof("snapshot taken at height %d starting from height %d", endHeight, startHeight)
}

func init() {
	rootCmd.AddCommand(inPlaceStateSnapshotCmd)

	inPlaceStateSnapshotCmd.PersistentFlags().String(snapshot.SNAPSHOT_START_HEIGHT_CLI, "", "start block height for in-place snapshot")
	inPlaceStateSnapshotCmd.PersistentFlags().String(snapshot.SNAPSHOT_END_HEIGHT_CLI, "", "end block height for in-place snapshot")

	viper.BindPFlag(snapshot.SNAPSHOT_START_HEIGHT_TOML, inPlaceStateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_START_HEIGHT_CLI))
	viper.BindPFlag(snapshot.SNAPSHOT_END_HEIGHT_TOML, inPlaceStateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_END_HEIGHT_CLI))
}
21 cmd/root.go
@@ -25,8 +25,8 @@ import (
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 
-	"github.com/vulcanize/ipld-eth-state-snapshot/pkg/prom"
-	"github.com/vulcanize/ipld-eth-state-snapshot/pkg/snapshot"
+	"github.com/cerc-io/ipld-eth-state-snapshot/pkg/prom"
+	"github.com/cerc-io/ipld-eth-state-snapshot/pkg/snapshot"
 )
 
 var (
@@ -42,14 +42,13 @@ var rootCmd = &cobra.Command{
 
 // Execute executes root Command.
 func Execute() {
-	log.Info("----- Starting vDB -----")
 	if err := rootCmd.Execute(); err != nil {
 		log.Fatal(err)
 	}
 }
 
 func initFuncs(cmd *cobra.Command, args []string) {
-	logfile := viper.GetString(snapshot.LOGRUS_FILE_TOML)
+	logfile := viper.GetString(snapshot.LOG_FILE_TOML)
 	if logfile != "" {
 		file, err := os.OpenFile(logfile,
 			os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
@@ -68,7 +67,7 @@ func initFuncs(cmd *cobra.Command, args []string) {
 	}
 
 	if viper.GetBool(snapshot.PROM_METRICS_TOML) {
-		log.Info("initializing prometheus metrics")
+		log.Info("Initializing prometheus metrics")
 		prom.Init()
 	}
 
@@ -84,7 +83,7 @@ func initFuncs(cmd *cobra.Command, args []string) {
 }
 
 func logLevel() error {
-	lvl, err := log.ParseLevel(viper.GetString(snapshot.LOGRUS_LEVEL_TOML))
+	lvl, err := log.ParseLevel(viper.GetString(snapshot.LOG_LEVEL_TOML))
 	if err != nil {
 		return err
 	}
@@ -103,13 +102,13 @@ func init() {
 	viper.AutomaticEnv()
 
 	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file location")
-	rootCmd.PersistentFlags().String(snapshot.LOGRUS_FILE_CLI, "", "file path for logging")
+	rootCmd.PersistentFlags().String(snapshot.LOG_FILE_CLI, "", "file path for logging")
 	rootCmd.PersistentFlags().String(snapshot.DATABASE_NAME_CLI, "vulcanize_public", "database name")
 	rootCmd.PersistentFlags().Int(snapshot.DATABASE_PORT_CLI, 5432, "database port")
 	rootCmd.PersistentFlags().String(snapshot.DATABASE_HOSTNAME_CLI, "localhost", "database hostname")
 	rootCmd.PersistentFlags().String(snapshot.DATABASE_USER_CLI, "", "database user")
 	rootCmd.PersistentFlags().String(snapshot.DATABASE_PASSWORD_CLI, "", "database password")
-	rootCmd.PersistentFlags().String(snapshot.LOGRUS_LEVEL_CLI, log.InfoLevel.String(), "log level (trace, debug, info, warn, error, fatal, panic)")
+	rootCmd.PersistentFlags().String(snapshot.LOG_LEVEL_CLI, log.InfoLevel.String(), "log level (trace, debug, info, warn, error, fatal, panic)")
 
 	rootCmd.PersistentFlags().Bool(snapshot.PROM_METRICS_CLI, false, "enable prometheus metrics")
 	rootCmd.PersistentFlags().Bool(snapshot.PROM_HTTP_CLI, false, "enable prometheus http service")
@@ -117,13 +116,13 @@ func init() {
 	rootCmd.PersistentFlags().String(snapshot.PROM_HTTP_PORT_CLI, "8086", "prometheus http port")
 	rootCmd.PersistentFlags().Bool(snapshot.PROM_DB_STATS_CLI, false, "enables prometheus db stats")
 
-	viper.BindPFlag(snapshot.LOGRUS_FILE_TOML, rootCmd.PersistentFlags().Lookup(snapshot.LOGRUS_FILE_CLI))
+	viper.BindPFlag(snapshot.LOG_FILE_TOML, rootCmd.PersistentFlags().Lookup(snapshot.LOG_FILE_CLI))
 	viper.BindPFlag(snapshot.DATABASE_NAME_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_NAME_CLI))
 	viper.BindPFlag(snapshot.DATABASE_PORT_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_PORT_CLI))
 	viper.BindPFlag(snapshot.DATABASE_HOSTNAME_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_HOSTNAME_CLI))
 	viper.BindPFlag(snapshot.DATABASE_USER_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_USER_CLI))
 	viper.BindPFlag(snapshot.DATABASE_PASSWORD_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_PASSWORD_CLI))
-	viper.BindPFlag(snapshot.LOGRUS_LEVEL_TOML, rootCmd.PersistentFlags().Lookup(snapshot.LOGRUS_LEVEL_CLI))
+	viper.BindPFlag(snapshot.LOG_LEVEL_TOML, rootCmd.PersistentFlags().Lookup(snapshot.LOG_LEVEL_CLI))
 
 	viper.BindPFlag(snapshot.PROM_METRICS_TOML, rootCmd.PersistentFlags().Lookup(snapshot.PROM_METRICS_CLI))
 	viper.BindPFlag(snapshot.PROM_HTTP_TOML, rootCmd.PersistentFlags().Lookup(snapshot.PROM_HTTP_CLI))
@@ -138,7 +137,7 @@ func initConfig() {
 		if err := viper.ReadInConfig(); err == nil {
 			log.Printf("Using config file: %s", viper.ConfigFileUsed())
 		} else {
-			log.Fatal(fmt.Sprintf("Couldn't read config file: %s", err.Error()))
+			log.Fatalf("Couldn't read config file: %s", err)
 		}
 	} else {
 		log.Warn("No config file passed with --config flag")
@@ -16,19 +16,21 @@
 package cmd
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 
-	"github.com/vulcanize/ipld-eth-state-snapshot/pkg/snapshot"
+	"github.com/cerc-io/ipld-eth-state-snapshot/pkg/snapshot"
+	"github.com/cerc-io/plugeth-statediff/indexer"
 )
 
 // stateSnapshotCmd represents the stateSnapshot command
 var stateSnapshotCmd = &cobra.Command{
 	Use:   "stateSnapshot",
-	Short: "Extract the entire Ethereum state from leveldb and publish into PG-IPFS",
+	Short: "Extract the entire Ethereum state from Ethdb and publish into PG-IPFS",
 	Long: `Usage
 
 ./ipld-eth-state-snapshot stateSnapshot --config={path to toml config file}`,
@@ -40,15 +42,14 @@ var stateSnapshotCmd = &cobra.Command{
 }
 
 func stateSnapshot() {
-	modeStr := viper.GetString(snapshot.SNAPSHOT_MODE_TOML)
-	mode := snapshot.SnapshotMode(modeStr)
+	mode := snapshot.SnapshotMode(viper.GetString(snapshot.SNAPSHOT_MODE_TOML))
 	config, err := snapshot.NewConfig(mode)
 	if err != nil {
 		logWithCommand.Fatalf("unable to initialize config: %v", err)
 	}
-	logWithCommand.Infof("opening levelDB and ancient data at %s and %s",
-		config.Eth.LevelDBPath, config.Eth.AncientDBPath)
-	edb, err := snapshot.NewLevelDB(config.Eth)
+	logWithCommand.Infof("opening ethdb and ancient data at %s and %s",
+		config.Eth.DBPath, config.Eth.AncientDBPath)
+	edb, err := snapshot.NewEthDB(config.Eth)
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
@@ -59,12 +60,25 @@ func stateSnapshot() {
 		logWithCommand.Infof("no recovery file set, using default: %s", recoveryFile)
 	}
 
-	pub, err := snapshot.NewPublisher(mode, config)
+	var idxconfig indexer.Config
+	switch mode {
+	case snapshot.PgSnapshot:
+		idxconfig = *config.DB
+	case snapshot.FileSnapshot:
+		idxconfig = *config.File
+	}
+	_, indexer, err := indexer.NewStateDiffIndexer(
+		context.Background(),
+		nil, // ChainConfig is only used in PushBlock, which we don't call
+		config.Eth.NodeInfo,
+		idxconfig,
+		false,
+	)
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
 
-	snapshotService, err := snapshot.NewSnapshotService(edb, pub, recoveryFile)
+	snapshotService, err := snapshot.NewSnapshotService(edb, indexer, recoveryFile)
 	if err != nil {
 		logWithCommand.Fatal(err)
 	}
@@ -79,14 +93,14 @@ func stateSnapshot() {
 			logWithCommand.Fatal(err)
 		}
 	}
-	logWithCommand.Infof("state snapshot at height %d is complete", height)
+	logWithCommand.Infof("State snapshot at height %d is complete", height)
 }
 
 func init() {
 	rootCmd.AddCommand(stateSnapshotCmd)
 
-	stateSnapshotCmd.PersistentFlags().String(snapshot.LVL_DB_PATH_CLI, "", "path to primary datastore")
-	stateSnapshotCmd.PersistentFlags().String(snapshot.ANCIENT_DB_PATH_CLI, "", "path to ancient datastore")
+	stateSnapshotCmd.PersistentFlags().String(snapshot.ETHDB_PATH_CLI, "", "path to primary datastore")
+	stateSnapshotCmd.PersistentFlags().String(snapshot.ETHDB_ANCIENT_CLI, "", "path to ancient datastore")
 	stateSnapshotCmd.PersistentFlags().String(snapshot.SNAPSHOT_BLOCK_HEIGHT_CLI, "", "block height to extract state at")
 	stateSnapshotCmd.PersistentFlags().Int(snapshot.SNAPSHOT_WORKERS_CLI, 1, "number of concurrent workers to use")
 	stateSnapshotCmd.PersistentFlags().String(snapshot.SNAPSHOT_RECOVERY_FILE_CLI, "", "file to recover from a previous iteration")
@@ -94,8 +108,8 @@ func init() {
 	stateSnapshotCmd.PersistentFlags().String(snapshot.FILE_OUTPUT_DIR_CLI, "", "directory for writing ouput to while operating in 'file' mode")
 	stateSnapshotCmd.PersistentFlags().StringArray(snapshot.SNAPSHOT_ACCOUNTS_CLI, nil, "list of account addresses to limit snapshot to")
 
-	viper.BindPFlag(snapshot.LVL_DB_PATH_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.LVL_DB_PATH_CLI))
-	viper.BindPFlag(snapshot.ANCIENT_DB_PATH_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.ANCIENT_DB_PATH_CLI))
+	viper.BindPFlag(snapshot.ETHDB_PATH_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.ETHDB_PATH_CLI))
+	viper.BindPFlag(snapshot.ETHDB_ANCIENT_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.ETHDB_ANCIENT_CLI))
 	viper.BindPFlag(snapshot.SNAPSHOT_BLOCK_HEIGHT_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_BLOCK_HEIGHT_CLI))
 	viper.BindPFlag(snapshot.SNAPSHOT_WORKERS_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_WORKERS_CLI))
 	viper.BindPFlag(snapshot.SNAPSHOT_RECOVERY_FILE_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_RECOVERY_FILE_CLI))
@@ -1,8 +0,0 @@ (deleted migration)
-- +goose Up
CREATE TABLE IF NOT EXISTS public.blocks (
	key TEXT UNIQUE NOT NULL,
	data BYTEA NOT NULL
);

-- +goose Down
DROP TABLE public.blocks;

@@ -1,12 +0,0 @@ (deleted migration)
-- +goose Up
CREATE TABLE nodes (
	id SERIAL PRIMARY KEY,
	client_name VARCHAR,
	genesis_block VARCHAR(66),
	network_id VARCHAR,
	node_id VARCHAR(128),
	CONSTRAINT node_uc UNIQUE (genesis_block, network_id, node_id)
);

-- +goose Down
DROP TABLE nodes;

@@ -1,5 +0,0 @@ (deleted migration)
-- +goose Up
CREATE SCHEMA eth;

-- +goose Down
DROP SCHEMA eth;

@@ -1,23 +0,0 @@ (deleted migration)
-- +goose Up
CREATE TABLE eth.header_cids (
	id SERIAL PRIMARY KEY,
	block_number BIGINT NOT NULL,
	block_hash VARCHAR(66) NOT NULL,
	parent_hash VARCHAR(66) NOT NULL,
	cid TEXT NOT NULL,
	mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
	td NUMERIC NOT NULL,
	node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
	reward NUMERIC NOT NULL,
	state_root VARCHAR(66) NOT NULL,
	tx_root VARCHAR(66) NOT NULL,
	receipt_root VARCHAR(66) NOT NULL,
	uncle_root VARCHAR(66) NOT NULL,
	bloom BYTEA NOT NULL,
	timestamp NUMERIC NOT NULL,
	times_validated INTEGER NOT NULL DEFAULT 1,
	UNIQUE (block_number, block_hash)
);

-- +goose Down
DROP TABLE eth.header_cids;

@@ -1,14 +0,0 @@ (deleted migration)
-- +goose Up
CREATE TABLE eth.uncle_cids (
	id SERIAL PRIMARY KEY,
	header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
	block_hash VARCHAR(66) NOT NULL,
	parent_hash VARCHAR(66) NOT NULL,
	cid TEXT NOT NULL,
	mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
	reward NUMERIC NOT NULL,
	UNIQUE (header_id, block_hash)
);

-- +goose Down
DROP TABLE eth.uncle_cids;

@@ -1,15 +0,0 @@ (deleted migration)
-- +goose Up
CREATE TABLE eth.transaction_cids (
	id SERIAL PRIMARY KEY,
	header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
	tx_hash VARCHAR(66) NOT NULL,
	index INTEGER NOT NULL,
	cid TEXT NOT NULL,
	mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
	dst VARCHAR(66) NOT NULL,
	src VARCHAR(66) NOT NULL,
	UNIQUE (header_id, tx_hash)
);

-- +goose Down
DROP TABLE eth.transaction_cids;

@@ -1,18 +0,0 @@ (deleted migration)
-- +goose Up
CREATE TABLE eth.receipt_cids (
	id SERIAL PRIMARY KEY,
	tx_id INTEGER NOT NULL REFERENCES eth.transaction_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
	cid TEXT NOT NULL,
	mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
	contract VARCHAR(66),
	contract_hash VARCHAR(66),
	topic0s VARCHAR(66)[],
	topic1s VARCHAR(66)[],
	topic2s VARCHAR(66)[],
	topic3s VARCHAR(66)[],
	log_contracts VARCHAR(66)[],
	UNIQUE (tx_id)
);

-- +goose Down
DROP TABLE eth.receipt_cids;

@@ -1,15 +0,0 @@ (deleted migration)
-- +goose Up
CREATE TABLE eth.state_cids (
	id SERIAL PRIMARY KEY,
	header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
	state_leaf_key VARCHAR(66),
	cid TEXT NOT NULL,
	mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
	state_path BYTEA,
	node_type INTEGER,
	diff BOOLEAN NOT NULL DEFAULT FALSE,
	UNIQUE (header_id, state_path)
);

-- +goose Down
DROP TABLE eth.state_cids;

@@ -1,15 +0,0 @@ (deleted migration)
-- +goose Up
CREATE TABLE eth.storage_cids (
	id SERIAL PRIMARY KEY,
	state_id INTEGER NOT NULL REFERENCES eth.state_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
	storage_leaf_key VARCHAR(66),
	cid TEXT NOT NULL,
	mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
	storage_path BYTEA,
	node_type INTEGER NOT NULL,
	diff BOOLEAN NOT NULL DEFAULT FALSE,
	UNIQUE (state_id, storage_path)
);

-- +goose Down
DROP TABLE eth.storage_cids;

@@ -1,13 +0,0 @@ (deleted migration)
-- +goose Up
CREATE TABLE eth.state_accounts (
	id SERIAL PRIMARY KEY,
	state_id INTEGER NOT NULL REFERENCES eth.state_cids (id) ON DELETE CASCADE,
	balance NUMERIC NOT NULL,
	nonce INTEGER NOT NULL,
	code_hash BYTEA NOT NULL,
	storage_root VARCHAR(66) NOT NULL,
	UNIQUE (state_id)
);

-- +goose Down
DROP TABLE eth.state_accounts;
@@ -1,16 +1,17 @@
 [database]
-name = "vulcanize_public"
+name = "cerc_testing"
 hostname = "localhost"
-port = 5432
-user = "postgres"
+port = 8077
+user = "vdbm"
+password = "password"
 
-[leveldb]
-path = "/Users/iannorden/Library/Ethereum/geth/chaindata"
-ancient = "/Users/iannorden/Library/Ethereum/geth/chaindata/ancient"
+[ethdb]
+path = "/Users/user/go/src/github.com/cerc-io/ipld-eth-state-snapshot/fixture/chain2data"
+ancient = "/Users/user/go/src/github.com/cerc-io/ipld-eth-state-snapshot/fixture/chain2data/ancient"
 
 [log]
 level = "info"
-file = "log_file"
+file = "" # Leave blank to output to stdout
 
 [prom]
 metrics = true
@@ -22,12 +23,9 @@
 [snapshot]
 mode = "file"
 workers = 4
-blockHeight = -1
+blockHeight = 32
 recoveryFile = "recovery_file"
-
-startHeight = 1
-endHeight = 12
 
 [file]
 outputDir = "output_dir/"
 
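A quick connectivity check against the test database configured above, using values straight from the `[database]` section:

```bash
PGPASSWORD=password psql -h localhost -p 8077 -U vdbm cerc_testing -c 'select 1;'
```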
7 fixture/.gitignore (vendored, deleted)
@@ -1,7 +0,0 @@
*/*.log
*/CURRENT*
*/LOCK
*/LOG
*/MANIFEST-*
*/ancient/FLOCK
*/ancient/*.meta
(Several binary fixture files deleted — contents not shown — along with a few one-line fixture files containing only non-printable bytes.)
@@ -1,27 +0,0 @@ (deleted file)
package fixture

import (
	"os"
	"path/filepath"
)

// TODO: embed some mainnet data
// import "embed"
//_go:embed mainnet_data.tar.gz

// GetChainDataPath returns the absolute paths to chain data in 'fixture/' given the chain (chain | chain2)
func GetChainDataPath(chain string) (string, string) {
	path := filepath.Join("..", "..", "fixture", chain)

	chaindataPath, err := filepath.Abs(path)
	if err != nil {
		panic("cannot resolve path " + path)
	}
	ancientdataPath := filepath.Join(chaindataPath, "ancient")

	if _, err := os.Stat(chaindataPath); err != nil {
		panic("must populate chaindata at " + chaindataPath)
	}

	return chaindataPath, ancientdataPath
}
(Eleven more binary fixture files deleted — contents not shown.)
@ -1,307 +0,0 @@
|
|||||||
// Copyright © 2022 Vulcanize, Inc
|
|
||||||
//
|
|
||||||
// This program is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Affero General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// This program is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Affero General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Affero General Public License
|
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package fixture
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math/big"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
|
||||||
snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Block struct {
|
|
||||||
Hash common.Hash
|
|
||||||
Number *big.Int
|
|
||||||
StateNodes []snapt.Node
|
|
||||||
StorageNodes [][]snapt.Node
|
|
||||||
}
|
|
||||||
|
|
||||||
var InPlaceSnapshotBlocks = []Block{
|
|
||||||
// Genesis block
|
|
||||||
{
|
|
||||||
Hash: common.HexToHash("0xe1bdb963128f645aa674b52a8c7ce00704762f27e2a6896abebd7954878f40e4"),
|
|
||||||
Number: big.NewInt(0),
|
|
||||||
StateNodes: []snapt.Node{
|
|
||||||
// State node for main account with balance.
|
|
||||||
{
|
|
||||||
NodeType: 2,
|
|
||||||
Path: []byte{},
|
|
||||||
Key: common.HexToHash("0x67ab3c0dd775f448af7fb41243415ed6fb975d1530a2d828f69bea7346231ad7"),
|
|
||||||
Value: []byte{248, 119, 161, 32, 103, 171, 60, 13, 215, 117, 244, 72, 175, 127, 180, 18, 67, 65, 94, 214, 251, 151, 93, 21, 48, 162, 216, 40, 246, 155, 234, 115, 70, 35, 26, 215, 184, 83, 248, 81, 128, 141, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
// Contract Test1 deployed by main account.
|
|
||||||
{
|
|
||||||
Hash: common.HexToHash("0x46ce57b700e470d0c0820ede662ecc0d0c78cf87237cb12a40a7ff5ff9cc8ac5"),
|
|
||||||
Number: big.NewInt(1),
|
|
||||||
StateNodes: []snapt.Node{
|
|
||||||
// Branch root node.
|
|
||||||
{
|
|
||||||
NodeType: 0,
|
|
||||||
Path: []byte{},
|
|
||||||
Value: []byte{248, 81, 128, 128, 128, 128, 128, 128, 160, 173, 52, 73, 195, 118, 160, 81, 100, 138, 50, 127, 27, 188, 85, 147, 215, 187, 244, 219, 228, 93, 25, 72, 253, 160, 45, 16, 239, 130, 223, 160, 26, 128, 128, 160, 137, 52, 229, 60, 211, 96, 171, 177, 51, 19, 204, 180, 24, 252, 28, 70, 234, 7, 73, 20, 117, 230, 32, 223, 188, 6, 191, 75, 123, 64, 163, 197, 128, 128, 128, 128, 128, 128, 128},
|
|
||||||
},
|
|
||||||
// State node for sender account.
|
|
||||||
{
|
|
||||||
NodeType: 2,
|
|
||||||
Path: []byte{6},
|
|
||||||
Key: common.HexToHash("0x67ab3c0dd775f448af7fb41243415ed6fb975d1530a2d828f69bea7346231ad7"),
|
|
||||||
Value: []byte{248, 118, 160, 55, 171, 60, 13, 215, 117, 244, 72, 175, 127, 180, 18, 67, 65, 94, 214, 251, 151, 93, 21, 48, 162, 216, 40, 246, 155, 234, 115, 70, 35, 26, 215, 184, 83, 248, 81, 1, 141, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112},
|
|
||||||
},
|
|
||||||
// State node for deployment of contract Test1.
|
|
||||||
{
|
|
||||||
NodeType: 2,
|
|
||||||
Path: []byte{9},
|
|
||||||
Key: common.HexToHash("0x9397e33dedda4763aea143fc6151ebcd9a93f62db7a6a556d46c585d82ad2afc"),
|
|
||||||
Value: []byte{248, 105, 160, 51, 151, 227, 61, 237, 218, 71, 99, 174, 161, 67, 252, 97, 81, 235, 205, 154, 147, 246, 45, 183, 166, 165, 86, 212, 108, 88, 93, 130, 173, 42, 252, 184, 70, 248, 68, 1, 128, 160, 243, 143, 159, 99, 199, 96, 208, 136, 215, 221, 4, 247, 67, 97, 155, 98, 145, 246, 59, 238, 189, 139, 223, 83, 6, 40, 249, 14, 156, 250, 82, 215, 160, 47, 62, 207, 242, 160, 167, 130, 233, 6, 187, 196, 80, 96, 6, 188, 150, 74, 176, 201, 7, 65, 32, 174, 97, 1, 76, 26, 86, 141, 49, 62, 214},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
StorageNodes: [][]snapt.Node{
|
|
||||||
{},
|
|
||||||
{},
|
|
||||||
{
|
|
||||||
// Storage node for contract Test1 state variable initialCount.
|
|
||||||
{
|
|
||||||
NodeType: 2,
|
|
||||||
Path: []byte{},
|
|
||||||
Key: common.HexToHash("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"),
|
|
||||||
Value: []byte{227, 161, 32, 177, 14, 45, 82, 118, 18, 7, 59, 38, 238, 205, 253, 113, 126, 106, 50, 12, 244, 75, 74, 250, 194, 176, 115, 45, 159, 203, 226, 183, 250, 12, 246, 1},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
// Contract Test2 deployed by main account.
|
|
||||||
{
|
|
||||||
Hash: common.HexToHash("0xa848b156fe4e61d8dac0a833720794e8c58e93fa6db369af6f0d9a19ada9d723"),
|
|
||||||
Number: big.NewInt(2),
|
|
||||||
StateNodes: []snapt.Node{
|
|
||||||
// Branch root node.
|
|
||||||
{
|
|
||||||
NodeType: 0,
|
|
||||||
Path: []byte{},
|
|
||||||
Value: []byte{248, 81, 128, 128, 128, 128, 128, 128, 160, 191, 248, 9, 223, 101, 212, 255, 213, 196, 146, 160, 239, 69, 178, 134, 139, 81, 22, 255, 149, 90, 253, 178, 172, 102, 87, 249, 225, 224, 173, 183, 55, 128, 128, 160, 165, 200, 234, 64, 112, 157, 130, 31, 236, 38, 20, 68, 99, 247, 81, 161, 76, 62, 186, 246, 84, 121, 39, 155, 102, 134, 188, 109, 89, 220, 31, 212, 128, 128, 128, 128, 128, 128, 128},
|
|
||||||
},
|
|
||||||
// State node for sender account.
|
|
||||||
{
|
|
||||||
NodeType: 2,
|
|
||||||
Path: []byte{6},
|
|
||||||
Key: common.HexToHash("0x67ab3c0dd775f448af7fb41243415ed6fb975d1530a2d828f69bea7346231ad7"),
|
|
||||||
Value: []byte{248, 118, 160, 55, 171, 60, 13, 215, 117, 244, 72, 175, 127, 180, 18, 67, 65, 94, 214, 251, 151, 93, 21, 48, 162, 216, 40, 246, 155, 234, 115, 70, 35, 26, 215, 184, 83, 248, 81, 2, 141, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112},
|
|
||||||
},
|
|
||||||
// State node for deployment of contract Test2.
|
|
||||||
{
|
|
||||||
NodeType: 2,
|
|
||||||
Path: []byte{10},
|
|
||||||
Key: common.HexToHash("0xa44b5f4b47ded891709350af6a6e4d56602228a70279bdad4f0f64042445b4b9"),
|
|
||||||
Value: []byte{248, 105, 160, 52, 75, 95, 75, 71, 222, 216, 145, 112, 147, 80, 175, 106, 110, 77, 86, 96, 34, 40, 167, 2, 121, 189, 173, 79, 15, 100, 4, 36, 69, 180, 185, 184, 70, 248, 68, 1, 128, 160, 130, 30, 37, 86, 162, 144, 200, 100, 5, 248, 22, 10, 45, 102, 32, 66, 164, 49, 186, 69, 107, 157, 178, 101, 199, 155, 184, 55, 192, 75, 229, 240, 160, 86, 36, 245, 233, 5, 167, 42, 118, 181, 35, 178, 216, 149, 56, 146, 147, 19, 8, 140, 137, 234, 0, 160, 27, 220, 33, 204, 6, 152, 239, 177, 52},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
StorageNodes: [][]snapt.Node{
|
|
||||||
{},
|
|
||||||
{},
|
|
||||||
{
|
|
||||||
// Storage node for contract Test2 state variable test.
|
|
||||||
{
|
|
||||||
NodeType: 2,
|
|
||||||
Path: []byte{},
|
|
||||||
Key: common.HexToHash("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"),
|
|
||||||
Value: []byte{227, 161, 32, 41, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
// Increment contract Test1 state variable count using main account.
|
|
||||||
{
|
|
||||||
Hash: common.HexToHash("0x9fc4aaaab26f0b43ac609c99ae50925e5dc9a25f103c0511fcff38c6b3158302"),
|
|
||||||
Number: big.NewInt(3),
|
|
||||||
StateNodes: []snapt.Node{
|
|
||||||
// Branch root node.
|
|
||||||
{
|
|
||||||
NodeType: 0,
|
|
||||||
Path: []byte{},
|
|
||||||
Value: []byte{248, 113, 128, 128, 128, 128, 128, 128, 160, 70, 53, 190, 199, 124, 254, 86, 213, 42, 126, 117, 155, 2, 223, 56, 167, 130, 118, 10, 150, 65, 46, 207, 169, 167, 250, 209, 64, 37, 205, 153, 51, 128, 128, 160, 165, 200, 234, 64, 112, 157, 130, 31, 236, 38, 20, 68, 99, 247, 81, 161, 76, 62, 186, 246, 84, 121, 39, 155, 102, 134, 188, 109, 89, 220, 31, 212, 128, 128, 160, 214, 109, 199, 206, 145, 11, 213, 44, 206, 214, 36, 181, 134, 92, 243, 178, 58, 88, 158, 42, 31, 125, 71, 148, 188, 122, 252, 100, 250, 182, 85, 159, 128, 128, 128, 128},
|
|
||||||
},
|
|
||||||
// State node for sender account.
|
|
||||||
{
|
|
||||||
NodeType: 2,
|
|
||||||
Path: []byte{6},
|
|
||||||
Key: common.HexToHash("0x67ab3c0dd775f448af7fb41243415ed6fb975d1530a2d828f69bea7346231ad7"),
|
|
||||||
Value: []byte{248, 118, 160, 55, 171, 60, 13, 215, 117, 244, 72, 175, 127, 180, 18, 67, 65, 94, 214, 251, 151, 93, 21, 48, 162, 216, 40, 246, 155, 234, 115, 70, 35, 26, 215, 184, 83, 248, 81, 3, 141, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112},
|
|
||||||
},
|
|
||||||
// State node for contract Test1 transaction.
|
|
||||||
{
|
|
||||||
NodeType: 2,
|
|
||||||
Path: []byte{9},
|
|
||||||
Key: common.HexToHash("0x9397e33dedda4763aea143fc6151ebcd9a93f62db7a6a556d46c585d82ad2afc"),
|
|
||||||
Value: []byte{248, 105, 160, 51, 151, 227, 61, 237, 218, 71, 99, 174, 161, 67, 252, 97, 81, 235, 205, 154, 147, 246, 45, 183, 166, 165, 86, 212, 108, 88, 93, 130, 173, 42, 252, 184, 70, 248, 68, 1, 128, 160, 167, 171, 204, 110, 30, 52, 74, 189, 215, 97, 245, 227, 176, 141, 250, 205, 8, 182, 138, 101, 51, 150, 155, 174, 234, 246, 30, 128, 253, 230, 36, 228, 160, 47, 62, 207, 242, 160, 167, 130, 233, 6, 187, 196, 80, 96, 6, 188, 150, 74, 176, 201, 7, 65, 32, 174, 97, 1, 76, 26, 86, 141, 49, 62, 214},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
StorageNodes: [][]snapt.Node{
|
|
||||||
{},
|
|
||||||
{},
|
|
||||||
{
|
|
||||||
// Branch root node.
|
|
||||||
{
|
|
||||||
NodeType: 0,
|
|
||||||
Path: []byte{},
|
|
||||||
Value: []byte{248, 81, 128, 128, 160, 79, 197, 241, 58, 178, 249, 186, 12, 45, 168, 139, 1, 81, 171, 14, 124, 244, 216, 93, 8, 204, 164, 92, 205, 146, 60, 106, 183, 99, 35, 235, 40, 128, 128, 128, 128, 128, 128, 128, 128, 160, 244, 152, 74, 17, 246, 26, 41, 33, 69, 97, 65, 223, 136, 222, 110, 26, 113, 13, 40, 104, 27, 145, 175, 121, 76, 90, 114, 30, 71, 131, 156, 215, 128, 128, 128, 128, 128},
|
|
||||||
},
|
|
||||||
// Storage node for contract Test1 state variable count.
|
|
||||||
{
|
|
||||||
NodeType: 2,
|
|
||||||
Path: []byte{2},
|
|
||||||
Key: common.HexToHash("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"),
|
|
||||||
Value: []byte{226, 160, 57, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1},
|
|
||||||
},
|
|
||||||
// Storage node for contract Test1 state variable initialCount.
|
|
||||||
{
|
|
||||||
NodeType: 2,
|
|
||||||
Path: []byte{11},
|
|
||||||
Key: common.HexToHash("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"),
|
|
||||||
Value: []byte{226, 160, 49, 14, 45, 82, 118, 18, 7, 59, 38, 238, 205, 253, 113, 126, 106, 50, 12, 244, 75, 74, 250, 194, 176, 115, 45, 159, 203, 226, 183, 250, 12, 246, 1},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Expected state nodes at snapshot height.
var ExpectedStateNodes = []snapt.Node{
	{
		NodeType: 0,
		Path: []byte{},
		Value: []byte{248, 113, 128, 128, 128, 128, 128, 128, 160, 70, 53, 190, 199, 124, 254, 86, 213, 42, 126, 117, 155, 2, 223, 56, 167, 130, 118, 10, 150, 65, 46, 207, 169, 167, 250, 209, 64, 37, 205, 153, 51, 128, 128, 160, 165, 200, 234, 64, 112, 157, 130, 31, 236, 38, 20, 68, 99, 247, 81, 161, 76, 62, 186, 246, 84, 121, 39, 155, 102, 134, 188, 109, 89, 220, 31, 212, 128, 128, 160, 214, 109, 199, 206, 145, 11, 213, 44, 206, 214, 36, 181, 134, 92, 243, 178, 58, 88, 158, 42, 31, 125, 71, 148, 188, 122, 252, 100, 250, 182, 85, 159, 128, 128, 128, 128},
	},
	{
		NodeType: 2,
		Path: []byte{6},
		Key: common.HexToHash("0x67ab3c0dd775f448af7fb41243415ed6fb975d1530a2d828f69bea7346231ad7"),
		Value: []byte{248, 118, 160, 55, 171, 60, 13, 215, 117, 244, 72, 175, 127, 180, 18, 67, 65, 94, 214, 251, 151, 93, 21, 48, 162, 216, 40, 246, 155, 234, 115, 70, 35, 26, 215, 184, 83, 248, 81, 3, 141, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112},
	},
	{
		NodeType: 2,
		Path: []byte{9},
		Key: common.HexToHash("0x9397e33dedda4763aea143fc6151ebcd9a93f62db7a6a556d46c585d82ad2afc"),
		Value: []byte{248, 105, 160, 51, 151, 227, 61, 237, 218, 71, 99, 174, 161, 67, 252, 97, 81, 235, 205, 154, 147, 246, 45, 183, 166, 165, 86, 212, 108, 88, 93, 130, 173, 42, 252, 184, 70, 248, 68, 1, 128, 160, 167, 171, 204, 110, 30, 52, 74, 189, 215, 97, 245, 227, 176, 141, 250, 205, 8, 182, 138, 101, 51, 150, 155, 174, 234, 246, 30, 128, 253, 230, 36, 228, 160, 47, 62, 207, 242, 160, 167, 130, 233, 6, 187, 196, 80, 96, 6, 188, 150, 74, 176, 201, 7, 65, 32, 174, 97, 1, 76, 26, 86, 141, 49, 62, 214},
	},
	{
		NodeType: 2,
		Path: []byte{10},
		Key: common.HexToHash("0xa44b5f4b47ded891709350af6a6e4d56602228a70279bdad4f0f64042445b4b9"),
		Value: []byte{248, 105, 160, 52, 75, 95, 75, 71, 222, 216, 145, 112, 147, 80, 175, 106, 110, 77, 86, 96, 34, 40, 167, 2, 121, 189, 173, 79, 15, 100, 4, 36, 69, 180, 185, 184, 70, 248, 68, 1, 128, 160, 130, 30, 37, 86, 162, 144, 200, 100, 5, 248, 22, 10, 45, 102, 32, 66, 164, 49, 186, 69, 107, 157, 178, 101, 199, 155, 184, 55, 192, 75, 229, 240, 160, 86, 36, 245, 233, 5, 167, 42, 118, 181, 35, 178, 216, 149, 56, 146, 147, 19, 8, 140, 137, 234, 0, 160, 27, 220, 33, 204, 6, 152, 239, 177, 52},
	},
}

type StorageNodeWithState struct {
	snapt.Node
	StatePath []byte
}

// Expected storage nodes at snapshot height.
var ExpectedStorageNodes = []StorageNodeWithState{
	{
		Node: snapt.Node{
			NodeType: 0,
			Path: []byte{},
			Value: []byte{248, 81, 128, 128, 160, 79, 197, 241, 58, 178, 249, 186, 12, 45, 168, 139, 1, 81, 171, 14, 124, 244, 216, 93, 8, 204, 164, 92, 205, 146, 60, 106, 183, 99, 35, 235, 40, 128, 128, 128, 128, 128, 128, 128, 128, 160, 244, 152, 74, 17, 246, 26, 41, 33, 69, 97, 65, 223, 136, 222, 110, 26, 113, 13, 40, 104, 27, 145, 175, 121, 76, 90, 114, 30, 71, 131, 156, 215, 128, 128, 128, 128, 128},
		},
		StatePath: []byte{9},
	},
	{
		Node: snapt.Node{
			NodeType: 2,
			Path: []byte{2},
			Key: common.HexToHash("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"),
			Value: []byte{226, 160, 57, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1},
		},
		StatePath: []byte{9},
	},
	{
		Node: snapt.Node{
			NodeType: 2,
			Path: []byte{11},
			Key: common.HexToHash("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"),
			Value: []byte{226, 160, 49, 14, 45, 82, 118, 18, 7, 59, 38, 238, 205, 253, 113, 126, 106, 50, 12, 244, 75, 74, 250, 194, 176, 115, 45, 159, 203, 226, 183, 250, 12, 246, 1},
		},
		StatePath: []byte{9},
	},
	{
		Node: snapt.Node{
			NodeType: 2,
			Path: []byte{},
			Key: common.HexToHash("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"),
			Value: []byte{227, 161, 32, 41, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1},
		},
		StatePath: []byte{10},
	},
}

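// Note on the Key values above: each storage Key is the keccak256 hash of the
// 32-byte, left-padded storage slot index (slot 0 holds count, slot 1 holds
// initialCount in contract Test1 below). A minimal sketch of the derivation,
// assuming only go-ethereum's common and crypto packages (illustration, not
// part of the fixtures):
/*
	for slot := byte(0); slot < 2; slot++ {
		key := crypto.Keccak256Hash(common.LeftPadBytes([]byte{slot}, 32))
		fmt.Printf("slot %d -> %s\n", slot, key.Hex())
	}
	// slot 0 -> 0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563
	// slot 1 -> 0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6
*/
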
// Block header at snapshot height.
// Required in database when executing inPlaceStateSnapshot.
var Block4_Header = types.Header{
	ParentHash: common.HexToHash("0x9fc4aaaab26f0b43ac609c99ae50925e5dc9a25f103c0511fcff38c6b3158302"),
	UncleHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
	Coinbase: common.HexToAddress("0x0000000000000000000000000000000000000000"),
	Root: common.HexToHash("0x53580584816f617295ea26c0e17641e0120cab2f0a8ffb53a866fd53aa8e8c2d"),
	TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
	ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
	Bloom: types.Bloom{},
	Difficulty: big.NewInt(2),
	Number: big.NewInt(4),
	GasLimit: 4704588,
	GasUsed: 0,
	Time: 1492010458,
	Extra: []byte{215, 131, 1, 6, 0, 132, 103, 101, 116, 104, 135, 103, 111, 49, 46, 55, 46, 51, 133, 108, 105, 110, 117, 120, 0, 0, 0, 0, 0, 0, 0, 0, 159, 30, 250, 30, 250, 114, 175, 19, 140, 145, 89, 102, 198, 57, 84, 74, 2, 85, 230, 40, 142, 24, 140, 34, 206, 145, 104, 193, 13, 190, 70, 218, 61, 136, 180, 170, 6, 89, 48, 17, 159, 184, 134, 33, 11, 240, 26, 8, 79, 222, 93, 59, 196, 141, 138, 163, 139, 202, 146, 228, 252, 197, 33, 81, 0},
	MixDigest: common.Hash{},
	Nonce: types.BlockNonce{},
	BaseFee: nil,
}

/*
pragma solidity ^0.8.0;

contract Test1 {
    uint256 private count;
    uint256 private initialCount;

    event Increment(uint256 count);

    constructor() {
        initialCount = 1;
    }

    function incrementCount() public returns (uint256) {
        count = count + 1;
        emit Increment(count);

        return count;
    }
}
*/

/*
pragma solidity ^0.8.0;

contract Test2 {
    uint256 private test;

    constructor() {
        test = 1;
    }
}
*/
@@ -1,359 +0,0 @@
package fixture

var Block1_StateNodePaths = [][]byte{
	[]byte{},
	[]byte{0},
	[]byte{0, 0},
	[]byte{0, 2},
	[]byte{0, 2, 1},
	[]byte{0, 2, 8},
	[]byte{0, 2, 12},
	[]byte{0, 3},
	[]byte{0, 4},
	[]byte{0, 6},
	[]byte{0, 6, 3},
	[]byte{0, 6, 13},
	[]byte{0, 7},
	[]byte{0, 8},
	[]byte{0, 8, 7},
	[]byte{0, 8, 11},
	[]byte{0, 9},
	[]byte{0, 9, 9},
	[]byte{0, 9, 10},
	[]byte{0, 12},
	[]byte{0, 13},
	[]byte{0, 14},
	[]byte{1},
	[]byte{1, 2},
	[]byte{1, 2, 5},
	[]byte{1, 2, 7},
	[]byte{1, 3},
	[]byte{1, 3, 1},
	[]byte{1, 3, 11},
	[]byte{1, 4},
	[]byte{1, 5},
	[]byte{1, 5, 11},
	[]byte{1, 5, 12},
	[]byte{1, 5, 15},
	[]byte{1, 6},
	[]byte{1, 8},
	[]byte{1, 10},
	[]byte{1, 13},
	[]byte{1, 14},
	[]byte{1, 14, 2},
	[]byte{1, 14, 11},
	[]byte{1, 15},
	[]byte{1, 15, 9},
	[]byte{1, 15, 15},
	[]byte{2},
	[]byte{2, 0},
	[]byte{2, 0, 9},
	[]byte{2, 0, 14},
	[]byte{2, 1},
	[]byte{2, 1, 1},
	[]byte{2, 1, 3},
	[]byte{2, 1, 14},
	[]byte{2, 5},
	[]byte{2, 6},
	[]byte{2, 9},
	[]byte{2, 9, 1},
	[]byte{2, 9, 7},
	[]byte{2, 11},
	[]byte{2, 11, 7},
	[]byte{2, 11, 13},
	[]byte{2, 13},
	[]byte{2, 13, 1},
	[]byte{2, 13, 15},
	[]byte{2, 15},
	[]byte{3},
	[]byte{3, 0},
	[]byte{3, 0, 0},
	[]byte{3, 0, 1},
	[]byte{3, 2},
	[]byte{3, 2, 3},
	[]byte{3, 2, 15},
	[]byte{3, 3},
	[]byte{3, 4},
	[]byte{3, 4, 2},
	[]byte{3, 4, 4},
	[]byte{3, 4, 5},
	[]byte{3, 6},
	[]byte{3, 8},
	[]byte{3, 9},
	[]byte{3, 10},
	[]byte{3, 10, 2},
	[]byte{3, 10, 8},
	[]byte{3, 10, 12},
	[]byte{3, 11},
	[]byte{3, 12},
	[]byte{3, 13},
	[]byte{3, 14},
	[]byte{3, 14, 4},
	[]byte{3, 14, 9},
	[]byte{3, 14, 14},
	[]byte{3, 14, 14, 10},
	[]byte{3, 14, 14, 15},
	[]byte{4},
	[]byte{4, 0},
	[]byte{4, 0, 6},
	[]byte{4, 0, 15},
	[]byte{4, 1},
	[]byte{4, 2},
	[]byte{4, 2, 1},
	[]byte{4, 2, 11},
	[]byte{4, 3},
	[]byte{4, 5},
	[]byte{4, 6},
	[]byte{4, 7},
	[]byte{4, 8},
	[]byte{4, 11},
	[]byte{4, 11, 6},
	[]byte{4, 11, 9},
	[]byte{4, 11, 12},
	[]byte{4, 14},
	[]byte{5},
	[]byte{5, 0},
	[]byte{5, 0, 3},
	[]byte{5, 0, 9},
	[]byte{5, 0, 15},
	[]byte{5, 1},
	[]byte{5, 1, 14},
	[]byte{5, 1, 15},
	[]byte{5, 2},
	[]byte{5, 2, 8},
	[]byte{5, 2, 10},
	[]byte{5, 3},
	[]byte{5, 4},
	[]byte{5, 4, 6},
	[]byte{5, 4, 12},
	[]byte{5, 6},
	[]byte{5, 8},
	[]byte{5, 8, 3},
	[]byte{5, 8, 11},
	[]byte{5, 10},
	[]byte{5, 11},
	[]byte{5, 12},
	[]byte{5, 13},
	[]byte{5, 15},
	[]byte{6},
	[]byte{6, 0},
	[]byte{6, 2},
	[]byte{6, 2, 3},
	[]byte{6, 2, 9},
	[]byte{6, 4},
	[]byte{6, 4, 0},
	[]byte{6, 4, 0, 0},
	[]byte{6, 4, 0, 5},
	[]byte{6, 5},
	[]byte{6, 5, 4},
	[]byte{6, 5, 10},
	[]byte{6, 5, 12},
	[]byte{6, 5, 13},
	[]byte{6, 6},
	[]byte{6, 6, 0},
	[]byte{6, 6, 8},
	[]byte{6, 8},
	[]byte{6, 8, 4},
	[]byte{6, 8, 4, 2},
	[]byte{6, 8, 4, 9},
	[]byte{6, 8, 9},
	[]byte{6, 10},
	[]byte{6, 10, 1},
	[]byte{6, 10, 14},
	[]byte{6, 11},
	[]byte{6, 11, 2},
	[]byte{6, 11, 12},
	[]byte{6, 11, 14},
	[]byte{6, 13},
	[]byte{6, 13, 2},
	[]byte{6, 13, 12},
	[]byte{7},
	[]byte{7, 1},
	[]byte{7, 5},
	[]byte{7, 7},
	[]byte{7, 8},
	[]byte{7, 8, 2},
	[]byte{7, 8, 5},
	[]byte{7, 9},
	[]byte{7, 13},
	[]byte{7, 13, 1},
	[]byte{7, 13, 1, 0},
	[]byte{7, 13, 1, 13},
	[]byte{7, 13, 7},
	[]byte{7, 14},
	[]byte{7, 14, 8},
	[]byte{7, 14, 11},
	[]byte{8},
	[]byte{8, 0},
	[]byte{8, 0, 3},
	[]byte{8, 0, 11},
	[]byte{8, 2},
	[]byte{8, 4},
	[]byte{8, 8},
	[]byte{8, 9},
	[]byte{8, 9, 3},
	[]byte{8, 9, 13},
	[]byte{8, 10},
	[]byte{8, 12},
	[]byte{8, 12, 3},
	[]byte{8, 12, 15},
	[]byte{8, 13},
	[]byte{8, 15},
	[]byte{8, 15, 8},
	[]byte{8, 15, 13},
	[]byte{9},
	[]byte{9, 0},
	[]byte{9, 5},
	[]byte{9, 6},
	[]byte{9, 6, 10},
	[]byte{9, 6, 14},
	[]byte{9, 7},
	[]byte{9, 9},
	[]byte{9, 14},
	[]byte{9, 15},
	[]byte{9, 15, 0},
	[]byte{9, 15, 4},
	[]byte{9, 15, 10},
	[]byte{10},
	[]byte{10, 0},
	[]byte{10, 0, 9},
	[]byte{10, 0, 10},
	[]byte{10, 0, 15},
	[]byte{10, 2},
	[]byte{10, 3},
	[]byte{10, 6},
	[]byte{10, 8},
	[]byte{10, 9},
	[]byte{10, 10},
	[]byte{10, 10, 5},
	[]byte{10, 10, 8},
	[]byte{10, 13},
	[]byte{10, 13, 0},
	[]byte{10, 13, 13},
	[]byte{10, 14},
	[]byte{10, 14, 4},
	[]byte{10, 14, 11},
	[]byte{10, 14, 11, 8},
	[]byte{10, 14, 11, 14},
	[]byte{10, 15},
	[]byte{11},
	[]byte{11, 0},
	[]byte{11, 0, 2},
	[]byte{11, 0, 15},
	[]byte{11, 1},
	[]byte{11, 2},
	[]byte{11, 3},
	[]byte{11, 4},
	[]byte{11, 5},
	[]byte{11, 7},
	[]byte{11, 7, 12},
	[]byte{11, 7, 15},
	[]byte{11, 8},
	[]byte{11, 8, 8},
	[]byte{11, 8, 15},
	[]byte{11, 9},
	[]byte{11, 11},
	[]byte{11, 12},
	[]byte{11, 13},
	[]byte{11, 14},
	[]byte{11, 14, 0},
	[]byte{11, 14, 0, 1},
	[]byte{11, 14, 0, 3},
	[]byte{11, 14, 8},
	[]byte{11, 14, 13},
	[]byte{12},
	[]byte{12, 0},
	[]byte{12, 0, 0},
	[]byte{12, 0, 1},
	[]byte{12, 0, 1, 3},
	[]byte{12, 0, 1, 11},
	[]byte{12, 0, 15},
	[]byte{12, 2},
	[]byte{12, 2, 9},
	[]byte{12, 2, 12},
	[]byte{12, 4},
	[]byte{12, 5},
	[]byte{12, 6},
	[]byte{12, 6, 0},
	[]byte{12, 6, 4},
	[]byte{12, 6, 14},
	[]byte{12, 7},
	[]byte{12, 7, 0},
	[]byte{12, 7, 12},
	[]byte{12, 7, 13},
	[]byte{12, 9},
	[]byte{12, 11},
	[]byte{12, 12},
	[]byte{13},
	[]byte{13, 2},
	[]byte{13, 2, 0},
	[]byte{13, 2, 2},
	[]byte{13, 2, 4},
	[]byte{13, 3},
	[]byte{13, 3, 7},
	[]byte{13, 3, 10},
	[]byte{13, 5},
	[]byte{13, 8},
	[]byte{13, 8, 1},
	[]byte{13, 8, 15},
	[]byte{13, 9},
	[]byte{13, 9, 0},
	[]byte{13, 9, 14},
	[]byte{13, 10},
	[]byte{13, 12},
	[]byte{13, 12, 8},
	[]byte{13, 12, 11},
	[]byte{13, 13},
	[]byte{13, 13, 7},
	[]byte{13, 13, 12},
	[]byte{13, 14},
	[]byte{14},
	[]byte{14, 0},
	[]byte{14, 1},
	[]byte{14, 2},
	[]byte{14, 2, 2},
	[]byte{14, 2, 12},
	[]byte{14, 3},
	[]byte{14, 4},
	[]byte{14, 5},
	[]byte{14, 6},
	[]byte{14, 6, 9},
	[]byte{14, 6, 12},
	[]byte{14, 7},
	[]byte{14, 7, 4},
	[]byte{14, 7, 12},
	[]byte{14, 8},
	[]byte{14, 8, 3},
	[]byte{14, 8, 12},
	[]byte{14, 8, 12, 0},
	[]byte{14, 8, 12, 6},
	[]byte{14, 10},
	[]byte{14, 10, 6},
	[]byte{14, 10, 12},
	[]byte{14, 11},
	[]byte{14, 11, 8},
	[]byte{14, 11, 13},
	[]byte{14, 12},
	[]byte{14, 14},
	[]byte{14, 14, 3},
	[]byte{14, 14, 9},
	[]byte{15},
	[]byte{15, 0},
	[]byte{15, 5},
	[]byte{15, 6},
	[]byte{15, 9},
	[]byte{15, 9, 0},
	[]byte{15, 9, 2},
	[]byte{15, 9, 3},
	[]byte{15, 11},
	[]byte{15, 11, 1},
	[]byte{15, 11, 6},
	[]byte{15, 12},
	[]byte{15, 12, 3},
	[]byte{15, 12, 14},
	[]byte{15, 12, 14, 7},
	[]byte{15, 12, 14, 13},
	[]byte{15, 13},
	[]byte{15, 14},
	[]byte{15, 15},
}
@@ -1,313 +0,0 @@
package fixture

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"

	snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
)

var Block1_Header = types.Header{
	ParentHash: common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177"),
	UncleHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
	Coinbase: common.HexToAddress("0x0000000000000000000000000000000000000000"),
	Root: common.HexToHash("0x53580584816f617295ea26c0e17641e0120cab2f0a8ffb53a866fd53aa8e8c2d"),
	TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
	ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
	Bloom: types.Bloom{},
	Difficulty: big.NewInt(2),
	Number: big.NewInt(1),
	GasLimit: 4704588,
	GasUsed: 0,
	Time: 1492010458,
	Extra: []byte{215, 131, 1, 6, 0, 132, 103, 101, 116, 104, 135, 103, 111, 49, 46, 55, 46, 51, 133, 108, 105, 110, 117, 120, 0, 0, 0, 0, 0, 0, 0, 0, 159, 30, 250, 30, 250, 114, 175, 19, 140, 145, 89, 102, 198, 57, 84, 74, 2, 85, 230, 40, 142, 24, 140, 34, 206, 145, 104, 193, 13, 190, 70, 218, 61, 136, 180, 170, 6, 89, 48, 17, 159, 184, 134, 33, 11, 240, 26, 8, 79, 222, 93, 59, 196, 141, 138, 163, 139, 202, 146, 228, 252, 197, 33, 81, 0},
	MixDigest: common.Hash{},
	Nonce: types.BlockNonce{},
	BaseFee: nil,
}

var Block1_StateNode0 = snapt.Node{
	NodeType: 0,
	Path: []byte{12, 0},
	Key: common.Hash{},
	Value: []byte{248, 113, 160, 147, 141, 92, 6, 119, 63, 191, 125, 121, 193, 230, 153, 223, 49, 102, 109, 236, 50, 44, 161, 215, 28, 224, 171, 111, 118, 230, 79, 99, 18, 99, 4, 160, 117, 126, 95, 187, 60, 115, 90, 36, 51, 167, 59, 86, 20, 175, 63, 118, 94, 230, 107, 202, 41, 253, 234, 165, 214, 221, 181, 45, 9, 202, 244, 148, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 160, 247, 170, 155, 102, 71, 245, 140, 90, 255, 89, 193, 131, 99, 31, 85, 161, 78, 90, 0, 204, 46, 253, 15, 71, 120, 19, 109, 123, 255, 0, 188, 27, 128},
}

// Header for last block at height 32
var Chain2_Block32_Header = types.Header{
	ParentHash: common.HexToHash("0x6983c921c053d1f637449191379f61ba844013c71e5ebfacaff77f8a8bd97042"),
	UncleHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
	Coinbase: common.HexToAddress("0x0000000000000000000000000000000000000000"),
	Root: common.HexToHash("0xeaa5866eb37e33fc3cfe1376b2ad7f465e7213c14e6834e1cfcef9552b2e5d5d"),
	TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
	ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
	Bloom: types.Bloom{},
	Difficulty: big.NewInt(2),
	Number: big.NewInt(32),
	GasLimit: 8253773,
	GasUsed: 0,
	Time: 1658408469,
	Extra: []byte{216, 131, 1, 10, 19, 132, 103, 101, 116, 104, 136, 103, 111, 49, 46, 49, 56, 46, 50, 133, 108, 105, 110, 117, 120, 0, 0, 0, 0, 0, 0, 0, 113, 250, 240, 25, 148, 32, 193, 94, 196, 10, 99, 63, 251, 130, 170, 0, 176, 201, 149, 55, 230, 58, 218, 112, 84, 153, 122, 83, 134, 52, 176, 99, 53, 54, 63, 12, 226, 81, 38, 176, 57, 117, 92, 205, 237, 81, 203, 232, 220, 228, 166, 254, 206, 136, 7, 253, 2, 61, 47, 217, 235, 24, 140, 92, 1},
	MixDigest: common.Hash{},
	Nonce: types.BlockNonce{},
	BaseFee: nil,
}

// State nodes for all paths at height 32
// Total 7
var Chain2_Block32_StateNodes = []snapt.Node{
	{
		NodeType: 0,
		Path: []byte{},
		Key: common.Hash{},
		Value: []byte{248, 145, 128, 128, 128, 160, 151, 6, 152, 177, 246, 151, 39, 79, 71, 219, 192, 153, 253, 0, 46, 66, 56, 238, 116, 176, 237, 244, 79, 132, 49, 29, 30, 82, 108, 53, 191, 204, 128, 128, 160, 46, 224, 200, 157, 30, 24, 225, 92, 222, 131, 123, 169, 124, 86, 228, 124, 79, 136, 236, 83, 185, 22, 67, 136, 5, 73, 46, 110, 136, 138, 101, 63, 128, 128, 160, 104, 220, 31, 84, 240, 26, 100, 148, 110, 49, 52, 120, 81, 119, 30, 251, 196, 107, 11, 134, 124, 238, 93, 61, 109, 109, 181, 208, 10, 189, 17, 92, 128, 128, 160, 171, 149, 11, 254, 75, 39, 224, 164, 133, 151, 153, 47, 109, 134, 15, 169, 139, 206, 132, 93, 220, 210, 0, 225, 235, 118, 121, 247, 173, 12, 135, 133, 128, 128, 128, 128},
	},
	{
		NodeType: 0,
		Path: []byte{3},
		Key: common.Hash{},
		Value: []byte{248, 81, 128, 128, 128, 160, 209, 34, 171, 171, 30, 147, 168, 199, 137, 152, 249, 118, 14, 166, 1, 169, 116, 224, 82, 196, 237, 83, 255, 188, 228, 197, 7, 178, 144, 137, 77, 55, 128, 128, 128, 128, 128, 160, 135, 96, 108, 173, 177, 63, 201, 196, 26, 204, 72, 118, 17, 30, 76, 117, 155, 63, 68, 187, 4, 249, 78, 69, 161, 82, 178, 234, 164, 48, 158, 173, 128, 128, 128, 128, 128, 128, 128},
	},
	{
		NodeType: 2,
		Path: []byte{3, 3},
		Key: common.HexToHash("0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d"),
		Value: []byte{248, 105, 160, 32, 21, 58, 188, 102, 126, 135, 59, 96, 54, 200, 164, 107, 221, 132, 126, 42, 222, 63, 137, 185, 51, 28, 120, 239, 37, 83, 254, 161, 148, 197, 13, 184, 70, 248, 68, 1, 128, 160, 168, 127, 48, 6, 204, 116, 51, 247, 216, 182, 191, 182, 185, 124, 223, 202, 239, 15, 67, 91, 253, 165, 42, 2, 54, 10, 211, 250, 242, 149, 205, 139, 160, 224, 22, 140, 8, 116, 27, 79, 113, 64, 185, 215, 180, 38, 38, 236, 164, 5, 87, 211, 15, 88, 153, 138, 185, 94, 186, 125, 137, 164, 198, 141, 192},
	},
	{
		NodeType: 2,
		Path: []byte{3, 9},
		Key: common.HexToHash("0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb"),
		Value: []byte{248, 105, 160, 32, 252, 41, 63, 199, 2, 228, 43, 156, 2, 63, 9, 72, 38, 84, 93, 180, 47, 192, 253, 242, 186, 3, 27, 181, 34, 213, 239, 145, 122, 110, 219, 184, 70, 248, 68, 1, 128, 160, 25, 80, 158, 144, 166, 222, 32, 247, 189, 42, 34, 60, 40, 240, 56, 105, 251, 184, 132, 209, 219, 59, 60, 16, 221, 204, 228, 74, 76, 113, 37, 226, 160, 224, 22, 140, 8, 116, 27, 79, 113, 64, 185, 215, 180, 38, 38, 236, 164, 5, 87, 211, 15, 88, 153, 138, 185, 94, 186, 125, 137, 164, 198, 141, 192},
	},
	{
		NodeType: 2,
		Path: []byte{6},
		Key: common.HexToHash("0x67ab3c0dd775f448af7fb41243415ed6fb975d1530a2d828f69bea7346231ad7"),
		Value: []byte{248, 118, 160, 55, 171, 60, 13, 215, 117, 244, 72, 175, 127, 180, 18, 67, 65, 94, 214, 251, 151, 93, 21, 48, 162, 216, 40, 246, 155, 234, 115, 70, 35, 26, 215, 184, 83, 248, 81, 10, 141, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112},
	},
	{
		NodeType: 2,
		Path: []byte{9},
		Key: common.HexToHash("0x9397e33dedda4763aea143fc6151ebcd9a93f62db7a6a556d46c585d82ad2afc"),
		Value: []byte{248, 105, 160, 51, 151, 227, 61, 237, 218, 71, 99, 174, 161, 67, 252, 97, 81, 235, 205, 154, 147, 246, 45, 183, 166, 165, 86, 212, 108, 88, 93, 130, 173, 42, 252, 184, 70, 248, 68, 1, 128, 160, 54, 174, 96, 33, 243, 186, 113, 120, 188, 222, 254, 210, 63, 40, 4, 130, 154, 156, 66, 247, 130, 93, 88, 113, 144, 78, 47, 252, 174, 140, 130, 45, 160, 29, 80, 58, 104, 206, 141, 36, 93, 124, 217, 67, 93, 183, 43, 71, 98, 114, 126, 124, 105, 229, 48, 218, 194, 109, 83, 20, 76, 13, 102, 156, 130},
	},
	{
		NodeType: 2,
		Path: []byte{12},
		Key: common.HexToHash("0xcabc5edb305583e33f66322ceee43088aa99277da772feb5053512d03a0a702b"),
		Value: []byte{248, 105, 160, 58, 188, 94, 219, 48, 85, 131, 227, 63, 102, 50, 44, 238, 228, 48, 136, 170, 153, 39, 125, 167, 114, 254, 181, 5, 53, 18, 208, 58, 10, 112, 43, 184, 70, 248, 68, 1, 128, 160, 54, 174, 96, 33, 243, 186, 113, 120, 188, 222, 254, 210, 63, 40, 4, 130, 154, 156, 66, 247, 130, 93, 88, 113, 144, 78, 47, 252, 174, 140, 130, 45, 160, 29, 80, 58, 104, 206, 141, 36, 93, 124, 217, 67, 93, 183, 43, 71, 98, 114, 126, 124, 105, 229, 48, 218, 194, 109, 83, 20, 76, 13, 102, 156, 130},
	},
}

// Storage nodes for all paths at height 32
// Total 18
var Chain2_Block32_StorageNodes = []StorageNodeWithState{
	{
		Node: snapt.Node{
			NodeType: 0,
			Path: []byte{},
			Key: common.HexToHash(""),
			Value: []byte{248, 145, 128, 128, 128, 128, 160, 46, 77, 227, 140, 57, 224, 108, 238, 40, 82, 145, 79, 210, 174, 54, 248, 0, 145, 137, 64, 229, 230, 148, 145, 250, 132, 89, 198, 8, 249, 245, 133, 128, 160, 146, 250, 117, 217, 106, 75, 51, 124, 196, 244, 29, 16, 47, 173, 5, 90, 86, 19, 15, 48, 179, 174, 60, 171, 112, 154, 92, 70, 232, 164, 141, 165, 128, 160, 107, 250, 27, 137, 190, 180, 7, 172, 62, 97, 13, 157, 215, 114, 55, 219, 14, 244, 163, 155, 192, 255, 34, 143, 154, 149, 33, 227, 166, 135, 164, 93, 128, 128, 128, 160, 173, 131, 221, 2, 30, 147, 11, 230, 58, 166, 18, 25, 90, 56, 198, 126, 196, 130, 131, 1, 213, 112, 129, 155, 96, 143, 121, 231, 218, 97, 216, 200, 128, 128, 128, 128},
		},
		StatePath: []byte{3, 3},
	},
	{
		Node: snapt.Node{
			NodeType: 0,
			Path: []byte{4},
			Key: common.HexToHash(""),
			Value: []byte{248, 81, 160, 167, 145, 134, 15, 219, 140, 96, 62, 101, 242, 176, 129, 164, 160, 200, 221, 13, 1, 246, 167, 156, 45, 205, 192, 88, 236, 235, 80, 105, 178, 123, 2, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 160, 18, 136, 22, 150, 26, 170, 67, 152, 182, 246, 95, 49, 193, 199, 219, 163, 97, 25, 243, 70, 126, 235, 163, 59, 44, 16, 37, 37, 247, 50, 229, 70, 128, 128},
		},
		StatePath: []byte{3, 3},
	},
	{
		Node: snapt.Node{
			NodeType: 2,
			Path: []byte{4, 0},
			Key: common.HexToHash("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"),
			Value: []byte{236, 160, 32, 87, 135, 250, 18, 168, 35, 224, 242, 183, 99, 28, 196, 27, 59, 168, 130, 139, 51, 33, 202, 129, 17, 17, 250, 117, 205, 58, 163, 187, 90, 206, 138, 137, 54, 53, 201, 173, 197, 222, 160, 0, 0},
		},
		StatePath: []byte{3, 3},
	},
	{
		Node: snapt.Node{
			NodeType: 2,
			Path: []byte{4, 14},
			Key: common.HexToHash("0x4e2cec6f4784547e5042a1638086e3188929f34f3c0005f8dec366c96e8195ac"),
			Value: []byte{226, 160, 32, 44, 236, 111, 71, 132, 84, 126, 80, 66, 161, 99, 128, 134, 227, 24, 137, 41, 243, 79, 60, 0, 5, 248, 222, 195, 102, 201, 110, 129, 149, 172, 100},
		},
		StatePath: []byte{3, 3},
	},
	{
		Node: snapt.Node{
			NodeType: 2,
			Path: []byte{6},
			Key: common.HexToHash("0x6aa02a11dd4d2597318b71d493b145ddf6ae0817a9d3947f45d529a6a75f2bef"),
			Value: []byte{236, 160, 58, 160, 42, 17, 221, 77, 37, 151, 49, 139, 113, 212, 147, 177, 69, 221, 246, 174, 8, 23, 169, 211, 148, 127, 69, 213, 41, 166, 167, 95, 43, 239, 138, 137, 54, 53, 201, 173, 197, 222, 159, 255, 156},
		},
		StatePath: []byte{3, 3},
	},
	{
		Node: snapt.Node{
			NodeType: 2,
			Path: []byte{8},
			Key: common.HexToHash("0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b"),
			Value: []byte{248, 67, 160, 58, 53, 172, 251, 193, 95, 248, 26, 57, 174, 125, 52, 79, 215, 9, 242, 142, 134, 0, 180, 170, 140, 101, 198, 182, 75, 254, 127, 227, 107, 209, 155, 161, 160, 71, 76, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6},
		},
		StatePath: []byte{3, 9},
	},
	{
		Node: snapt.Node{
			NodeType: 2,
			Path: []byte{8},
			Key: common.HexToHash("0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b"),
			Value: []byte{248, 67, 160, 58, 53, 172, 251, 193, 95, 248, 26, 57, 174, 125, 52, 79, 215, 9, 242, 142, 134, 0, 180, 170, 140, 101, 198, 182, 75, 254, 127, 227, 107, 209, 155, 161, 160, 71, 76, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6},
		},
		StatePath: []byte{3, 3},
	},
	{
		Node: snapt.Node{
			NodeType: 2,
			Path: []byte{12},
			Key: common.HexToHash("0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b"),
			Value: []byte{248, 67, 160, 50, 87, 90, 14, 158, 89, 60, 0, 249, 89, 248, 201, 47, 18, 219, 40, 105, 195, 57, 90, 59, 5, 2, 208, 94, 37, 22, 68, 111, 113, 248, 91, 161, 160, 71, 111, 108, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8},
		},
		StatePath: []byte{3, 9},
	},
	{
		Node: snapt.Node{
			NodeType: 2,
			Path: []byte{12},
			Key: common.HexToHash("0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b"),
			Value: []byte{248, 67, 160, 50, 87, 90, 14, 158, 89, 60, 0, 249, 89, 248, 201, 47, 18, 219, 40, 105, 195, 57, 90, 59, 5, 2, 208, 94, 37, 22, 68, 111, 113, 248, 91, 161, 160, 71, 111, 108, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8},
		},
		StatePath: []byte{3, 3},
	},
	{
		Node: snapt.Node{
			NodeType: 0,
			Path: []byte{},
			Key: common.HexToHash(""),
			Value: []byte{248, 145, 128, 128, 128, 128, 160, 145, 86, 15, 219, 52, 36, 164, 68, 160, 227, 156, 111, 1, 245, 112, 184, 187, 242, 26, 138, 8, 98, 129, 35, 57, 212, 165, 21, 204, 151, 229, 43, 128, 160, 250, 205, 84, 126, 141, 108, 126, 228, 162, 8, 238, 234, 141, 159, 232, 175, 70, 112, 207, 55, 165, 209, 107, 153, 54, 183, 60, 172, 194, 251, 66, 61, 128, 160, 107, 250, 27, 137, 190, 180, 7, 172, 62, 97, 13, 157, 215, 114, 55, 219, 14, 244, 163, 155, 192, 255, 34, 143, 154, 149, 33, 227, 166, 135, 164, 93, 128, 128, 128, 160, 173, 131, 221, 2, 30, 147, 11, 230, 58, 166, 18, 25, 90, 56, 198, 126, 196, 130, 131, 1, 213, 112, 129, 155, 96, 143, 121, 231, 218, 97, 216, 200, 128, 128, 128, 128},
		},
		StatePath: []byte{3, 9},
	},
	{
		Node: snapt.Node{
			NodeType: 2,
			Path: []byte{4},
			Key: common.HexToHash("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"),
			Value: []byte{236, 160, 48, 87, 135, 250, 18, 168, 35, 224, 242, 183, 99, 28, 196, 27, 59, 168, 130, 139, 51, 33, 202, 129, 17, 17, 250, 117, 205, 58, 163, 187, 90, 206, 138, 137, 54, 53, 201, 173, 197, 222, 160, 0, 0},
		},
		StatePath: []byte{3, 9},
	},
	{
		Node: snapt.Node{
			NodeType: 2,
			Path: []byte{6},
			Key: common.HexToHash("0x6aa02a11dd4d2597318b71d493b145ddf6ae0817a9d3947f45d529a6a75f2bef"),
			Value: []byte{236, 160, 58, 160, 42, 17, 221, 77, 37, 151, 49, 139, 113, 212, 147, 177, 69, 221, 246, 174, 8, 23, 169, 211, 148, 127, 69, 213, 41, 166, 167, 95, 43, 239, 138, 137, 54, 53, 201, 173, 197, 222, 160, 0, 0},
		},
		StatePath: []byte{3, 9},
	},
	{
		Node: snapt.Node{
			NodeType: 0,
			Path: []byte{},
			Key: common.HexToHash(""),
			Value: []byte{248, 81, 128, 128, 160, 79, 197, 241, 58, 178, 249, 186, 12, 45, 168, 139, 1, 81, 171, 14, 124, 244, 216, 93, 8, 204, 164, 92, 205, 146, 60, 106, 183, 99, 35, 235, 40, 128, 128, 128, 128, 128, 128, 128, 128, 160, 82, 154, 228, 80, 107, 126, 132, 72, 3, 170, 88, 197, 100, 216, 50, 21, 226, 183, 86, 42, 208, 239, 184, 183, 152, 93, 188, 113, 224, 234, 218, 43, 128, 128, 128, 128, 128},
		},
		StatePath: []byte{12},
	},
	{
		Node: snapt.Node{
			NodeType: 0,
			Path: []byte{},
			Key: common.HexToHash(""),
			Value: []byte{248, 81, 128, 128, 160, 79, 197, 241, 58, 178, 249, 186, 12, 45, 168, 139, 1, 81, 171, 14, 124, 244, 216, 93, 8, 204, 164, 92, 205, 146, 60, 106, 183, 99, 35, 235, 40, 128, 128, 128, 128, 128, 128, 128, 128, 160, 82, 154, 228, 80, 107, 126, 132, 72, 3, 170, 88, 197, 100, 216, 50, 21, 226, 183, 86, 42, 208, 239, 184, 183, 152, 93, 188, 113, 224, 234, 218, 43, 128, 128, 128, 128, 128},
		},
		StatePath: []byte{9},
	},
	{
		Node: snapt.Node{
			NodeType: 2,
			Path: []byte{2},
			Key: common.HexToHash("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"),
			Value: []byte{226, 160, 57, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1},
		},
		StatePath: []byte{12},
	},
	{
		Node: snapt.Node{
			NodeType: 2,
			Path: []byte{2},
			Key: common.HexToHash("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"),
			Value: []byte{226, 160, 57, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1},
		},
		StatePath: []byte{9},
	},
	{
		Node: snapt.Node{
			NodeType: 2,
			Path: []byte{11},
			Key: common.HexToHash("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"),
			Value: []byte{226, 160, 49, 14, 45, 82, 118, 18, 7, 59, 38, 238, 205, 253, 113, 126, 106, 50, 12, 244, 75, 74, 250, 194, 176, 115, 45, 159, 203, 226, 183, 250, 12, 246, 4},
		},
		StatePath: []byte{12},
	},
	{
		Node: snapt.Node{
			NodeType: 2,
			Path: []byte{11},
			Key: common.HexToHash("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"),
			Value: []byte{226, 160, 49, 14, 45, 82, 118, 18, 7, 59, 38, 238, 205, 253, 113, 126, 106, 50, 12, 244, 75, 74, 250, 194, 176, 115, 45, 159, 203, 226, 183, 250, 12, 246, 4},
		},
		StatePath: []byte{9},
	},
}

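// Note on the leaf Values above: each is an RLP list of two items, the
// compact (hex-prefix) encoded remainder of the key past the node's Path and
// the RLP-encoded slot value. A minimal decoding sketch, assuming only
// go-ethereum's rlp package (illustration, not part of the fixtures):
/*
	// Value of the storage leaf at Path {2} under StatePath {12} above.
	enc := []byte{226, 160, 57, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1}
	var leaf [][]byte
	if err := rlp.DecodeBytes(enc, &leaf); err != nil {
		panic(err)
	}
	fmt.Printf("key remainder: %x\n", leaf[0]) // 390decd954...
	fmt.Printf("slot value:    %x\n", leaf[1]) // 01
*/
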
// Contracts used in chain2
/*
pragma solidity ^0.8.0;

contract Test {
    uint256 private count;
    uint256 private count2;

    event Increment(uint256 count);

    constructor() {
        count2 = 4;
    }

    function incrementCount() public returns (uint256) {
        count = count + 1;
        emit Increment(count);

        return count;
    }

    function destroy() public {
        selfdestruct(payable(msg.sender));
    }

    function deleteCount2() public {
        count2 = 0;
    }
}
*/

/*
pragma solidity ^0.8.0;

import "@openzeppelin/contracts/token/ERC20/ERC20.sol";

contract GLDToken is ERC20 {
    constructor(uint256 initialSupply) ERC20("Gold", "GLD") {
        _mint(msg.sender, initialSupply);
    }
}
*/
152
go.mod
@@ -1,53 +1,58 @@
-module github.com/vulcanize/ipld-eth-state-snapshot
+module github.com/cerc-io/ipld-eth-state-snapshot
 
-go 1.18
+go 1.21
 
 require (
-	github.com/ethereum/go-ethereum v1.10.23
+	github.com/cerc-io/eth-iterator-utils v0.3.1
+	github.com/cerc-io/eth-testing v0.5.1
+	github.com/cerc-io/plugeth-statediff v0.3.1
+	github.com/ethereum/go-ethereum v1.14.5
 	github.com/golang/mock v1.6.0
-	github.com/ipfs/go-cid v0.2.0
+	github.com/prometheus/client_golang v1.16.0
-	github.com/ipfs/go-ipfs-blockstore v1.2.0
+	github.com/sirupsen/logrus v1.9.3
-	github.com/ipfs/go-ipfs-ds-help v1.1.0
-	github.com/jmoiron/sqlx v1.3.5
-	github.com/multiformats/go-multihash v0.1.0
-	github.com/prometheus/client_golang v1.3.0
-	github.com/sirupsen/logrus v1.9.0
 	github.com/spf13/cobra v1.5.0
 	github.com/spf13/viper v1.12.0
-	github.com/vulcanize/go-eth-state-node-iterator v1.1.4
+	github.com/stretchr/testify v1.8.4
-	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 )
 
 require (
-	github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
+	github.com/DataDog/zstd v1.5.5 // indirect
+	github.com/Microsoft/go-winio v0.6.2 // indirect
+	github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
+	github.com/bits-and-blooms/bitset v1.10.0 // indirect
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/cockroachdb/errors v1.11.1 // indirect
+	github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
+	github.com/cockroachdb/pebble v1.1.0 // indirect
+	github.com/cockroachdb/redact v1.1.5 // indirect
+	github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
+	github.com/consensys/bavard v0.1.13 // indirect
+	github.com/consensys/gnark-crypto v0.12.1 // indirect
+	github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c // indirect
+	github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/deckarep/golang-set v1.8.0 // indirect
+	github.com/deckarep/golang-set/v2 v2.6.0 // indirect
-	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
+	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
-	github.com/fsnotify/fsnotify v1.5.4 // indirect
+	github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
-	github.com/georgysavva/scany v0.2.9 // indirect
+	github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 // indirect
-	github.com/go-kit/kit v0.10.0 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
-	github.com/go-ole/go-ole v1.2.6 // indirect
+	github.com/georgysavva/scany v1.2.1 // indirect
-	github.com/go-stack/stack v1.8.0 // indirect
+	github.com/getsentry/sentry-go v0.22.0 // indirect
+	github.com/go-ole/go-ole v1.3.0 // indirect
+	github.com/go-stack/stack v1.8.1 // indirect
+	github.com/gofrs/flock v0.8.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/golang/snappy v0.0.4 // indirect
+	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
-	github.com/google/uuid v1.3.0 // indirect
+	github.com/gorilla/websocket v1.5.0 // indirect
-	github.com/gorilla/websocket v1.4.2 // indirect
-	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
+	github.com/holiman/uint256 v1.2.4 // indirect
+	github.com/inconshreveable/log15 v2.16.0+incompatible // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
-	github.com/ipfs/bbloom v0.0.4 // indirect
+	github.com/ipfs/go-cid v0.4.1 // indirect
-	github.com/ipfs/go-block-format v0.0.3 // indirect
-	github.com/ipfs/go-datastore v0.5.1 // indirect
-	github.com/ipfs/go-ipfs-util v0.0.2 // indirect
-	github.com/ipfs/go-ipld-format v0.4.0 // indirect
-	github.com/ipfs/go-log v1.0.5 // indirect
-	github.com/ipfs/go-log/v2 v2.4.0 // indirect
-	github.com/ipfs/go-metrics-interface v0.0.1 // indirect
 	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
 	github.com/jackc/pgconn v1.11.0 // indirect
 	github.com/jackc/pgio v1.0.0 // indirect
@@ -57,57 +62,62 @@ require (
 	github.com/jackc/pgtype v1.10.0 // indirect
 	github.com/jackc/pgx/v4 v4.15.0 // indirect
 	github.com/jackc/puddle v1.2.1 // indirect
-	github.com/jbenet/goprocess v0.1.4 // indirect
+	github.com/jmoiron/sqlx v1.3.5 // indirect
-	github.com/klauspost/cpuid/v2 v2.0.9 // indirect
+	github.com/klauspost/compress v1.16.7 // indirect
-	github.com/lib/pq v1.10.6 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
+	github.com/kr/pretty v0.3.1 // indirect
+	github.com/kr/text v0.2.0 // indirect
+	github.com/lib/pq v1.10.9 // indirect
 	github.com/magiconair/properties v1.8.6 // indirect
-	github.com/mattn/go-isatty v0.0.14 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-runewidth v0.0.9 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+	github.com/mattn/go-runewidth v0.0.14 // indirect
-	github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
-	github.com/minio/sha256-simd v1.0.0 // indirect
+	github.com/minio/sha256-simd v1.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/mmcloughlin/addchain v0.4.0 // indirect
 	github.com/mr-tron/base58 v1.2.0 // indirect
-	github.com/multiformats/go-base32 v0.0.4 // indirect
+	github.com/multiformats/go-base32 v0.1.0 // indirect
-	github.com/multiformats/go-base36 v0.1.0 // indirect
+	github.com/multiformats/go-base36 v0.2.0 // indirect
-	github.com/multiformats/go-multibase v0.0.3 // indirect
+	github.com/multiformats/go-multibase v0.2.0 // indirect
-	github.com/multiformats/go-varint v0.0.6 // indirect
+	github.com/multiformats/go-multihash v0.2.3 // indirect
+	github.com/multiformats/go-varint v0.0.7 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
-	github.com/opentracing/opentracing-go v1.2.0 // indirect
+	github.com/openrelayxyz/plugeth-utils v1.5.0 // indirect
 	github.com/pelletier/go-toml v1.9.5 // indirect
-	github.com/pelletier/go-toml/v2 v2.0.1 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.5 // indirect
+	github.com/pganalyze/pg_query_go/v4 v4.2.1 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_model v0.1.0 // indirect
+	github.com/prometheus/client_model v0.4.0 // indirect
-	github.com/prometheus/common v0.7.0 // indirect
+	github.com/prometheus/common v0.44.0 // indirect
-	github.com/prometheus/procfs v0.0.8 // indirect
+	github.com/prometheus/procfs v0.11.0 // indirect
-	github.com/prometheus/tsdb v0.10.0 // indirect
+	github.com/rivo/uniseg v0.4.4 // indirect
+	github.com/rogpeppe/go-internal v1.12.0 // indirect
 	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
+	github.com/shopspring/decimal v1.2.0 // indirect
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/spf13/afero v1.8.2 // indirect
 	github.com/spf13/cast v1.5.0 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/stretchr/objx v0.2.0 // indirect
-	github.com/stretchr/testify v1.7.2 // indirect
 	github.com/subosito/gotenv v1.3.0 // indirect
+	github.com/supranational/blst v0.3.11 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect
-	github.com/tklauser/go-sysconf v0.3.5 // indirect
+	github.com/thoas/go-funk v0.9.3 // indirect
-	github.com/tklauser/numcpus v0.2.2 // indirect
+	github.com/tklauser/go-sysconf v0.3.12 // indirect
-	github.com/yusufpapurcu/wmi v1.2.2 // indirect
+	github.com/tklauser/numcpus v0.6.1 // indirect
-	go.uber.org/atomic v1.9.0 // indirect
+	github.com/yusufpapurcu/wmi v1.2.3 // indirect
-	go.uber.org/goleak v1.1.11 // indirect
+	golang.org/x/crypto v0.22.0 // indirect
-	go.uber.org/multierr v1.7.0 // indirect
+	golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
-	go.uber.org/zap v1.19.1 // indirect
+	golang.org/x/sync v0.7.0 // indirect
-	golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect
+	golang.org/x/sys v0.20.0 // indirect
-	golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
+	golang.org/x/term v0.19.0 // indirect
-	golang.org/x/text v0.3.7 // indirect
+	golang.org/x/text v0.14.0 // indirect
-	google.golang.org/protobuf v1.28.0 // indirect
+	google.golang.org/protobuf v1.33.0 // indirect
-	gopkg.in/ini.v1 v1.66.4 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
-	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	lukechampine.com/blake3 v1.1.7 // indirect
+	lukechampine.com/blake3 v1.2.1 // indirect
+	rsc.io/tmplfunc v0.0.3 // indirect
 )
 
-replace github.com/ethereum/go-ethereum v1.10.23 => github.com/vulcanize/go-ethereum v1.10.23-statediff-4.2.0-alpha
256
internal/mocks/gen_indexer.go
Normal file
@@ -0,0 +1,256 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/cerc-io/plugeth-statediff/indexer (interfaces: Indexer)

// Package mocks is a generated GoMock package.
package mocks

import (
	context "context"
	big "math/big"
	reflect "reflect"
	time "time"

	interfaces "github.com/cerc-io/plugeth-statediff/indexer/interfaces"
	models "github.com/cerc-io/plugeth-statediff/indexer/models"
	types "github.com/cerc-io/plugeth-statediff/types"
	common "github.com/ethereum/go-ethereum/common"
	types0 "github.com/ethereum/go-ethereum/core/types"
	gomock "github.com/golang/mock/gomock"
)

// MockgenIndexer is a mock of Indexer interface.
type MockgenIndexer struct {
	ctrl *gomock.Controller
	recorder *MockgenIndexerMockRecorder
}

// MockgenIndexerMockRecorder is the mock recorder for MockgenIndexer.
type MockgenIndexerMockRecorder struct {
	mock *MockgenIndexer
}

// NewMockgenIndexer creates a new mock instance.
func NewMockgenIndexer(ctrl *gomock.Controller) *MockgenIndexer {
	mock := &MockgenIndexer{ctrl: ctrl}
	mock.recorder = &MockgenIndexerMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockgenIndexer) EXPECT() *MockgenIndexerMockRecorder {
	return m.recorder
}

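// A hedged usage sketch (not generated code): wiring this mock into a test
// with gomock, using only methods defined in this file and the gomock API.
/*
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	indexer := NewMockgenIndexer(ctrl)
	indexer.EXPECT().CurrentBlock().Return(nil, nil)
*/
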
// BeginTx mocks base method.
func (m *MockgenIndexer) BeginTx(arg0 *big.Int, arg1 context.Context) interfaces.Batch {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "BeginTx", arg0, arg1)
	ret0, _ := ret[0].(interfaces.Batch)
	return ret0
}

// BeginTx indicates an expected call of BeginTx.
func (mr *MockgenIndexerMockRecorder) BeginTx(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginTx", reflect.TypeOf((*MockgenIndexer)(nil).BeginTx), arg0, arg1)
}

// ClearWatchedAddresses mocks base method.
func (m *MockgenIndexer) ClearWatchedAddresses() error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ClearWatchedAddresses")
	ret0, _ := ret[0].(error)
	return ret0
}

// ClearWatchedAddresses indicates an expected call of ClearWatchedAddresses.
func (mr *MockgenIndexerMockRecorder) ClearWatchedAddresses() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClearWatchedAddresses", reflect.TypeOf((*MockgenIndexer)(nil).ClearWatchedAddresses))
}

// Close mocks base method.
func (m *MockgenIndexer) Close() error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Close")
	ret0, _ := ret[0].(error)
	return ret0
}

// Close indicates an expected call of Close.
func (mr *MockgenIndexerMockRecorder) Close() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockgenIndexer)(nil).Close))
}

// CurrentBlock mocks base method.
func (m *MockgenIndexer) CurrentBlock() (*models.HeaderModel, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CurrentBlock")
	ret0, _ := ret[0].(*models.HeaderModel)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// CurrentBlock indicates an expected call of CurrentBlock.
func (mr *MockgenIndexerMockRecorder) CurrentBlock() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentBlock", reflect.TypeOf((*MockgenIndexer)(nil).CurrentBlock))
}

// DetectGaps mocks base method.
func (m *MockgenIndexer) DetectGaps(arg0, arg1 uint64) ([]*interfaces.BlockGap, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DetectGaps", arg0, arg1)
	ret0, _ := ret[0].([]*interfaces.BlockGap)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// DetectGaps indicates an expected call of DetectGaps.
func (mr *MockgenIndexerMockRecorder) DetectGaps(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetectGaps", reflect.TypeOf((*MockgenIndexer)(nil).DetectGaps), arg0, arg1)
}

// HasBlock mocks base method.
func (m *MockgenIndexer) HasBlock(arg0 common.Hash, arg1 uint64) (bool, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "HasBlock", arg0, arg1)
	ret0, _ := ret[0].(bool)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// HasBlock indicates an expected call of HasBlock.
func (mr *MockgenIndexerMockRecorder) HasBlock(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasBlock", reflect.TypeOf((*MockgenIndexer)(nil).HasBlock), arg0, arg1)
}

// InsertWatchedAddresses mocks base method.
func (m *MockgenIndexer) InsertWatchedAddresses(arg0 []types.WatchAddressArg, arg1 *big.Int) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "InsertWatchedAddresses", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// InsertWatchedAddresses indicates an expected call of InsertWatchedAddresses.
func (mr *MockgenIndexerMockRecorder) InsertWatchedAddresses(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertWatchedAddresses", reflect.TypeOf((*MockgenIndexer)(nil).InsertWatchedAddresses), arg0, arg1)
}

// LoadWatchedAddresses mocks base method.
func (m *MockgenIndexer) LoadWatchedAddresses() ([]common.Address, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "LoadWatchedAddresses")
	ret0, _ := ret[0].([]common.Address)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// LoadWatchedAddresses indicates an expected call of LoadWatchedAddresses.
func (mr *MockgenIndexerMockRecorder) LoadWatchedAddresses() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadWatchedAddresses", reflect.TypeOf((*MockgenIndexer)(nil).LoadWatchedAddresses))
}

// PushBlock mocks base method.
func (m *MockgenIndexer) PushBlock(arg0 *types0.Block, arg1 types0.Receipts, arg2 *big.Int) (interfaces.Batch, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PushBlock", arg0, arg1, arg2)
	ret0, _ := ret[0].(interfaces.Batch)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// PushBlock indicates an expected call of PushBlock.
func (mr *MockgenIndexerMockRecorder) PushBlock(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushBlock", reflect.TypeOf((*MockgenIndexer)(nil).PushBlock), arg0, arg1, arg2)
}

// PushHeader mocks base method.
func (m *MockgenIndexer) PushHeader(arg0 interfaces.Batch, arg1 *types0.Header, arg2, arg3 *big.Int) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PushHeader", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// PushHeader indicates an expected call of PushHeader.
func (mr *MockgenIndexerMockRecorder) PushHeader(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushHeader", reflect.TypeOf((*MockgenIndexer)(nil).PushHeader), arg0, arg1, arg2, arg3)
}

// PushIPLD mocks base method.
func (m *MockgenIndexer) PushIPLD(arg0 interfaces.Batch, arg1 types.IPLD) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PushIPLD", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// PushIPLD indicates an expected call of PushIPLD.
func (mr *MockgenIndexerMockRecorder) PushIPLD(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushIPLD", reflect.TypeOf((*MockgenIndexer)(nil).PushIPLD), arg0, arg1)
}

// PushStateNode mocks base method.
func (m *MockgenIndexer) PushStateNode(arg0 interfaces.Batch, arg1 types.StateLeafNode, arg2 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PushStateNode", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// PushStateNode indicates an expected call of PushStateNode.
func (mr *MockgenIndexerMockRecorder) PushStateNode(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushStateNode", reflect.TypeOf((*MockgenIndexer)(nil).PushStateNode), arg0, arg1, arg2)
}

// RemoveWatchedAddresses mocks base method.
func (m *MockgenIndexer) RemoveWatchedAddresses(arg0 []types.WatchAddressArg) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "RemoveWatchedAddresses", arg0)
	ret0, _ := ret[0].(error)
	return ret0
}

// RemoveWatchedAddresses indicates an expected call of RemoveWatchedAddresses.
func (mr *MockgenIndexerMockRecorder) RemoveWatchedAddresses(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveWatchedAddresses", reflect.TypeOf((*MockgenIndexer)(nil).RemoveWatchedAddresses), arg0)
}

// ReportDBMetrics mocks base method.
func (m *MockgenIndexer) ReportDBMetrics(arg0 time.Duration, arg1 <-chan bool) {
	m.ctrl.T.Helper()
	m.ctrl.Call(m, "ReportDBMetrics", arg0, arg1)
}

// ReportDBMetrics indicates an expected call of ReportDBMetrics.
func (mr *MockgenIndexerMockRecorder) ReportDBMetrics(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportDBMetrics", reflect.TypeOf((*MockgenIndexer)(nil).ReportDBMetrics), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWatchedAddresses mocks base method.
|
||||||
|
func (m *MockgenIndexer) SetWatchedAddresses(arg0 []types.WatchAddressArg, arg1 *big.Int) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "SetWatchedAddresses", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetWatchedAddresses indicates an expected call of SetWatchedAddresses.
|
||||||
|
func (mr *MockgenIndexerMockRecorder) SetWatchedAddresses(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWatchedAddresses", reflect.TypeOf((*MockgenIndexer)(nil).SetWatchedAddresses), arg0, arg1)
|
||||||
|
}
|
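The recorder methods above are what let tests register expectations against the generated mock. A minimal usage sketch, not part of this diff, assuming the standard EXPECT() accessor that mockgen generates alongside these methods (plus "testing", gomock, and go-ethereum common imports):

func TestHasBlockExpectation(t *testing.T) {
	ctl := gomock.NewController(t)
	indexer := NewMockgenIndexer(ctl)
	hash := common.HexToHash("0xabc") // illustrative hash, not from the fixtures

	// Stub HasBlock to report the block as present exactly once.
	indexer.EXPECT().HasBlock(hash, uint64(170)).Return(true, nil).Times(1)

	found, err := indexer.HasBlock(hash, 170)
	if err != nil || !found {
		t.Fatalf("expected found=true, got found=%v err=%v", found, err)
	}
}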
88
internal/mocks/indexer.go
Normal file
@@ -0,0 +1,88 @@
package mocks

import (
	"context"
	"fmt"
	"math/big"
	"sync"
	"testing"

	"github.com/cerc-io/plugeth-statediff/indexer"
	sdtypes "github.com/cerc-io/plugeth-statediff/types"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/golang/mock/gomock"
)

// Indexer just caches data but wraps a gomock instance, so we can mock other methods if needed
type Indexer struct {
	*MockgenIndexer
	sync.RWMutex

	IndexerData
}

type IndexerData struct {
	Headers    map[uint64]*types.Header
	StateNodes []sdtypes.StateLeafNode
	IPLDs      []sdtypes.IPLD
}

// no-op mock Batch
type Batch struct{}

// NewIndexer returns a mock indexer that caches data in lists
func NewIndexer(t *testing.T) *Indexer {
	ctl := gomock.NewController(t)
	return &Indexer{
		MockgenIndexer: NewMockgenIndexer(ctl),
		IndexerData: IndexerData{
			Headers: make(map[uint64]*types.Header),
		},
	}
}

func (i *Indexer) PushHeader(_ indexer.Batch, header *types.Header, _, _ *big.Int) (string, error) {
	i.Lock()
	defer i.Unlock()
	i.Headers[header.Number.Uint64()] = header
	return header.Hash().String(), nil
}

func (i *Indexer) PushStateNode(_ indexer.Batch, stateNode sdtypes.StateLeafNode, _ string) error {
	i.Lock()
	defer i.Unlock()
	i.StateNodes = append(i.StateNodes, stateNode)
	return nil
}

func (i *Indexer) PushIPLD(_ indexer.Batch, ipld sdtypes.IPLD) error {
	i.Lock()
	defer i.Unlock()
	i.IPLDs = append(i.IPLDs, ipld)
	return nil
}

func (i *Indexer) BeginTx(_ *big.Int, _ context.Context) indexer.Batch {
	return Batch{}
}

func (Batch) Submit() error              { return nil }
func (Batch) BlockNumber() string        { return "0" }
func (Batch) RollbackOnFailure(error)    {}

// InterruptingIndexer triggers an artificial failure at a specific node count
type InterruptingIndexer struct {
	*Indexer

	InterruptAfter uint
}

func (i *InterruptingIndexer) PushStateNode(b indexer.Batch, stateNode sdtypes.StateLeafNode, h string) error {
	i.RLock()
	indexedCount := len(i.StateNodes)
	i.RUnlock()
	if indexedCount >= int(i.InterruptAfter) {
		return fmt.Errorf("mock interrupt")
	}
	return i.Indexer.PushStateNode(b, stateNode, h)
}
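A brief in-package sketch of how the caching mock and the interrupting wrapper might be exercised (illustrative only; a zero-value StateLeafNode stands in for real fixture data, and the snippet assumes the imports already used above):

func TestInterruptingIndexer(t *testing.T) {
	idx := &InterruptingIndexer{Indexer: NewIndexer(t), InterruptAfter: 1}
	batch := idx.BeginTx(big.NewInt(1), context.Background())

	var node sdtypes.StateLeafNode
	// First push is under the limit and is cached.
	if err := idx.PushStateNode(batch, node, "headerID"); err != nil {
		t.Fatal(err)
	}
	// Second push hits the threshold and returns the mock interrupt.
	if err := idx.PushStateNode(batch, node, "headerID"); err == nil {
		t.Fatal("expected mock interrupt")
	}
}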
2
main.go
@@ -18,7 +18,7 @@ package main
 import (
 	"github.com/sirupsen/logrus"

-	"github.com/vulcanize/ipld-eth-state-snapshot/cmd"
+	"github.com/cerc-io/ipld-eth-state-snapshot/cmd"
 )

 func main() {
@@ -19,12 +19,12 @@ package prom
 import (
 	"github.com/prometheus/client_golang/prometheus"

-	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+	mets "github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
 )

 // DBStatsGetter is an interface that gets sql.DBStats.
 type DBStatsGetter interface {
-	Stats() sql.Stats
+	Stats() mets.DbStats
 }

 // DBStatsCollector implements the prometheus.Collector interface.

@@ -33,9 +33,6 @@ var (

 	stateNodeCount   prometheus.Counter
 	storageNodeCount prometheus.Counter
-	codeNodeCount    prometheus.Counter
-
-	activeIteratorCount prometheus.Gauge
 )

 func Init() {

@@ -54,20 +51,16 @@ func Init() {
 		Name:      "storage_node_count",
 		Help:      "Number of storage nodes processed",
 	})
-
-	codeNodeCount = promauto.NewCounter(prometheus.CounterOpts{
-		Namespace: namespace,
-		Subsystem: statsSubsystem,
-		Name:      "code_node_count",
-		Help:      "Number of code nodes processed",
-	})
-
-	activeIteratorCount = promauto.NewGauge(prometheus.GaugeOpts{
-		Namespace: namespace,
-		Subsystem: statsSubsystem,
-		Name:      "active_iterator_count",
-		Help:      "Number of active iterators",
-	})
+}
+
+func RegisterGaugeFunc(name string, function func() float64) {
+	promauto.NewGaugeFunc(
+		prometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: statsSubsystem,
+			Name:      name,
+			Help:      name,
+		}, function)
 }

 // RegisterDBCollector create metric collector for given connection

@@ -84,30 +77,13 @@ func IncStateNodeCount() {
 	}
 }

-// IncStorageNodeCount increments the number of storage nodes processed
-func IncStorageNodeCount() {
-	if metrics {
-		storageNodeCount.Inc()
-	}
-}
-
-// IncCodeNodeCount increments the number of code nodes processed
-func IncCodeNodeCount() {
-	if metrics {
-		codeNodeCount.Inc()
-	}
-}
-
-// IncActiveIterCount increments the number of active iterators
-func IncActiveIterCount() {
-	if metrics {
-		activeIteratorCount.Inc()
-	}
-}
-
-// DecActiveIterCount decrements the number of active iterators
-func DecActiveIterCount() {
-	if metrics {
-		activeIteratorCount.Dec()
-	}
-}
+// AddStorageNodeCount increments the number of storage nodes processed
+func AddStorageNodeCount(count int) {
+	if metrics && count > 0 {
+		storageNodeCount.Add(float64(count))
+	}
+}
+
+func Enabled() bool {
+	return metrics
+}
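For reference, a sketch of how the new RegisterGaugeFunc might be called from within this package (the metric name and counter here are invented for illustration; it assumes "sync/atomic" is imported, and the tracker file below uses the same pattern for per-iterator progress):

var nodesSeen atomic.Int64

func initNodesSeenMetric() {
	RegisterGaugeFunc("nodes_seen", func() float64 {
		// Sampled by Prometheus on each scrape.
		return float64(nodesSeen.Load())
	})
}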
174
pkg/prom/tracker.go
Normal file
@@ -0,0 +1,174 @@
package prom

import (
	"bytes"
	"fmt"
	"sync"
	"sync/atomic"

	iterutil "github.com/cerc-io/eth-iterator-utils"
	"github.com/cerc-io/eth-iterator-utils/tracker"
	"github.com/ethereum/go-ethereum/trie"
)

var trackedIterCount atomic.Int32

// MetricsTracker wraps tracked iterators in metrics-reporting iterators
type MetricsTracker struct {
	*tracker.TrackerImpl
}

type metricsIterator struct {
	trie.NodeIterator
	id int32
	// count uint
	done     bool
	lastPath []byte
	sync.RWMutex
}

func NewTracker(file string, bufsize uint) *MetricsTracker {
	return &MetricsTracker{TrackerImpl: tracker.NewImpl(file, bufsize)}
}

func (t *MetricsTracker) wrap(tracked *tracker.Iterator) *metricsIterator {
	startPath, endPath := tracked.Bounds()
	pathDepth := max(max(len(startPath), len(endPath)), 1)
	totalSteps := estimateSteps(startPath, endPath, pathDepth)

	ret := &metricsIterator{
		NodeIterator: tracked,
		id:           trackedIterCount.Add(1),
	}

	RegisterGaugeFunc(
		fmt.Sprintf("tracked_iterator_%d", ret.id),
		func() float64 {
			ret.RLock()
			done := ret.done
			lastPath := ret.lastPath
			ret.RUnlock()

			if done {
				return 100.0
			}

			if lastPath == nil {
				return 0.0
			}

			// estimate remaining distance based on current position and node count
			remainingSteps := estimateSteps(lastPath, endPath, pathDepth)
			return (float64(totalSteps) - float64(remainingSteps)) / float64(totalSteps) * 100.0
		})
	return ret
}

func (t *MetricsTracker) Restore(ctor iterutil.IteratorConstructor) (
	[]trie.NodeIterator, []trie.NodeIterator, error,
) {
	iters, bases, err := t.TrackerImpl.Restore(ctor)
	if err != nil {
		return nil, nil, err
	}
	ret := make([]trie.NodeIterator, len(iters))
	for i, tracked := range iters {
		ret[i] = t.wrap(tracked)
	}
	return ret, bases, nil
}

func (t *MetricsTracker) Tracked(it trie.NodeIterator) trie.NodeIterator {
	tracked := t.TrackerImpl.Tracked(it)
	return t.wrap(tracked)
}

func (it *metricsIterator) Next(descend bool) bool {
	ret := it.NodeIterator.Next(descend)
	it.Lock()
	defer it.Unlock()
	if ret {
		it.lastPath = it.Path()
	} else {
		it.done = true
	}
	return ret
}

// Estimate the number of iterations necessary to step from start to end.
func estimateSteps(start []byte, end []byte, depth int) uint64 {
	// We see paths in several forms (nil, 0600, 06, etc.). We need to adjust them to a comparable form.
	// For nil, start and end indicate the extremes of 0x0 and 0x10. For differences in depth, we often see a
	// start/end range on a bounded iterator specified like 0500:0600, while the value returned by it.Path() may
	// be shorter, like 06. Since our goal is to estimate how many steps it would take to move from start to end,
	// we want to perform the comparison at a stable depth, since to move from 05 to 06 is only 1 step, but
	// to move from 0500 to 0600 is 16.
	normalizePathRange := func(start []byte, end []byte, depth int) ([]byte, []byte) {
		if 0 == len(start) {
			start = []byte{0x0}
		}
		if 0 == len(end) {
			end = []byte{0x10}
		}
		normalizedStart := make([]byte, depth)
		normalizedEnd := make([]byte, depth)
		for i := 0; i < depth; i++ {
			if i < len(start) {
				normalizedStart[i] = start[i]
			}
			if i < len(end) {
				normalizedEnd[i] = end[i]
			}
		}
		return normalizedStart, normalizedEnd
	}

	// We have no need to handle negative exponents, so uints are fine.
	// (Fixed here: the loop previously over-multiplied and returned x unconditionally.)
	pow := func(x uint64, y uint) uint64 {
		if 0 == y {
			return 1
		}
		ret := x
		for i := uint(1); i < y; i++ {
			ret *= x
		}
		return ret
	}

	// Fix the paths.
	start, end = normalizePathRange(start, end, depth)

	// No negative distances; if the start is already >= end, the distance is 0.
	if bytes.Compare(start, end) >= 0 {
		return 0
	}

	// Subtract each component, right to left, carrying over if necessary.
	difference := make([]byte, len(start))
	var carry byte = 0
	for i := len(start) - 1; i >= 0; i-- {
		result := end[i] - start[i] - carry
		if result > 0xf && i > 0 {
			result &= 0xf
			carry = 1
		} else {
			carry = 0
		}
		difference[i] = result
	}

	// Calculate the result.
	var ret uint64 = 0
	for i := 0; i < len(difference); i++ {
		ret += uint64(difference[i]) * pow(16, uint(len(difference)-i-1))
	}

	return ret
}

func max(a int, b int) int {
	if a > b {
		return a
	}
	return b
}
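A worked check of the depth normalization described in the comments above (an illustrative in-package test sketch, assuming a "testing" import; values follow from the corrected pow): at depth 1 the bounds 05 and 06 are one step apart, at depth 2 they normalize to 0500 and 0600, sixteen steps apart, and nil bounds span the whole keyspace at the given depth.

func TestEstimateStepsExamples(t *testing.T) {
	if got := estimateSteps([]byte{0x05}, []byte{0x06}, 1); got != 1 {
		t.Errorf("depth 1: want 1, got %d", got)
	}
	if got := estimateSteps([]byte{0x05}, []byte{0x06}, 2); got != 16 {
		t.Errorf("depth 2: want 16, got %d", got)
	}
	// nil bounds normalize to 0x0 and 0x10: one full nibble range per level.
	if got := estimateSteps(nil, nil, 1); got != 16 {
		t.Errorf("nil bounds at depth 1: want 16, got %d", got)
	}
}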
@@ -23,8 +23,9 @@ import (

 	"github.com/sirupsen/logrus"

-	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
-	ethNode "github.com/ethereum/go-ethereum/statediff/indexer/node"
+	"github.com/cerc-io/plugeth-statediff/indexer/database/file"
+	"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
+	ethNode "github.com/cerc-io/plugeth-statediff/indexer/node"
 	"github.com/spf13/viper"
 )

@@ -40,36 +41,33 @@ const (

 // Config contains params for both databases the service uses
 type Config struct {
-	Eth     *EthConfig
+	Eth     *EthDBConfig
 	DB      *DBConfig
 	File    *FileConfig
 	Service *ServiceConfig
 }

-// EthConfig is config parameters for the chain.
-type EthConfig struct {
-	LevelDBPath   string
+// EthDBConfig is config parameters for the chain DB.
+type EthDBConfig struct {
+	DBPath        string
 	AncientDBPath string
 	NodeInfo      ethNode.Info
 }

-// DBConfig is config parameters for DB.
-type DBConfig struct {
-	URI        string
-	ConnConfig postgres.Config
-}
+// DBConfig contains options for DB output mode.
+type DBConfig = postgres.Config

-type FileConfig struct {
-	OutputDir string
-}
+// FileConfig contains options for file output mode. Note that this service currently only supports
+// CSV output, and does not record watched addresses, so not all fields are used.
+type FileConfig = file.Config

 type ServiceConfig struct {
-	AllowedAccounts map[common.Address]struct{}
+	AllowedAccounts []common.Address
 }

 func NewConfig(mode SnapshotMode) (*Config, error) {
 	ret := &Config{
-		&EthConfig{},
+		&EthDBConfig{},
 		&DBConfig{},
 		&FileConfig{},
 		&ServiceConfig{},

@@ -79,18 +77,19 @@ func NewConfig(mode SnapshotMode) (*Config, error) {

 func NewInPlaceSnapshotConfig() *Config {
 	ret := &Config{
-		&EthConfig{},
+		&EthDBConfig{},
 		&DBConfig{},
 		&FileConfig{},
 		&ServiceConfig{},
 	}
-	ret.DB.Init()
+	InitDB(ret.DB)

 	return ret
 }

 // Init Initialises config
 func (c *Config) Init(mode SnapshotMode) error {
+	viper.BindEnv(LOG_FILE_TOML, LOG_FILE)
 	viper.BindEnv(ETH_NODE_ID_TOML, ETH_NODE_ID)
 	viper.BindEnv(ETH_CLIENT_NAME_TOML, ETH_CLIENT_NAME)
 	viper.BindEnv(ETH_GENESIS_BLOCK_TOML, ETH_GENESIS_BLOCK)

@@ -105,24 +104,27 @@ func (c *Config) Init(mode SnapshotMode) error {
 		ChainID: viper.GetUint64(ETH_CHAIN_ID_TOML),
 	}

-	viper.BindEnv(ANCIENT_DB_PATH_TOML, ANCIENT_DB_PATH)
-	viper.BindEnv(LVL_DB_PATH_TOML, LVL_DB_PATH)
+	viper.BindEnv(ETHDB_ANCIENT_TOML, ETHDB_ANCIENT)
+	viper.BindEnv(ETHDB_PATH_TOML, ETHDB_PATH)

-	c.Eth.AncientDBPath = viper.GetString(ANCIENT_DB_PATH_TOML)
-	c.Eth.LevelDBPath = viper.GetString(LVL_DB_PATH_TOML)
+	c.Eth.DBPath = viper.GetString(ETHDB_PATH_TOML)
+	c.Eth.AncientDBPath = viper.GetString(ETHDB_ANCIENT_TOML)
+	if len(c.Eth.AncientDBPath) == 0 {
+		c.Eth.AncientDBPath = c.Eth.DBPath + "/ancient"
+	}

 	switch mode {
 	case FileSnapshot:
-		c.File.Init()
+		InitFile(c.File)
 	case PgSnapshot:
-		c.DB.Init()
+		InitDB(c.DB)
 	default:
 		return fmt.Errorf("no output mode specified")
 	}
 	return c.Service.Init()
 }

-func (c *DBConfig) Init() {
+func InitDB(c *DBConfig) {
 	viper.BindEnv(DATABASE_NAME_TOML, DATABASE_NAME)
 	viper.BindEnv(DATABASE_HOSTNAME_TOML, DATABASE_HOSTNAME)
 	viper.BindEnv(DATABASE_PORT_TOML, DATABASE_PORT)

@@ -132,41 +134,52 @@ func (c *DBConfig) Init() {
 	viper.BindEnv(DATABASE_MAX_OPEN_CONNECTIONS_TOML, DATABASE_MAX_OPEN_CONNECTIONS)
 	viper.BindEnv(DATABASE_MAX_CONN_LIFETIME_TOML, DATABASE_MAX_CONN_LIFETIME)

-	dbParams := postgres.Config{}
 	// DB params
-	dbParams.DatabaseName = viper.GetString(DATABASE_NAME_TOML)
-	dbParams.Hostname = viper.GetString(DATABASE_HOSTNAME_TOML)
-	dbParams.Port = viper.GetInt(DATABASE_PORT_TOML)
-	dbParams.Username = viper.GetString(DATABASE_USER_TOML)
-	dbParams.Password = viper.GetString(DATABASE_PASSWORD_TOML)
+	c.DatabaseName = viper.GetString(DATABASE_NAME_TOML)
+	c.Hostname = viper.GetString(DATABASE_HOSTNAME_TOML)
+	c.Port = viper.GetInt(DATABASE_PORT_TOML)
+	c.Username = viper.GetString(DATABASE_USER_TOML)
+	c.Password = viper.GetString(DATABASE_PASSWORD_TOML)
 	// Connection config
-	dbParams.MaxIdle = viper.GetInt(DATABASE_MAX_IDLE_CONNECTIONS_TOML)
-	dbParams.MaxConns = viper.GetInt(DATABASE_MAX_OPEN_CONNECTIONS_TOML)
-	dbParams.MaxConnLifetime = time.Duration(viper.GetInt(DATABASE_MAX_CONN_LIFETIME_TOML)) * time.Second
+	c.MaxIdle = viper.GetInt(DATABASE_MAX_IDLE_CONNECTIONS_TOML)
+	c.MaxConns = viper.GetInt(DATABASE_MAX_OPEN_CONNECTIONS_TOML)
+	c.MaxConnLifetime = time.Duration(viper.GetInt(DATABASE_MAX_CONN_LIFETIME_TOML)) * time.Second

-	c.ConnConfig = dbParams
-	c.URI = dbParams.DbConnectionString()
+	c.Driver = postgres.SQLX
 }

-func (c *FileConfig) Init() error {
+func InitFile(c *FileConfig) error {
 	viper.BindEnv(FILE_OUTPUT_DIR_TOML, FILE_OUTPUT_DIR)
 	c.OutputDir = viper.GetString(FILE_OUTPUT_DIR_TOML)
 	if c.OutputDir == "" {
 		logrus.Infof("no output directory set, using default: %s", defaultOutputDir)
 		c.OutputDir = defaultOutputDir
 	}
+	// Only support CSV for now
+	c.Mode = file.CSV
 	return nil
 }

 func (c *ServiceConfig) Init() error {
+	viper.BindEnv(SNAPSHOT_BLOCK_HEIGHT_TOML, SNAPSHOT_BLOCK_HEIGHT)
+	viper.BindEnv(SNAPSHOT_MODE_TOML, SNAPSHOT_MODE)
+	viper.BindEnv(SNAPSHOT_WORKERS_TOML, SNAPSHOT_WORKERS)
+	viper.BindEnv(SNAPSHOT_RECOVERY_FILE_TOML, SNAPSHOT_RECOVERY_FILE)
+
+	viper.BindEnv(PROM_DB_STATS_TOML, PROM_DB_STATS)
+	viper.BindEnv(PROM_HTTP_TOML, PROM_HTTP)
+	viper.BindEnv(PROM_HTTP_ADDR_TOML, PROM_HTTP_ADDR)
+	viper.BindEnv(PROM_HTTP_PORT_TOML, PROM_HTTP_PORT)
+	viper.BindEnv(PROM_METRICS_TOML, PROM_METRICS)
+
 	viper.BindEnv(SNAPSHOT_ACCOUNTS_TOML, SNAPSHOT_ACCOUNTS)
 	var allowedAccounts []string
 	viper.UnmarshalKey(SNAPSHOT_ACCOUNTS_TOML, &allowedAccounts)
 	accountsLen := len(allowedAccounts)
 	if accountsLen != 0 {
-		c.AllowedAccounts = make(map[common.Address]struct{}, accountsLen)
+		c.AllowedAccounts = make([]common.Address, 0, accountsLen)
 		for _, allowedAccount := range allowedAccounts {
-			c.AllowedAccounts[common.HexToAddress(allowedAccount)] = struct{}{}
+			c.AllowedAccounts = append(c.AllowedAccounts, common.HexToAddress(allowedAccount))
 		}
 	} else {
 		logrus.Infof("no snapshot addresses specified, will perform snapshot of entire trie(s)")
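A sketch of the resulting configuration flow (hypothetical values; this assumes an "os" import and shows only what the hunks above establish: InitDB reads the bound DATABASE_* variables directly into the postgres.Config alias and selects the SQLX driver):

func exampleInitDB() {
	os.Setenv("DATABASE_NAME", "cerc_testing")
	os.Setenv("DATABASE_HOSTNAME", "localhost")
	os.Setenv("DATABASE_PORT", "8077")

	var db DBConfig
	InitDB(&db)
	// Now db.DatabaseName == "cerc_testing", db.Hostname == "localhost",
	// db.Port == 8077, and db.Driver == postgres.SQLX.
}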
27
pkg/snapshot/config_test.go
Normal file
@@ -0,0 +1,27 @@
package snapshot_test

import (
	"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
	ethnode "github.com/cerc-io/plugeth-statediff/indexer/node"
)

var (
	DefaultNodeInfo = ethnode.Info{
		ID:           "test_nodeid",
		ClientName:   "test_client",
		GenesisBlock: "TEST_GENESIS",
		NetworkID:    "test_network",
		ChainID:      0,
	}
	DefaultPgConfig = postgres.Config{
		Hostname:     "localhost",
		Port:         8077,
		DatabaseName: "cerc_testing",
		Username:     "vdbm",
		Password:     "password",

		MaxIdle:         0,
		MaxConnLifetime: 0,
		MaxConns:        4,
	}
)
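These fixtures mirror the test database brought up in CI. As an illustration, a connection string can be derived from them; DbConnectionString is the postgres.Config helper already used elsewhere in this changeset, though its exact output format is not shown here:

func exampleConnString() string {
	// e.g. a postgres URI for vdbm@localhost:8077/cerc_testing
	return DefaultPgConfig.DbConnectionString()
}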
@@ -21,12 +21,10 @@ const (
 	SNAPSHOT_WORKERS       = "SNAPSHOT_WORKERS"
 	SNAPSHOT_RECOVERY_FILE = "SNAPSHOT_RECOVERY_FILE"
 	SNAPSHOT_MODE          = "SNAPSHOT_MODE"
-	SNAPSHOT_START_HEIGHT  = "SNAPSHOT_START_HEIGHT"
-	SNAPSHOT_END_HEIGHT    = "SNAPSHOT_END_HEIGHT"
 	SNAPSHOT_ACCOUNTS      = "SNAPSHOT_ACCOUNTS"

-	LOGRUS_LEVEL = "LOGRUS_LEVEL"
-	LOGRUS_FILE  = "LOGRUS_FILE"
+	LOG_LEVEL = "LOG_LEVEL"
+	LOG_FILE  = "LOG_FILE"

 	PROM_METRICS = "PROM_METRICS"
 	PROM_HTTP    = "PROM_HTTP"

@@ -36,8 +34,8 @@ const (

 	FILE_OUTPUT_DIR = "FILE_OUTPUT_DIR"

-	ANCIENT_DB_PATH = "ANCIENT_DB_PATH"
-	LVL_DB_PATH     = "LVL_DB_PATH"
+	ETHDB_ANCIENT = "ETHDB_ANCIENT"
+	ETHDB_PATH    = "ETHDB_PATH"

 	ETH_CLIENT_NAME   = "ETH_CLIENT_NAME"
 	ETH_GENESIS_BLOCK = "ETH_GENESIS_BLOCK"

@@ -61,12 +59,10 @@ const (
 	SNAPSHOT_WORKERS_TOML       = "snapshot.workers"
 	SNAPSHOT_RECOVERY_FILE_TOML = "snapshot.recoveryFile"
 	SNAPSHOT_MODE_TOML          = "snapshot.mode"
-	SNAPSHOT_START_HEIGHT_TOML  = "snapshot.startHeight"
-	SNAPSHOT_END_HEIGHT_TOML    = "snapshot.endHeight"
 	SNAPSHOT_ACCOUNTS_TOML      = "snapshot.accounts"

-	LOGRUS_LEVEL_TOML = "log.level"
-	LOGRUS_FILE_TOML  = "log.file"
+	LOG_LEVEL_TOML = "log.level"
+	LOG_FILE_TOML  = "log.file"

 	PROM_METRICS_TOML = "prom.metrics"
 	PROM_HTTP_TOML    = "prom.http"

@@ -76,8 +72,8 @@ const (

 	FILE_OUTPUT_DIR_TOML = "file.outputDir"

-	ANCIENT_DB_PATH_TOML = "leveldb.ancient"
-	LVL_DB_PATH_TOML     = "leveldb.path"
+	ETHDB_ANCIENT_TOML = "ethdb.ancient"
+	ETHDB_PATH_TOML    = "ethdb.path"

 	ETH_CLIENT_NAME_TOML   = "ethereum.clientName"
 	ETH_GENESIS_BLOCK_TOML = "ethereum.genesisBlock"

@@ -101,12 +97,10 @@ const (
 	SNAPSHOT_WORKERS_CLI       = "workers"
 	SNAPSHOT_RECOVERY_FILE_CLI = "recovery-file"
 	SNAPSHOT_MODE_CLI          = "snapshot-mode"
-	SNAPSHOT_START_HEIGHT_CLI  = "start-height"
-	SNAPSHOT_END_HEIGHT_CLI    = "end-height"
 	SNAPSHOT_ACCOUNTS_CLI      = "snapshot-accounts"

-	LOGRUS_LEVEL_CLI = "log-level"
-	LOGRUS_FILE_CLI  = "log-file"
+	LOG_LEVEL_CLI = "log-level"
+	LOG_FILE_CLI  = "log-file"

 	PROM_METRICS_CLI = "prom-metrics"
 	PROM_HTTP_CLI    = "prom-http"

@@ -116,8 +110,8 @@ const (

 	FILE_OUTPUT_DIR_CLI = "output-dir"

-	ANCIENT_DB_PATH_CLI = "ancient-path"
-	LVL_DB_PATH_CLI     = "leveldb-path"
+	ETHDB_ANCIENT_CLI = "ancient-path"
+	ETHDB_PATH_CLI    = "ethdb-path"

 	ETH_CLIENT_NAME_CLI   = "ethereum-client-name"
 	ETH_GENESIS_BLOCK_CLI = "ethereum-genesis-block"
@@ -1,302 +0,0 @@
// Copyright © 2020 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package publisher

import (
	"encoding/csv"
	"fmt"
	"math/big"
	"os"
	"path/filepath"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ipfs/go-cid"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	dshelp "github.com/ipfs/go-ipfs-ds-help"
	"github.com/multiformats/go-multihash"
	"github.com/sirupsen/logrus"

	"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
	nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
	"github.com/ethereum/go-ethereum/statediff/indexer/shared"
	"github.com/vulcanize/ipld-eth-state-snapshot/pkg/prom"
	snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
)

var _ snapt.Publisher = (*publisher)(nil)

var (
	// tables written once per block
	perBlockTables = []*snapt.Table{
		&snapt.TableIPLDBlock,
		&snapt.TableNodeInfo,
		&snapt.TableHeader,
	}
	// tables written during state iteration
	perNodeTables = []*snapt.Table{
		&snapt.TableIPLDBlock,
		&snapt.TableStateNode,
		&snapt.TableStorageNode,
	}
)

const logInterval = 1 * time.Minute

type publisher struct {
	dir     string // dir containing output files
	writers fileWriters

	nodeInfo nodeinfo.Info

	startTime          time.Time
	currBatchSize      uint
	stateNodeCounter   uint64
	storageNodeCounter uint64
	codeNodeCounter    uint64
	txCounter          uint32
}

type fileWriter struct {
	*csv.Writer
}

// fileWriters wraps the file writers for each output table
type fileWriters map[string]fileWriter

type fileTx struct{ fileWriters }

func (tx fileWriters) Commit() error {
	for _, w := range tx {
		w.Flush()
		if err := w.Error(); err != nil {
			return err
		}
	}
	return nil
}
func (fileWriters) Rollback() error { return nil } // TODO: delete the file?

func newFileWriter(path string) (ret fileWriter, err error) {
	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		return
	}
	ret = fileWriter{csv.NewWriter(file)}
	return
}

func (tx fileWriters) write(tbl *snapt.Table, args ...interface{}) error {
	row := tbl.ToCsvRow(args...)
	return tx[tbl.Name].Write(row)
}

func makeFileWriters(dir string, tables []*snapt.Table) (fileWriters, error) {
	if err := os.MkdirAll(dir, 0755); err != nil {
		return nil, err
	}
	writers := fileWriters{}
	for _, tbl := range tables {
		w, err := newFileWriter(TableFile(dir, tbl.Name))
		if err != nil {
			return nil, err
		}
		writers[tbl.Name] = w
	}
	return writers, nil
}

// NewPublisher creates a publisher which writes to per-table CSV files which can be imported
// with the Postgres COPY command.
// The output directory will be created if it does not exist.
func NewPublisher(path string, node nodeinfo.Info) (*publisher, error) {
	if err := os.MkdirAll(path, 0777); err != nil {
		return nil, fmt.Errorf("unable to make MkdirAll for path: %s err: %s", path, err)
	}
	writers, err := makeFileWriters(path, perBlockTables)
	if err != nil {
		return nil, err
	}
	pub := &publisher{
		writers:   writers,
		dir:       path,
		nodeInfo:  node,
		startTime: time.Now(),
	}
	go pub.logNodeCounters()
	return pub, nil
}

func TableFile(dir, name string) string { return filepath.Join(dir, name+".csv") }

func (p *publisher) txDir(index uint32) string {
	return filepath.Join(p.dir, fmt.Sprintf("%010d", index))
}

func (p *publisher) BeginTx() (snapt.Tx, error) {
	index := atomic.AddUint32(&p.txCounter, 1) - 1
	dir := p.txDir(index)
	writers, err := makeFileWriters(dir, perNodeTables)
	if err != nil {
		return nil, err
	}

	return fileTx{writers}, nil
}

// publishRaw derives a cid from raw bytes and the provided codec and multihash type, and writes it to the db tx;
// returns the CID and blockstore-prefixed multihash key
func (tx fileWriters) publishRaw(codec uint64, raw []byte, height *big.Int) (cid, prefixedKey string, err error) {
	c, err := ipld.RawdataToCid(codec, raw, multihash.KECCAK_256)
	if err != nil {
		return
	}
	cid = c.String()
	prefixedKey, err = tx.publishIPLD(c, raw, height)
	return
}

func (tx fileWriters) publishIPLD(c cid.Cid, raw []byte, height *big.Int) (string, error) {
	dbKey := dshelp.MultihashToDsKey(c.Hash())
	prefixedKey := blockstore.BlockPrefix.String() + dbKey.String()
	return prefixedKey, tx.write(&snapt.TableIPLDBlock, height.String(), prefixedKey, raw)
}

// PublishHeader writes the header to the ipfs backing pg datastore and adds secondary
// indexes in the header_cids table
func (p *publisher) PublishHeader(header *types.Header) error {
	headerNode, err := ipld.NewEthHeader(header)
	if err != nil {
		return err
	}
	if _, err = p.writers.publishIPLD(headerNode.Cid(), headerNode.RawData(), header.Number); err != nil {
		return err
	}

	mhKey := shared.MultihashKeyFromCID(headerNode.Cid())
	err = p.writers.write(&snapt.TableNodeInfo, p.nodeInfo.GenesisBlock, p.nodeInfo.NetworkID, p.nodeInfo.ID,
		p.nodeInfo.ClientName, p.nodeInfo.ChainID)
	if err != nil {
		return err
	}
	err = p.writers.write(&snapt.TableHeader, header.Number.String(), header.Hash().Hex(), header.ParentHash.Hex(),
		headerNode.Cid().String(), 0, p.nodeInfo.ID, 0, header.Root.Hex(), header.TxHash.Hex(),
		header.ReceiptHash.Hex(), header.UncleHash.Hex(), header.Bloom.Bytes(), header.Time, mhKey,
		0, header.Coinbase.String())
	if err != nil {
		return err
	}
	return p.writers.Commit()
}

// PublishStateNode writes the state node to the ipfs backing datastore and adds secondary indexes
// in the state_cids table
func (p *publisher) PublishStateNode(node *snapt.Node, headerID string, height *big.Int, snapTx snapt.Tx) error {
	var stateKey string
	if !snapt.IsNullHash(node.Key) {
		stateKey = node.Key.Hex()
	}

	tx := snapTx.(fileTx)
	stateCIDStr, mhKey, err := tx.publishRaw(ipld.MEthStateTrie, node.Value, height)
	if err != nil {
		return err
	}

	err = tx.write(&snapt.TableStateNode, height.String(), headerID, stateKey, stateCIDStr, node.Path,
		node.NodeType, false, mhKey)
	if err != nil {
		return err
	}
	// increment state node counter.
	atomic.AddUint64(&p.stateNodeCounter, 1)
	prom.IncStateNodeCount()

	// increment current batch size counter
	p.currBatchSize += 2
	return err
}

// PublishStorageNode writes the storage node to the ipfs backing pg datastore and adds secondary
// indexes in the storage_cids table
func (p *publisher) PublishStorageNode(node *snapt.Node, headerID string, height *big.Int, statePath []byte, snapTx snapt.Tx) error {
	var storageKey string
	if !snapt.IsNullHash(node.Key) {
		storageKey = node.Key.Hex()
	}

	tx := snapTx.(fileTx)
	storageCIDStr, mhKey, err := tx.publishRaw(ipld.MEthStorageTrie, node.Value, height)
	if err != nil {
		return err
	}

	err = tx.write(&snapt.TableStorageNode, height.String(), headerID, statePath, storageKey, storageCIDStr, node.Path,
		node.NodeType, false, mhKey)
	if err != nil {
		return err
	}
	// increment storage node counter.
	atomic.AddUint64(&p.storageNodeCounter, 1)
	prom.IncStorageNodeCount()

	// increment current batch size counter
	p.currBatchSize += 2
	return nil
}

// PublishCode writes code to the ipfs backing pg datastore
func (p *publisher) PublishCode(height *big.Int, codeHash common.Hash, codeBytes []byte, snapTx snapt.Tx) error {
	// no codec for code, doesn't matter though since blockstore key is multihash-derived
	mhKey, err := shared.MultihashKeyFromKeccak256(codeHash)
	if err != nil {
		return fmt.Errorf("error deriving multihash key from codehash: %v", err)
	}

	tx := snapTx.(fileTx)
	if err = tx.write(&snapt.TableIPLDBlock, height.String(), mhKey, codeBytes); err != nil {
		return fmt.Errorf("error publishing code IPLD: %v", err)
	}
	// increment code node counter.
	atomic.AddUint64(&p.codeNodeCounter, 1)
	prom.IncCodeNodeCount()

	p.currBatchSize++
	return nil
}

func (p *publisher) PrepareTxForBatch(tx snapt.Tx, maxBatchSize uint) (snapt.Tx, error) {
	return tx, nil
}

// logNodeCounters periodically logs the number of nodes processed.
func (p *publisher) logNodeCounters() {
	t := time.NewTicker(logInterval)
	for range t.C {
		p.printNodeCounters("progress")
	}
}

func (p *publisher) printNodeCounters(msg string) {
	logrus.WithFields(logrus.Fields{
		"runtime":       time.Now().Sub(p.startTime).String(),
		"state nodes":   atomic.LoadUint64(&p.stateNodeCounter),
		"storage nodes": atomic.LoadUint64(&p.storageNodeCounter),
		"code nodes":    atomic.LoadUint64(&p.codeNodeCounter),
	}).Info(msg)
}
@@ -1,130 +0,0 @@
package publisher

import (
	"context"
	"encoding/csv"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"testing"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
	"github.com/ethereum/go-ethereum/statediff/indexer/ipld"

	fixt "github.com/vulcanize/ipld-eth-state-snapshot/fixture"
	snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
	"github.com/vulcanize/ipld-eth-state-snapshot/test"
)

var (
	pgConfig = test.DefaultPgConfig
	nodeInfo = test.DefaultNodeInfo
	// tables ordered according to fkey dependencies
	allTables = []*snapt.Table{
		&snapt.TableIPLDBlock,
		&snapt.TableNodeInfo,
		&snapt.TableHeader,
		&snapt.TableStateNode,
		&snapt.TableStorageNode,
	}
)

func writeFiles(t *testing.T, dir string) *publisher {
	pub, err := NewPublisher(dir, nodeInfo)
	test.NoError(t, err)
	test.NoError(t, pub.PublishHeader(&fixt.Block1_Header))
	tx, err := pub.BeginTx()
	test.NoError(t, err)

	headerID := fixt.Block1_Header.Hash().String()
	test.NoError(t, pub.PublishStateNode(&fixt.Block1_StateNode0, headerID, fixt.Block1_Header.Number, tx))

	test.NoError(t, tx.Commit())
	return pub
}

// verify that we can parse the csvs
// TODO check actual data
func verifyFileData(t *testing.T, path string, tbl *snapt.Table) {
	file, err := os.Open(path)
	test.NoError(t, err)
	r := csv.NewReader(file)
	test.NoError(t, err)
	r.FieldsPerRecord = len(tbl.Columns)

	for {
		_, err := r.Read()
		if err == io.EOF {
			break
		}
		test.NoError(t, err)
	}
}

func TestWriting(t *testing.T) {
	dir := t.TempDir()
	// tempdir like /tmp/TempFoo/001/, TempFoo defaults to 0700
	test.NoError(t, os.Chmod(filepath.Dir(dir), 0755))

	pub := writeFiles(t, dir)

	for _, tbl := range perBlockTables {
		verifyFileData(t, TableFile(pub.dir, tbl.Name), tbl)
	}
	for i := uint32(0); i < pub.txCounter; i++ {
		for _, tbl := range perNodeTables {
			verifyFileData(t, TableFile(pub.txDir(i), tbl.Name), tbl)
		}
	}
}

// Note: DB user requires role membership "pg_read_server_files"
func TestPgCopy(t *testing.T) {
	test.NeedsDB(t)

	dir := t.TempDir()
	test.NoError(t, os.Chmod(filepath.Dir(dir), 0755))
	pub := writeFiles(t, dir)

	ctx := context.Background()
	driver, err := postgres.NewSQLXDriver(ctx, pgConfig, nodeInfo)
	test.NoError(t, err)
	db := postgres.NewPostgresDB(driver)

	sql.TearDownDB(t, db)

	// copy from files
	pgCopyStatement := `COPY %s FROM '%s' CSV`
	for _, tbl := range perBlockTables {
		stm := fmt.Sprintf(pgCopyStatement, tbl.Name, TableFile(pub.dir, tbl.Name))
		_, err = db.Exec(ctx, stm)
		test.NoError(t, err)
	}
	for i := uint32(0); i < pub.txCounter; i++ {
		for _, tbl := range perNodeTables {
			stm := fmt.Sprintf(pgCopyStatement, tbl.Name, TableFile(pub.txDir(i), tbl.Name))
			_, err = db.Exec(ctx, stm)
			test.NoError(t, err)
		}
	}

	// check header was successfully committed
	pgQueryHeader := `SELECT cid, block_hash
		FROM eth.header_cids
		WHERE block_number = $1`
	type res struct {
		CID       string
		BlockHash string
	}
	var header res
	err = db.QueryRow(ctx, pgQueryHeader, fixt.Block1_Header.Number.Uint64()).Scan(
		&header.CID, &header.BlockHash)
	test.NoError(t, err)

	headerNode, err := ipld.NewEthHeader(&fixt.Block1_Header)
	test.NoError(t, err)
	test.ExpectEqual(t, headerNode.Cid().String(), header.CID)
	test.ExpectEqual(t, fixt.Block1_Header.Hash().String(), header.BlockHash)
}
@@ -1,61 +0,0 @@
// Copyright © 2022 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"github.com/jmoiron/sqlx"
	"github.com/sirupsen/logrus"
	. "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
)

const (
	stateSnapShotPgStr   = "SELECT state_snapshot($1, $2)"
	storageSnapShotPgStr = "SELECT storage_snapshot($1, $2)"
)

type InPlaceSnapshotParams struct {
	StartHeight uint64
	EndHeight   uint64
}

func CreateInPlaceSnapshot(config *Config, params InPlaceSnapshotParams) error {
	db, err := sqlx.Connect("postgres", config.DB.ConnConfig.DbConnectionString())
	if err != nil {
		return err
	}

	tx, err := db.Begin()
	if err != nil {
		return err
	}

	defer func() {
		err = CommitOrRollback(tx, err)
		if err != nil {
			logrus.Errorf("CommitOrRollback failed: %s", err)
		}
	}()

	if _, err = tx.Exec(stateSnapShotPgStr, params.StartHeight, params.EndHeight); err != nil {
		return err
	}

	if _, err = tx.Exec(storageSnapShotPgStr, params.StartHeight, params.EndHeight); err != nil {
		return err
	}

	return nil
}
@ -1,160 +0,0 @@
|
|||||||
// Copyright © 2022 Vulcanize, Inc
|
|
||||||
//
|
|
||||||
// This program is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Affero General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// This program is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Affero General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Affero General Public License
|
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package snapshot
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"strconv"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
|
|
||||||
"github.com/multiformats/go-multihash"
|
|
||||||
|
|
||||||
fixt "github.com/vulcanize/ipld-eth-state-snapshot/fixture"
|
|
||||||
"github.com/vulcanize/ipld-eth-state-snapshot/pkg/snapshot/pg"
|
|
||||||
snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
|
|
||||||
"github.com/vulcanize/ipld-eth-state-snapshot/test"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
pgConfig = test.DefaultPgConfig
|
|
||||||
nodeInfo = test.DefaultNodeInfo
|
|
||||||
snapshotHeight = 4
|
|
||||||
|
|
||||||
allTables = []*snapt.Table{
|
|
||||||
&snapt.TableIPLDBlock,
|
|
||||||
&snapt.TableNodeInfo,
|
|
||||||
&snapt.TableHeader,
|
|
||||||
&snapt.TableStateNode,
|
|
||||||
&snapt.TableStorageNode,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func writeData(t *testing.T, db *postgres.DB) snapt.Publisher {
|
|
||||||
pub := pg.NewPublisher(db)
|
|
||||||
tx, err := pub.BeginTx()
|
|
||||||
test.NoError(t, err)
|
|
||||||
|
|
||||||
for _, block := range fixt.InPlaceSnapshotBlocks {
|
|
||||||
headerID := block.Hash.String()
|
|
||||||
|
|
||||||
for _, stateNode := range block.StateNodes {
|
|
||||||
test.NoError(t, pub.PublishStateNode(&stateNode, headerID, block.Number, tx))
|
|
||||||
}
		for index, stateStorageNodes := range block.StorageNodes {
			stateNode := block.StateNodes[index]

			for _, storageNode := range stateStorageNodes {
				test.NoError(t, pub.PublishStorageNode(&storageNode, headerID, block.Number, stateNode.Path, tx))
			}
		}
	}

	test.NoError(t, tx.Commit())

	test.NoError(t, pub.PublishHeader(&fixt.Block4_Header))
	return pub
}

func TestCreateInPlaceSnapshot(t *testing.T) {
	test.NeedsDB(t)

	ctx := context.Background()
	driver, err := postgres.NewSQLXDriver(ctx, pgConfig, nodeInfo)
	test.NoError(t, err)
	db := postgres.NewPostgresDB(driver)

	test_helpers.TearDownDB(t, db)

	_ = writeData(t, db)

	params := InPlaceSnapshotParams{StartHeight: uint64(0), EndHeight: uint64(snapshotHeight)}
	config := &Config{
		Eth: &EthConfig{
			NodeInfo: test.DefaultNodeInfo,
		},
		DB: &DBConfig{
			URI:        pgConfig.DbConnectionString(),
			ConnConfig: pgConfig,
		},
	}
	err = CreateInPlaceSnapshot(config, params)
	test.NoError(t, err)

	// Check the in-place snapshot was created for state_cids
	stateNodes := make([]models.StateNodeModel, 0)
	pgQueryStateCids := `SELECT cast(state_cids.block_number AS TEXT), state_cids.cid, state_cids.state_leaf_key, state_cids.node_type, state_cids.state_path, state_cids.header_id, state_cids.mh_key
		FROM eth.state_cids
		WHERE eth.state_cids.block_number = $1
		ORDER BY state_cids.state_path`
	err = db.Select(ctx, &stateNodes, pgQueryStateCids, snapshotHeight)
	test.NoError(t, err)
	test.ExpectEqual(t, 4, len(stateNodes))
	expectedStateNodes := fixt.ExpectedStateNodes

	pgIpfsGet := `SELECT data FROM public.blocks
		WHERE key = $1 AND block_number = $2`

	for index, stateNode := range stateNodes {
		var data []byte
		err = db.Get(ctx, &data, pgIpfsGet, stateNode.MhKey, snapshotHeight)
		test.NoError(t, err)

		expectedStateNode := expectedStateNodes[index]
		expectedCID, _ := ipld.RawdataToCid(ipld.MEthStateTrie, expectedStateNode.Value, multihash.KECCAK_256)
		test.ExpectEqual(t, strconv.Itoa(snapshotHeight), stateNode.BlockNumber)
		test.ExpectEqual(t, fixt.Block4_Header.Hash().String(), stateNode.HeaderID)
		test.ExpectEqual(t, expectedCID.String(), stateNode.CID)
		test.ExpectEqual(t, int(expectedStateNode.NodeType), stateNode.NodeType)
		test.ExpectEqual(t, expectedStateNode.Key, common.HexToHash(stateNode.StateKey))
		test.ExpectEqual(t, false, stateNode.Diff)
		test.ExpectEqualBytes(t, expectedStateNode.Path, stateNode.Path)
		test.ExpectEqualBytes(t, expectedStateNode.Value, data)
	}

	// Check the in-place snapshot was created for storage_cids
	storageNodes := make([]models.StorageNodeModel, 0)
	pgQueryStorageCids := `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, storage_cids.state_path, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path, storage_cids.mh_key, storage_cids.header_id
		FROM eth.storage_cids
		WHERE eth.storage_cids.block_number = $1
		ORDER BY storage_cids.state_path, storage_cids.storage_path`
	err = db.Select(ctx, &storageNodes, pgQueryStorageCids, snapshotHeight)
	test.NoError(t, err)

	for index, storageNode := range storageNodes {
		expectedStorageNode := fixt.ExpectedStorageNodes[index]
		expectedStorageCID, _ := ipld.RawdataToCid(ipld.MEthStorageTrie, expectedStorageNode.Value, multihash.KECCAK_256)

		test.ExpectEqual(t, strconv.Itoa(snapshotHeight), storageNode.BlockNumber)
		test.ExpectEqual(t, fixt.Block4_Header.Hash().String(), storageNode.HeaderID)
		test.ExpectEqual(t, expectedStorageCID.String(), storageNode.CID)
		test.ExpectEqual(t, int(expectedStorageNode.NodeType), storageNode.NodeType)
		test.ExpectEqual(t, expectedStorageNode.Key, common.HexToHash(storageNode.StorageKey))
		test.ExpectEqual(t, expectedStorageNode.StatePath, storageNode.StatePath)
		test.ExpectEqual(t, expectedStorageNode.Path, storageNode.Path)
		test.ExpectEqual(t, false, storageNode.Diff)

		var data []byte
		err = db.Get(ctx, &data, pgIpfsGet, storageNode.MhKey, snapshotHeight)
		test.NoError(t, err)
		test.ExpectEqualBytes(t, expectedStorageNode.Value, data)
	}
}
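For reference, the `mh_key` joined against `public.blocks` above is a blockstore-prefixed multihash key derived from the node's CID, as done by the Postgres publisher later in this diff. A minimal sketch of that derivation, assuming the go-ipfs helper packages the publisher imports (`mhKeyForRaw` is a hypothetical helper, not part of the repo):

package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	dshelp "github.com/ipfs/go-ipfs-ds-help"
	"github.com/multiformats/go-multihash"
)

// MEthStateTrie mirrors the eth-state-trie multicodec used by ipld.RawdataToCid.
const MEthStateTrie = 0x96

// mhKeyForRaw derives the public.blocks key for a raw trie node:
// keccak-256 multihash -> CIDv1 -> datastore key -> blockstore prefix.
func mhKeyForRaw(codec uint64, raw []byte) (string, error) {
	mh, err := multihash.Sum(raw, multihash.KECCAK_256, -1)
	if err != nil {
		return "", err
	}
	c := cid.NewCidV1(codec, mh)
	return blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String(), nil
}

func main() {
	key, _ := mhKeyForRaw(MEthStateTrie, []byte{0x01})
	fmt.Println(key)
}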
@ -1,26 +0,0 @@
package mock

import (
	"fmt"

	"github.com/golang/mock/gomock"
)

type anyOfMatcher struct {
	values []interface{}
}

func (m anyOfMatcher) Matches(x interface{}) bool {
	for _, v := range m.values {
		if gomock.Eq(v).Matches(x) {
			return true
		}
	}
	return false
}

func (m anyOfMatcher) String() string {
	return fmt.Sprintf("is equal to any of %+v", m.values)
}

func AnyOf(xs ...interface{}) anyOfMatcher {
	return anyOfMatcher{xs}
}
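A brief usage sketch for this matcher (the mock and node fixtures here are hypothetical; `AnyOf` delegates each comparison to `gomock.Eq`, so the expectation matches when the argument equals any of the given values):

// Accept a publish call whose node argument equals either fixture node.
pub.EXPECT().
	PublishStateNode(mock.AnyOf(&nodeA, &nodeB), gomock.Any(), gomock.Any(), gomock.Any()).
	AnyTimes()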
@ -1,251 +0,0 @@
// Copyright © 2020 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package pg

import (
	"context"
	"fmt"
	"math/big"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ipfs/go-cid"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	dshelp "github.com/ipfs/go-ipfs-ds-help"
	"github.com/multiformats/go-multihash"
	"github.com/sirupsen/logrus"
	log "github.com/sirupsen/logrus"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
	"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
	"github.com/ethereum/go-ethereum/statediff/indexer/shared"
	"github.com/vulcanize/ipld-eth-state-snapshot/pkg/prom"
	snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
)

var _ snapt.Publisher = (*publisher)(nil)

const logInterval = 1 * time.Minute

// publisher is a wrapper around the DB.
type publisher struct {
	db                 *postgres.DB
	currBatchSize      uint
	stateNodeCounter   uint64
	storageNodeCounter uint64
	codeNodeCounter    uint64
	startTime          time.Time
}

// NewPublisher creates a publisher
func NewPublisher(db *postgres.DB) *publisher {
	return &publisher{
		db:        db,
		startTime: time.Now(),
	}
}

type pubTx struct {
	sql.Tx
	callback func()
}

func (tx pubTx) Rollback() error { return tx.Tx.Rollback(context.Background()) }

func (tx pubTx) Commit() error {
	if tx.callback != nil {
		defer tx.callback()
	}
	return tx.Tx.Commit(context.Background())
}

func (tx pubTx) Exec(sql string, args ...interface{}) (sql.Result, error) {
	return tx.Tx.Exec(context.Background(), sql, args...)
}

func (p *publisher) BeginTx() (snapt.Tx, error) {
	tx, err := p.db.Begin(context.Background())
	if err != nil {
		return nil, err
	}
	go p.logNodeCounters()
	return pubTx{tx, func() {
		p.printNodeCounters("final stats")
	}}, nil
}

// publishRaw derives a cid from the raw bytes and the provided codec and multihash type, and writes it to the db tx;
// returns the CID and the blockstore-prefixed multihash key
func (tx pubTx) publishRaw(codec uint64, raw []byte, height *big.Int) (cid, prefixedKey string, err error) {
	c, err := ipld.RawdataToCid(codec, raw, multihash.KECCAK_256)
	if err != nil {
		return
	}
	cid = c.String()
	prefixedKey, err = tx.publishIPLD(c, raw, height)
	return
}

func (tx pubTx) publishIPLD(c cid.Cid, raw []byte, height *big.Int) (string, error) {
	dbKey := dshelp.MultihashToDsKey(c.Hash())
	prefixedKey := blockstore.BlockPrefix.String() + dbKey.String()
	_, err := tx.Exec(snapt.TableIPLDBlock.ToInsertStatement(), height.Uint64(), prefixedKey, raw)
	return prefixedKey, err
}

// PublishHeader writes the header to the ipfs backing pg datastore and adds secondary indexes in the header_cids table
func (p *publisher) PublishHeader(header *types.Header) (err error) {
	headerNode, err := ipld.NewEthHeader(header)
	if err != nil {
		return err
	}

	snapTx, err := p.db.Begin(context.Background())
	if err != nil {
		return err
	}
	tx := pubTx{snapTx, nil}
	defer func() {
		err = snapt.CommitOrRollback(tx, err)
		if err != nil {
			logrus.Errorf("CommitOrRollback failed: %s", err)
		}
	}()

	if _, err = tx.publishIPLD(headerNode.Cid(), headerNode.RawData(), header.Number); err != nil {
		return err
	}

	mhKey := shared.MultihashKeyFromCID(headerNode.Cid())
	_, err = tx.Exec(snapt.TableHeader.ToInsertStatement(), header.Number.Uint64(), header.Hash().Hex(),
		header.ParentHash.Hex(), headerNode.Cid().String(), "0", p.db.NodeID(), "0",
		header.Root.Hex(), header.TxHash.Hex(), header.ReceiptHash.Hex(), header.UncleHash.Hex(),
		header.Bloom.Bytes(), header.Time, mhKey, 0, header.Coinbase.String())
	return err
}

// PublishStateNode writes the state node to the ipfs backing datastore and adds secondary indexes in the state_cids table
func (p *publisher) PublishStateNode(node *snapt.Node, headerID string, height *big.Int, snapTx snapt.Tx) error {
	var stateKey string
	if !snapt.IsNullHash(node.Key) {
		stateKey = node.Key.Hex()
	}

	tx := snapTx.(pubTx)
	stateCIDStr, mhKey, err := tx.publishRaw(ipld.MEthStateTrie, node.Value, height)
	if err != nil {
		return err
	}

	_, err = tx.Exec(snapt.TableStateNode.ToInsertStatement(),
		height.Uint64(), headerID, stateKey, stateCIDStr, node.Path, node.NodeType, false, mhKey)
	if err != nil {
		return err
	}
	// increment the state node counter
	atomic.AddUint64(&p.stateNodeCounter, 1)
	prom.IncStateNodeCount()

	// increment the current batch size counter
	p.currBatchSize += 2
	return err
}

// PublishStorageNode writes the storage node to the ipfs backing pg datastore and adds secondary indexes in the storage_cids table
func (p *publisher) PublishStorageNode(node *snapt.Node, headerID string, height *big.Int, statePath []byte, snapTx snapt.Tx) error {
	var storageKey string
	if !snapt.IsNullHash(node.Key) {
		storageKey = node.Key.Hex()
	}

	tx := snapTx.(pubTx)
	storageCIDStr, mhKey, err := tx.publishRaw(ipld.MEthStorageTrie, node.Value, height)
	if err != nil {
		return err
	}

	_, err = tx.Exec(snapt.TableStorageNode.ToInsertStatement(),
		height.Uint64(), headerID, statePath, storageKey, storageCIDStr, node.Path, node.NodeType, false, mhKey)
	if err != nil {
		return err
	}
	// increment the storage node counter
	atomic.AddUint64(&p.storageNodeCounter, 1)
	prom.IncStorageNodeCount()

	// increment the current batch size counter
	p.currBatchSize += 2
	return err
}

// PublishCode writes code to the ipfs backing pg datastore
func (p *publisher) PublishCode(height *big.Int, codeHash common.Hash, codeBytes []byte, snapTx snapt.Tx) error {
	// no codec for code; this doesn't matter though, since the blockstore key is multihash-derived
	mhKey, err := shared.MultihashKeyFromKeccak256(codeHash)
	if err != nil {
		return fmt.Errorf("error deriving multihash key from codehash: %v", err)
	}

	tx := snapTx.(pubTx)
	if _, err = tx.Exec(snapt.TableIPLDBlock.ToInsertStatement(), height.Uint64(), mhKey, codeBytes); err != nil {
		return fmt.Errorf("error publishing code IPLD: %v", err)
	}

	// increment the code node counter
	atomic.AddUint64(&p.codeNodeCounter, 1)
	prom.IncCodeNodeCount()

	p.currBatchSize++
	return nil
}

func (p *publisher) PrepareTxForBatch(tx snapt.Tx, maxBatchSize uint) (snapt.Tx, error) {
	var err error
	// maximum batch size reached: commit the current transaction and begin a new one
	if maxBatchSize <= p.currBatchSize {
		if err = tx.Commit(); err != nil {
			return nil, err
		}

		snapTx, err := p.db.Begin(context.Background())
		if err != nil {
			return nil, err
		}
		tx = pubTx{Tx: snapTx}

		p.currBatchSize = 0
	}

	return tx, nil
}

// logNodeCounters periodically logs the number of nodes processed.
func (p *publisher) logNodeCounters() {
	t := time.NewTicker(logInterval)
	for range t.C {
		p.printNodeCounters("progress")
	}
}

func (p *publisher) printNodeCounters(msg string) {
	log.WithFields(log.Fields{
		"runtime":       time.Since(p.startTime).String(),
		"state nodes":   atomic.LoadUint64(&p.stateNodeCounter),
		"storage nodes": atomic.LoadUint64(&p.storageNodeCounter),
		"code nodes":    atomic.LoadUint64(&p.codeNodeCounter),
	}).Info(msg)
}
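To show how the pieces above fit together, here is a hypothetical driver (not part of the package) that publishes a header and a batch of state nodes, letting PrepareTxForBatch commit and reopen the transaction as the batch fills; the batch size of 100 is an arbitrary assumption:

// publishAll is an illustrative sketch; fixture values and batch size are assumptions.
func publishAll(pub *publisher, header *types.Header, nodes []snapt.Node) error {
	if err := pub.PublishHeader(header); err != nil {
		return err
	}
	tx, err := pub.BeginTx()
	if err != nil {
		return err
	}
	headerID := header.Hash().String()
	for i := range nodes {
		// commits and begins a new tx once currBatchSize reaches the limit
		if tx, err = pub.PrepareTxForBatch(tx, 100); err != nil {
			return err
		}
		if err = pub.PublishStateNode(&nodes[i], headerID, header.Number, tx); err != nil {
			return err
		}
	}
	return tx.Commit()
}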
@ -1,72 +0,0 @@
package pg

import (
	"context"
	"testing"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
	"github.com/ethereum/go-ethereum/statediff/indexer/ipld"

	fixt "github.com/vulcanize/ipld-eth-state-snapshot/fixture"
	snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
	"github.com/vulcanize/ipld-eth-state-snapshot/test"
)

var (
	pgConfig = test.DefaultPgConfig
	nodeInfo = test.DefaultNodeInfo
	// tables ordered according to fkey dependencies
	allTables = []*snapt.Table{
		&snapt.TableIPLDBlock,
		&snapt.TableNodeInfo,
		&snapt.TableHeader,
		&snapt.TableStateNode,
		&snapt.TableStorageNode,
	}
)

func writeData(t *testing.T, db *postgres.DB) *publisher {
	pub := NewPublisher(db)
	test.NoError(t, pub.PublishHeader(&fixt.Block1_Header))
	tx, err := pub.BeginTx()
	test.NoError(t, err)

	headerID := fixt.Block1_Header.Hash().String()
	test.NoError(t, pub.PublishStateNode(&fixt.Block1_StateNode0, headerID, fixt.Block1_Header.Number, tx))

	test.NoError(t, tx.Commit())
	return pub
}

// Note: the DB user requires membership in the "pg_read_server_files" role
func TestBasic(t *testing.T) {
	test.NeedsDB(t)

	ctx := context.Background()
	driver, err := postgres.NewSQLXDriver(ctx, pgConfig, nodeInfo)
	test.NoError(t, err)
	db := postgres.NewPostgresDB(driver)

	sql.TearDownDB(t, db)

	_ = writeData(t, db)

	// check the header was successfully committed
	pgQueryHeader := `SELECT cid, block_hash
		FROM eth.header_cids
		WHERE block_number = $1`
	type res struct {
		CID       string
		BlockHash string
	}
	var header res
	err = db.QueryRow(ctx, pgQueryHeader, fixt.Block1_Header.Number.Uint64()).Scan(
		&header.CID, &header.BlockHash)
	test.NoError(t, err)

	headerNode, err := ipld.NewEthHeader(&fixt.Block1_Header)
	test.NoError(t, err)
	test.ExpectEqual(t, headerNode.Cid().String(), header.CID)
	test.ExpectEqual(t, fixt.Block1_Header.Hash().String(), header.BlockHash)
}
@ -16,26 +16,26 @@
 package snapshot
 
 import (
-	"bytes"
 	"context"
-	"errors"
 	"fmt"
 	"math/big"
+	"os"
+	"os/signal"
+	"sync"
+	"syscall"
 
+	"github.com/cerc-io/ipld-eth-state-snapshot/pkg/prom"
+	statediff "github.com/cerc-io/plugeth-statediff"
+	"github.com/cerc-io/plugeth-statediff/adapt"
+	"github.com/cerc-io/plugeth-statediff/indexer"
+	"github.com/cerc-io/plugeth-statediff/types"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
-	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/rlp"
-	"github.com/ethereum/go-ethereum/trie"
 	log "github.com/sirupsen/logrus"
-	"golang.org/x/sync/errgroup"
-
-	iter "github.com/vulcanize/go-eth-state-node-iterator"
-	"github.com/vulcanize/ipld-eth-state-snapshot/pkg/prom"
-	. "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
 )
 
 var (
@ -49,471 +49,124 @@ var (
 // Service holds ethDB and stateDB to read data from lvldb and Publisher
 // to publish trie in postgres DB.
 type Service struct {
-	watchingAddresses bool
 	ethDB         ethdb.Database
 	stateDB       state.Database
-	ipfsPublisher Publisher
+	indexer       indexer.Indexer
 	maxBatchSize  uint
-	tracker       iteratorTracker
 	recoveryFile  string
 }
 
-func NewLevelDB(con *EthConfig) (ethdb.Database, error) {
-	edb, err := rawdb.NewLevelDBDatabaseWithFreezer(
-		con.LevelDBPath, 1024, 256, con.AncientDBPath, "ipld-eth-state-snapshot", true,
-	)
-	if err != nil {
-		return nil, fmt.Errorf("unable to create NewLevelDBDatabaseWithFreezer: %s", err)
-	}
-	return edb, nil
+func NewEthDB(con *EthDBConfig) (ethdb.Database, error) {
+	return rawdb.Open(rawdb.OpenOptions{
+		Directory:         con.DBPath,
+		AncientsDirectory: con.AncientDBPath,
+		Namespace:         "ipld-eth-state-snapshot",
+		Cache:             1024,
+		Handles:           256,
+		ReadOnly:          true,
+	})
 }
 
 // NewSnapshotService creates Service.
-func NewSnapshotService(edb ethdb.Database, pub Publisher, recoveryFile string) (*Service, error) {
+func NewSnapshotService(edb ethdb.Database, indexer indexer.Indexer, recoveryFile string) (*Service, error) {
 	return &Service{
 		ethDB:         edb,
 		stateDB:       state.NewDatabase(edb),
-		ipfsPublisher: pub,
+		indexer:       indexer,
 		maxBatchSize:  defaultBatchSize,
 		recoveryFile:  recoveryFile,
 	}, nil
 }
 
 type SnapshotParams struct {
-	WatchedAddresses map[common.Address]struct{}
+	WatchedAddresses []common.Address
 	Height           uint64
 	Workers          uint
 }
 
 func (s *Service) CreateSnapshot(params SnapshotParams) error {
-	paths := make([][]byte, 0, len(params.WatchedAddresses))
-	for addr := range params.WatchedAddresses {
-		paths = append(paths, keybytesToHex(crypto.Keccak256(addr.Bytes())))
-	}
-	s.watchingAddresses = len(paths) > 0
 	// extract header from lvldb and publish to PG-IPFS
 	// hold onto the headerID so that we can link the state nodes to this header
-	log.Infof("Creating snapshot at height %d", params.Height)
 	hash := rawdb.ReadCanonicalHash(s.ethDB, params.Height)
 	header := rawdb.ReadHeader(s.ethDB, hash, params.Height)
 	if header == nil {
 		return fmt.Errorf("unable to read canonical header at height %d", params.Height)
 	}
+	log.WithField("height", params.Height).WithField("hash", hash).Info("Creating snapshot")
 
-	log.Infof("head hash: %s head height: %d", hash.Hex(), params.Height)
-
-	err := s.ipfsPublisher.PublishHeader(header)
-	if err != nil {
-		return err
-	}
-
-	tree, err := s.stateDB.OpenTrie(header.Root)
-	if err != nil {
-		return err
-	}
-
-	headerID := header.Hash().String()
-
+	// Context for snapshot work
 	ctx, cancelCtx := context.WithCancel(context.Background())
-	s.tracker = newTracker(s.recoveryFile, int(params.Workers))
-	s.tracker.captureSignal(cancelCtx)
+	defer cancelCtx()
+	// Cancel context on receiving a signal. On cancellation, all tracked iterators complete
+	// processing of their current node before stopping.
+	captureSignal(cancelCtx)
 
-	var iters []trie.NodeIterator
-	// attempt to restore from recovery file if it exists
-	iters, err = s.tracker.restore(tree)
+	var err error
+	tx := s.indexer.BeginTx(header.Number, ctx)
+	defer tx.RollbackOnFailure(err)
+
+	var headerid string
+	headerid, err = s.indexer.PushHeader(tx, header, big.NewInt(0), big.NewInt(0))
 	if err != nil {
-		log.Errorf("restore error: %s", err.Error())
 		return err
 	}
 
-	if iters != nil {
-		log.Debugf("restored iterators; count: %d", len(iters))
-		if params.Workers < uint(len(iters)) {
-			return fmt.Errorf(
-				"number of recovered workers (%d) is greater than number configured (%d)",
-				len(iters), params.Workers,
-			)
-		}
-	} else {
-		// nothing to restore
-		log.Debugf("no iterators to restore")
-		if params.Workers > 1 {
-			iters = iter.SubtrieIterators(tree, params.Workers)
-		} else {
-			iters = []trie.NodeIterator{tree.NodeIterator(nil)}
-		}
-		for i, it := range iters {
-			// recovered path is nil for fresh iterators
-			iters[i] = s.tracker.tracked(it, nil)
-		}
-	}
+	tr := prom.NewTracker(s.recoveryFile, params.Workers)
 
 	defer func() {
-		err := s.tracker.haltAndDump()
+		err := tr.CloseAndSave()
 		if err != nil {
 			log.Errorf("failed to write recovery file: %v", err)
 		}
 	}()
 
-	switch {
-	case len(iters) > 1:
-		return s.createSnapshotAsync(ctx, iters, headerID, new(big.Int).SetUint64(params.Height), paths)
-	case len(iters) == 1:
-		return s.createSnapshot(ctx, iters[0], headerID, new(big.Int).SetUint64(params.Height), paths)
-	default:
-		return nil
-	}
-}
-
-// Create snapshot up to head (ignores height param)
-func (s *Service) CreateLatestSnapshot(workers uint, watchedAddresses map[common.Address]struct{}) error {
+	var nodeMtx, ipldMtx sync.Mutex
+	nodeSink := func(node types.StateLeafNode) error {
+		nodeMtx.Lock()
+		defer nodeMtx.Unlock()
+		prom.IncStateNodeCount()
+		prom.AddStorageNodeCount(len(node.StorageDiff))
+		return s.indexer.PushStateNode(tx, node, headerid)
+	}
+	ipldSink := func(c types.IPLD) error {
+		ipldMtx.Lock()
+		defer ipldMtx.Unlock()
+		return s.indexer.PushIPLD(tx, c)
+	}
+
+	sdparams := statediff.Params{
+		WatchedAddresses: params.WatchedAddresses,
+	}
+	sdparams.ComputeWatchedAddressesLeafPaths()
+	builder := statediff.NewBuilder(adapt.GethStateView(s.stateDB))
+	builder.SetSubtrieWorkers(params.Workers)
+	if err = builder.WriteStateSnapshot(ctx, header.Root, sdparams, nodeSink, ipldSink, tr); err != nil {
+		return err
+	}
+
+	if err = tx.Submit(); err != nil {
+		return fmt.Errorf("batch transaction submission failed: %w", err)
+	}
+	return err
+}
+
+// CreateLatestSnapshot creates a snapshot at head (ignores the height param)
+func (s *Service) CreateLatestSnapshot(workers uint, watchedAddresses []common.Address) error {
 	log.Info("Creating snapshot at head")
 	hash := rawdb.ReadHeadHeaderHash(s.ethDB)
 	height := rawdb.ReadHeaderNumber(s.ethDB, hash)
 	if height == nil {
-		return fmt.Errorf("unable to read header height for header hash %s", hash.String())
+		return fmt.Errorf("unable to read header height for header hash %s", hash)
 	}
 	return s.CreateSnapshot(SnapshotParams{Height: *height, Workers: workers, WatchedAddresses: watchedAddresses})
 }
 
-type nodeResult struct {
-	node     Node
-	elements []interface{}
-}
-
-func resolveNode(nodePath []byte, it trie.NodeIterator, trieDB *trie.Database) (*nodeResult, error) {
-	// "leaf" nodes are actually "value" nodes, whose parents are the actual leaves
-	if it.Leaf() {
-		return nil, nil
-	}
-	if IsNullHash(it.Hash()) {
-		return nil, nil
-	}
-
-	// use the full node path
-	// (it.Path() will give a partial path in case of subtrie iterators)
-	path := make([]byte, len(nodePath))
-	copy(path, nodePath)
-	n, err := trieDB.Node(it.Hash())
-	if err != nil {
-		return nil, err
-	}
-	var elements []interface{}
-	if err := rlp.DecodeBytes(n, &elements); err != nil {
-		return nil, err
-	}
-	ty, err := CheckKeyType(elements)
-	if err != nil {
-		return nil, err
-	}
-	return &nodeResult{
-		node: Node{
-			NodeType: ty,
-			Path:     path,
-			Value:    n,
-		},
-		elements: elements,
-	}, nil
-}
-
-// validPath checks if a path is a prefix to any one of the paths in the given list
-func validPath(currentPath []byte, seekingPaths [][]byte) bool {
-	for _, seekingPath := range seekingPaths {
-		if bytes.HasPrefix(seekingPath, currentPath) {
-			return true
-		}
-	}
-	return false
-}
-
-// createSnapshot performs traversal using the given iterator and indexes the nodes,
-// optionally filtering them according to a list of paths
-func (s *Service) createSnapshot(ctx context.Context, it trie.NodeIterator, headerID string, height *big.Int, seekingPaths [][]byte) error {
-	tx, err := s.ipfsPublisher.BeginTx()
-	if err != nil {
-		return err
-	}
-	defer func() {
-		err = CommitOrRollback(tx, err)
-		if err != nil {
-			log.Errorf("CommitOrRollback failed: %s", err)
-		}
-	}()
-
-	// path (from recovery dump) to be seeked on recovery
-	// nil in case of a fresh iterator
-	var recoveredPath []byte
-
-	// latest path seeked from the concurrent iterator
-	// (updated after a node is processed)
-	// nil in case of a fresh iterator; initially holds the recovered path in case of a recovered iterator
-	var seekedPath *[]byte
-
-	// end path for the concurrent iterator
-	var endPath []byte
-
-	if iter, ok := it.(*trackedIter); ok {
-		seekedPath = &iter.seekedPath
-		recoveredPath = append(recoveredPath, *seekedPath...)
-		endPath = iter.endPath
-	} else {
-		return errors.New("untracked iterator")
-	}
-
-	return s.createSubTrieSnapshot(ctx, tx, nil, it, recoveredPath, seekedPath, endPath, headerID, height, seekingPaths)
-}
-
-// createSubTrieSnapshot processes nodes at the next level of a trie using the given subtrie iterator,
-// continually updating seekedPath with the path of the latest processed node
-func (s *Service) createSubTrieSnapshot(ctx context.Context, tx Tx, prefixPath []byte, subTrieIt trie.NodeIterator, recoveredPath []byte, seekedPath *[]byte, endPath []byte, headerID string, height *big.Int, seekingPaths [][]byte) error {
-	prom.IncActiveIterCount()
-	defer prom.DecActiveIterCount()
-
-	// descend in the first loop iteration to reach the first child node
-	descend := true
-	for {
-		select {
-		case <-ctx.Done():
-			return errors.New("ctx cancelled")
-		default:
-			if ok := subTrieIt.Next(descend); !ok {
-				return subTrieIt.Error()
-			}
-
-			// to avoid descending further
-			descend = false
-
-			// move on to the next node if the current path is empty
-			// occurs when reaching the root node, or just before reaching the first child of a subtrie in case of some concurrent iterators
-			if bytes.Equal(subTrieIt.Path(), []byte{}) {
-				// if the node path is empty and the prefix is nil, it's the root node
-				if prefixPath == nil {
-					// create a snapshot of the node; if it is a leaf this will also create a snapshot of the entire storage trie
-					if err := s.createNodeSnapshot(tx, subTrieIt.Path(), subTrieIt, headerID, height); err != nil {
-						return err
-					}
-					updateSeekedPath(seekedPath, subTrieIt.Path())
-				}
-
-				if ok := subTrieIt.Next(true); !ok {
-					// return if no further nodes are available
-					return subTrieIt.Error()
-				}
-			}
-
-			// create the full node path, as it.Path() doesn't include the path before the subtrie root
-			nodePath := append(prefixPath, subTrieIt.Path()...)
-
-			// check the iterator upper bound before processing the node
-			// required to avoid processing duplicate nodes:
-			// if a node is considered more than once,
-			// its whole subtrie is re-processed, giving a large number of duplicate nodes
-			if !checkUpperPathBound(nodePath, endPath) {
-				// fmt.Println("failed checkUpperPathBound", nodePath, endPath)
-				// explicitly stop the iterator in the tracker if the upper bound check fails
-				// required since it won't be marked as stopped if further nodes are still available
-				if trackedSubtrieIt, ok := subTrieIt.(*trackedIter); ok {
-					s.tracker.stopIter(trackedSubtrieIt)
-				}
-				return subTrieIt.Error()
-			}
-
-			// skip the current node if it's before the recovered path and not along the recovered path
-			// nodes at the same level that are before the recovered path are ignored to avoid duplicate nodes
-			// however, nodes along the recovered path are re-considered for redundancy
-			if bytes.Compare(recoveredPath, nodePath) > 0 &&
-				// a node is along the recovered path if its path is shorter or equal in length
-				// and is part of the recovered path
-				!(len(nodePath) <= len(recoveredPath) && bytes.Equal(recoveredPath[:len(nodePath)], nodePath)) {
-				continue
-			}
-
-			// ignore the node if it is not along paths of interest
-			if s.watchingAddresses && !validPath(nodePath, seekingPaths) {
-				// consider this node as processed since it is getting ignored,
-				// and update the seeked path
-				updateSeekedPath(seekedPath, nodePath)
-				// move on to the next node
-				continue
-			}
-
-			// if the node is along paths of interest,
-			// create a snapshot of the node; if it is a leaf this will also create a snapshot of the entire storage trie
-			if err := s.createNodeSnapshot(tx, nodePath, subTrieIt, headerID, height); err != nil {
-				return err
-			}
-			// update the seeked path after the node has been processed
-			updateSeekedPath(seekedPath, nodePath)
-
-			// create an iterator to traverse and process the next level of this subTrie
-			nextSubTrieIt, err := s.createSubTrieIt(nodePath, subTrieIt.Hash(), recoveredPath)
-			if err != nil {
-				return err
-			}
-			// pass on the seekedPath of the tracked concurrent iterator to be updated
-			if err := s.createSubTrieSnapshot(ctx, tx, nodePath, nextSubTrieIt, recoveredPath, seekedPath, endPath, headerID, height, seekingPaths); err != nil {
-				return err
-			}
-		}
-	}
-}
-
-// createSubTrieIt creates an iterator to traverse the subtrie of the node with the given hash;
-// the subtrie iterator is initialized at a node from the recovered path at the corresponding level (if available)
-func (s *Service) createSubTrieIt(prefixPath []byte, hash common.Hash, recoveredPath []byte) (trie.NodeIterator, error) {
-	// skip directly to the node from the recovered path at the corresponding level
-	// applicable if:
-	//   the node path is behind the recovered path
-	//   and the recovered path includes the prefix path
-	var startPath []byte
-	if bytes.Compare(recoveredPath, prefixPath) > 0 &&
-		len(recoveredPath) > len(prefixPath) &&
-		bytes.Equal(recoveredPath[:len(prefixPath)], prefixPath) {
-		startPath = append(startPath, recoveredPath[len(prefixPath):len(prefixPath)+1]...)
-		// force the lower bound path to an even length
-		// (required by HexToKeyBytes())
-		if len(startPath)&0b1 == 1 {
-			// decrement first to avoid skipped nodes
-			decrementPath(startPath)
-			startPath = append(startPath, 0)
-		}
-	}
-
-	// create a subTrie iterator with the given hash
-	subTrie, err := s.stateDB.OpenTrie(hash)
-	if err != nil {
-		return nil, err
-	}
-
-	return subTrie.NodeIterator(iter.HexToKeyBytes(startPath)), nil
-}
-
-// createNodeSnapshot indexes the current node;
-// the entire storage trie is also indexed (if available)
-func (s *Service) createNodeSnapshot(tx Tx, path []byte, it trie.NodeIterator, headerID string, height *big.Int) error {
-	res, err := resolveNode(path, it, s.stateDB.TrieDB())
-	if err != nil {
-		return err
-	}
-	if res == nil {
-		return nil
-	}
-
-	tx, err = s.ipfsPublisher.PrepareTxForBatch(tx, s.maxBatchSize)
-	if err != nil {
-		return err
-	}
-
-	switch res.node.NodeType {
-	case Leaf:
-		// if the node is a leaf, decode the account and publish the associated storage trie
-		// nodes if there are any
-		var account types.StateAccount
-		if err := rlp.DecodeBytes(res.elements[1].([]byte), &account); err != nil {
-			return fmt.Errorf(
-				"error decoding account for leaf node at path %x\nerror: %v", res.node.Path, err)
-		}
-		partialPath := trie.CompactToHex(res.elements[0].([]byte))
-		valueNodePath := append(res.node.Path, partialPath...)
-		encodedPath := trie.HexToCompact(valueNodePath)
-		leafKey := encodedPath[1:]
-		res.node.Key = common.BytesToHash(leafKey)
-		if err := s.ipfsPublisher.PublishStateNode(&res.node, headerID, height, tx); err != nil {
-			return err
-		}
-
-		// publish any non-nil code referenced by codehash
-		if !bytes.Equal(account.CodeHash, emptyCodeHash) {
-			codeHash := common.BytesToHash(account.CodeHash)
-			codeBytes := rawdb.ReadCode(s.ethDB, codeHash)
-			if len(codeBytes) == 0 {
-				log.Error("Code is missing", "account", common.BytesToHash(it.LeafKey()))
-				return errors.New("missing code")
-			}
-
-			if err = s.ipfsPublisher.PublishCode(height, codeHash, codeBytes, tx); err != nil {
-				return err
-			}
-		}
-
-		if _, err = s.storageSnapshot(account.Root, headerID, height, res.node.Path, tx); err != nil {
-			return fmt.Errorf("failed building storage snapshot for account %+v\r\nerror: %w", account, err)
-		}
-	case Extension, Branch:
-		res.node.Key = common.BytesToHash([]byte{})
-		if err := s.ipfsPublisher.PublishStateNode(&res.node, headerID, height, tx); err != nil {
-			return err
-		}
-	default:
-		return errors.New("unexpected node type")
-	}
-	return it.Error()
-}
-
-// Full-trie concurrent snapshot
-func (s *Service) createSnapshotAsync(ctx context.Context, iters []trie.NodeIterator, headerID string, height *big.Int, seekingPaths [][]byte) error {
-	// use an errgroup with a context to stop all concurrent iterators if one runs into an error;
-	// each concurrent iterator completes processing its current node before stopping
-	g, ctx := errgroup.WithContext(ctx)
-	for _, it := range iters {
-		func(it trie.NodeIterator) {
-			g.Go(func() error {
-				return s.createSnapshot(ctx, it, headerID, height, seekingPaths)
-			})
-		}(it)
-	}
-
-	return g.Wait()
-}
-
-func (s *Service) storageSnapshot(sr common.Hash, headerID string, height *big.Int, statePath []byte, tx Tx) (Tx, error) {
-	if bytes.Equal(sr.Bytes(), emptyContractRoot.Bytes()) {
-		return tx, nil
-	}
-
-	sTrie, err := s.stateDB.OpenTrie(sr)
-	if err != nil {
-		return nil, err
-	}
-
-	it := sTrie.NodeIterator(make([]byte, 0))
-	for it.Next(true) {
-		res, err := resolveNode(it.Path(), it, s.stateDB.TrieDB())
-		if err != nil {
-			return nil, err
-		}
-		if res == nil {
-			continue
-		}
-
-		tx, err = s.ipfsPublisher.PrepareTxForBatch(tx, s.maxBatchSize)
-		if err != nil {
-			return nil, err
-		}
-
-		var nodeData []byte
-		nodeData, err = s.stateDB.TrieDB().Node(it.Hash())
-		if err != nil {
-			return nil, err
-		}
-		res.node.Value = nodeData
-
-		switch res.node.NodeType {
-		case Leaf:
-			partialPath := trie.CompactToHex(res.elements[0].([]byte))
-			valueNodePath := append(res.node.Path, partialPath...)
-			encodedPath := trie.HexToCompact(valueNodePath)
-			leafKey := encodedPath[1:]
-			res.node.Key = common.BytesToHash(leafKey)
-		case Extension, Branch:
-			res.node.Key = common.BytesToHash([]byte{})
-		default:
-			return nil, errors.New("unexpected node type")
-		}
-		if err = s.ipfsPublisher.PublishStorageNode(&res.node, headerID, height, statePath, tx); err != nil {
-			return nil, err
-		}
-	}
-
-	return tx, it.Error()
-}
+func captureSignal(cb func()) {
+	sigChan := make(chan os.Signal, 1)
+	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
+	go func() {
+		sig := <-sigChan
+		log.Errorf("Signal received (%v), stopping", sig)
+		cb()
+	}()
+}
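Taken together, the rewritten service is driven roughly as follows. A hedged end-to-end sketch (the wrapper function, path values, and worker count are hypothetical; `idx` is any concrete `indexer.Indexer`):

// snapshotAt opens the chain database read-only and indexes the state trie at a height.
func snapshotAt(idx indexer.Indexer, dbPath, ancientPath string, height uint64) error {
	edb, err := NewEthDB(&EthDBConfig{DBPath: dbPath, AncientDBPath: ancientPath})
	if err != nil {
		return err
	}
	defer edb.Close()

	service, err := NewSnapshotService(edb, idx, "recovery.csv")
	if err != nil {
		return err
	}
	return service.CreateSnapshot(SnapshotParams{Height: height, Workers: 4})
}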
@ -1,570 +1,269 @@
|
|||||||
package snapshot
|
package snapshot_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"math/big"
|
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"os"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/cerc-io/eth-testing/chains"
|
||||||
|
"github.com/cerc-io/plugeth-statediff/indexer/models"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/golang/mock/gomock"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
fixt "github.com/vulcanize/ipld-eth-state-snapshot/fixture"
|
"github.com/cerc-io/ipld-eth-state-snapshot/internal/mocks"
|
||||||
mock "github.com/vulcanize/ipld-eth-state-snapshot/mocks/snapshot"
|
. "github.com/cerc-io/ipld-eth-state-snapshot/pkg/snapshot"
|
||||||
snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
|
fixture "github.com/cerc-io/ipld-eth-state-snapshot/test"
|
||||||
"github.com/vulcanize/ipld-eth-state-snapshot/test"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
stateNodeNotIndexedErr = "state node not indexed for path %v"
|
rng = rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||||
storageNodeNotIndexedErr = "storage node not indexed for state path %v, storage path %v"
|
|
||||||
|
|
||||||
unexpectedStateNodeErr = "got unexpected state node for path %v"
|
// Note: block 1 doesn't have storage nodes. TODO: add fixtures with storage nodes
|
||||||
unexpectedStorageNodeErr = "got unexpected storage node for state path %v, storage path %v"
|
// chainAblock1StateKeys = sliceToSet(fixture.ChainA_Block1_StateNodeLeafKeys)
|
||||||
|
chainAblock1IpldCids = sliceToSet(fixture.ChainA_Block1_IpldCids)
|
||||||
|
|
||||||
extraNodesIndexedErr = "number of nodes indexed (%v) is more than expected (max %v)"
|
subtrieWorkerCases = []uint{1, 4, 8, 16, 32}
|
||||||
)
|
)
|
||||||
|
|
||||||
func testConfig(leveldbpath, ancientdbpath string) *Config {
|
type selectiveData struct {
|
||||||
|
StateNodes map[string]*models.StateNodeModel
|
||||||
|
StorageNodes map[string]map[string]*models.StorageNodeModel
|
||||||
|
}
|
||||||
|
|
||||||
|
func testConfig(ethdbpath, ancientdbpath string) *Config {
|
||||||
return &Config{
|
return &Config{
|
||||||
Eth: &EthConfig{
|
Eth: &EthDBConfig{
|
||||||
LevelDBPath: leveldbpath,
|
DBPath: ethdbpath,
|
||||||
AncientDBPath: ancientdbpath,
|
AncientDBPath: ancientdbpath,
|
||||||
NodeInfo: test.DefaultNodeInfo,
|
NodeInfo: DefaultNodeInfo,
|
||||||
},
|
|
||||||
DB: &DBConfig{
|
|
||||||
URI: test.DefaultPgConfig.DbConnectionString(),
|
|
||||||
ConnConfig: test.DefaultPgConfig,
|
|
||||||
},
|
},
|
||||||
|
DB: &DefaultPgConfig,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeMocks(t *testing.T) (*mock.MockPublisher, *mock.MockTx) {
|
func TestSnapshot(t *testing.T) {
|
||||||
ctl := gomock.NewController(t)
|
runCase := func(t *testing.T, workers uint) {
|
||||||
pub := mock.NewMockPublisher(ctl)
|
params := SnapshotParams{Height: 1, Workers: workers}
|
||||||
tx := mock.NewMockTx(ctl)
|
data := doSnapshot(t, fixture.ChainA, params)
|
||||||
return pub, tx
|
verify_chainAblock1(t, data)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCreateSnapshot(t *testing.T) {
|
for _, tc := range subtrieWorkerCases {
|
||||||
runCase := func(t *testing.T, workers int) {
|
t.Run(fmt.Sprintf("with %d subtries", tc), func(t *testing.T) { runCase(t, tc) })
|
||||||
// map: expected state path -> struct{}{}
|
|
||||||
expectedStateNodePaths := sync.Map{}
|
|
||||||
for _, path := range fixt.Block1_StateNodePaths {
|
|
||||||
expectedStateNodePaths.Store(string(path), struct{}{})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub, tx := makeMocks(t)
|
|
||||||
pub.EXPECT().PublishHeader(gomock.Eq(&fixt.Block1_Header))
|
|
||||||
pub.EXPECT().BeginTx().Return(tx, nil).
|
|
||||||
Times(workers)
|
|
||||||
pub.EXPECT().PrepareTxForBatch(gomock.Any(), gomock.Any()).Return(tx, nil).
|
|
||||||
AnyTimes()
|
|
||||||
tx.EXPECT().Commit().
|
|
||||||
Times(workers)
|
|
||||||
pub.EXPECT().PublishStateNode(
|
|
||||||
gomock.Any(),
|
|
||||||
gomock.Eq(fixt.Block1_Header.Hash().String()),
|
|
||||||
gomock.Eq(fixt.Block1_Header.Number),
|
|
||||||
gomock.Eq(tx)).
|
|
||||||
DoAndReturn(func(node *snapt.Node, _ string, _ *big.Int, _ snapt.Tx) error {
|
|
||||||
if _, ok := expectedStateNodePaths.Load(string(node.Path)); ok {
|
|
||||||
expectedStateNodePaths.Delete(string(node.Path))
|
|
||||||
} else {
|
|
||||||
t.Fatalf(unexpectedStateNodeErr, node.Path)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}).
|
|
||||||
Times(len(fixt.Block1_StateNodePaths))
|
|
||||||
|
|
||||||
// TODO: fixtures for storage node
|
|
||||||
// pub.EXPECT().PublishStorageNode(gomock.Eq(fixt.StorageNode), gomock.Eq(int64(0)), gomock.Any())
|
|
||||||
|
|
||||||
chainDataPath, ancientDataPath := fixt.GetChainDataPath("chaindata")
|
|
||||||
config := testConfig(chainDataPath, ancientDataPath)
|
|
||||||
edb, err := NewLevelDB(config.Eth)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
defer edb.Close()
|
|
||||||
|
|
||||||
recovery := filepath.Join(t.TempDir(), "recover.csv")
|
|
||||||
service, err := NewSnapshotService(edb, pub, recovery)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
params := SnapshotParams{Height: 1, Workers: uint(workers)}
|
|
||||||
err = service.CreateSnapshot(params)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if all expected state nodes are indexed
|
|
||||||
expectedStateNodePaths.Range(func(key, value any) bool {
|
|
||||||
t.Fatalf(stateNodeNotIndexedErr, []byte(key.(string)))
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
testCases := []int{1, 4, 8, 16, 32}
|
|
||||||
for _, tc := range testCases {
|
|
||||||
t.Run("case", func(t *testing.T) { runCase(t, tc) })
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type indexedNode struct {
|
|
||||||
value snapt.Node
|
|
||||||
isIndexed bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type storageNodeKey struct {
|
|
||||||
statePath string
|
|
||||||
storagePath string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccountSelectiveSnapshot(t *testing.T) {
|
func TestAccountSelectiveSnapshot(t *testing.T) {
|
||||||
snapShotHeight := uint64(32)
|
height := uint64(32)
|
||||||
watchedAddresses := map[common.Address]struct{}{
|
watchedAddresses, expected := watchedAccountData_chainBblock32()
|
||||||
common.HexToAddress("0x825a6eec09e44Cb0fa19b84353ad0f7858d7F61a"): {},
|
|
||||||
common.HexToAddress("0x0616F59D291a898e796a1FAD044C5926ed2103eC"): {},
|
runCase := func(t *testing.T, workers uint) {
|
||||||
|
params := SnapshotParams{
|
||||||
|
Height: height,
|
||||||
|
Workers: workers,
|
||||||
|
WatchedAddresses: watchedAddresses,
|
||||||
|
}
|
||||||
|
data := doSnapshot(t, fixture.ChainB, params)
|
||||||
|
expected.verify(t, data)
|
||||||
}
|
}
|
||||||
|
|
||||||
expectedStateNodeIndexes := []int{0, 1, 2, 6}
|
for _, tc := range subtrieWorkerCases {
|
||||||
|
t.Run(fmt.Sprintf("with %d subtries", tc), func(t *testing.T) { runCase(t, tc) })
|
||||||
statePath33 := []byte{3, 3}
|
}
|
||||||
expectedStorageNodeIndexes33 := []int{0, 1, 2, 3, 4, 6, 8}
|
|
||||||
|
|
||||||
statePath12 := []byte{12}
|
|
||||||
expectedStorageNodeIndexes12 := []int{12, 14, 16}
|
|
||||||
|
|
||||||
runCase := func(t *testing.T, workers int) {
|
|
||||||
expectedStateNodes := sync.Map{}
|
|
||||||
|
|
||||||
for _, expectedStateNodeIndex := range expectedStateNodeIndexes {
|
|
||||||
path := fixt.Chain2_Block32_StateNodes[expectedStateNodeIndex].Path
|
|
||||||
expectedStateNodes.Store(string(path), indexedNode{
|
|
||||||
value: fixt.Chain2_Block32_StateNodes[expectedStateNodeIndex],
|
|
||||||
isIndexed: false,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
expectedStorageNodes := sync.Map{}
|
func TestSnapshotRecovery(t *testing.T) {
|
||||||
|
runCase := func(t *testing.T, workers uint, interruptAt uint) {
|
||||||
for _, expectedStorageNodeIndex := range expectedStorageNodeIndexes33 {
|
params := SnapshotParams{Height: 1, Workers: workers}
|
||||||
path := fixt.Chain2_Block32_StorageNodes[expectedStorageNodeIndex].Path
|
data := doSnapshotWithRecovery(t, fixture.ChainA, params, interruptAt)
|
||||||
key := storageNodeKey{
|
verify_chainAblock1(t, data)
|
||||||
statePath: string(statePath33),
|
|
||||||
storagePath: string(path),
|
|
||||||
}
|
|
||||||
value := indexedNode{
|
|
||||||
value: fixt.Chain2_Block32_StorageNodes[expectedStorageNodeIndex].Node,
|
|
||||||
isIndexed: false,
|
|
||||||
}
|
|
||||||
expectedStorageNodes.Store(key, value)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, expectedStorageNodeIndex := range expectedStorageNodeIndexes12 {
|
interrupts := make([]uint, 4)
|
||||||
path := fixt.Chain2_Block32_StorageNodes[expectedStorageNodeIndex].Path
|
for i := 0; i < len(interrupts); i++ {
|
||||||
key := storageNodeKey{
|
N := len(fixture.ChainA_Block1_StateNodeLeafKeys)
|
||||||
statePath: string(statePath12),
|
interrupts[i] = uint(rand.Intn(N/2) + N/4)
|
||||||
storagePath: string(path),
|
|
||||||
}
|
|
||||||
value := indexedNode{
|
|
||||||
value: fixt.Chain2_Block32_StorageNodes[expectedStorageNodeIndex].Node,
|
|
||||||
isIndexed: false,
|
|
||||||
}
|
|
||||||
expectedStorageNodes.Store(key, value)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub, tx := makeMocks(t)
|
for _, tc := range subtrieWorkerCases {
|
||||||
pub.EXPECT().PublishHeader(gomock.Eq(&fixt.Chain2_Block32_Header))
|
for i, interrupt := range interrupts {
|
||||||
pub.EXPECT().BeginTx().Return(tx, nil).
|
t.Run(
|
||||||
Times(workers)
|
fmt.Sprintf("with %d subtries %d", tc, i),
|
||||||
pub.EXPECT().PrepareTxForBatch(gomock.Any(), gomock.Any()).Return(tx, nil).
|
func(t *testing.T) { runCase(t, tc, interrupt) },
|
||||||
AnyTimes()
|
)
|
||||||
tx.EXPECT().Commit().
|
|
||||||
Times(workers)
|
|
||||||
pub.EXPECT().PublishCode(gomock.Eq(fixt.Chain2_Block32_Header.Number), gomock.Any(), gomock.Any(), gomock.Eq(tx)).
|
|
||||||
AnyTimes()
|
|
||||||
pub.EXPECT().PublishStateNode(
|
|
||||||
gomock.Any(),
|
|
||||||
gomock.Eq(fixt.Chain2_Block32_Header.Hash().String()),
|
|
||||||
gomock.Eq(fixt.Chain2_Block32_Header.Number),
|
|
||||||
gomock.Eq(tx)).
|
|
||||||
Do(func(node *snapt.Node, _ string, _ *big.Int, _ snapt.Tx) error {
|
|
||||||
key := string(node.Path)
|
|
||||||
// Check published nodes
|
|
||||||
if expectedStateNode, ok := expectedStateNodes.Load(key); ok {
|
|
||||||
expectedVal := expectedStateNode.(indexedNode).value
|
|
||||||
test.ExpectEqual(t, expectedVal, *node)
|
|
||||||
|
|
||||||
// Mark expected node as indexed
|
|
||||||
expectedStateNodes.Store(key, indexedNode{
|
|
||||||
value: expectedVal,
|
|
||||||
isIndexed: true,
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
t.Fatalf(unexpectedStateNodeErr, node.Path)
|
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
}).
|
|
||||||
AnyTimes()
|
|
||||||
pub.EXPECT().PublishStorageNode(
|
|
||||||
gomock.Any(),
|
|
||||||
gomock.Eq(fixt.Chain2_Block32_Header.Hash().String()),
|
|
||||||
gomock.Eq(new(big.Int).SetUint64(snapShotHeight)),
|
|
||||||
gomock.Any(),
|
|
||||||
gomock.Eq(tx)).
|
|
||||||
Do(func(node *snapt.Node, _ string, _ *big.Int, statePath []byte, _ snapt.Tx) error {
|
|
||||||
key := storageNodeKey{
|
|
||||||
statePath: string(statePath),
|
|
||||||
storagePath: string(node.Path),
|
|
||||||
}
|
}
|
||||||
// Check published nodes
|
|
||||||
if expectedStorageNode, ok := expectedStorageNodes.Load(key); ok {
|
|
||||||
expectedVal := expectedStorageNode.(indexedNode).value
|
|
||||||
test.ExpectEqual(t, expectedVal, *node)
|
|
||||||
|
|
||||||
// Mark expected node as indexed
|
|
||||||
expectedStorageNodes.Store(key, indexedNode{
|
|
||||||
value: expectedVal,
|
|
||||||
isIndexed: true,
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
t.Fatalf(unexpectedStorageNodeErr, statePath, node.Path)
|
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
}).
|
|
||||||
AnyTimes()
|
|
||||||
|
|
||||||
chainDataPath, ancientDataPath := fixt.GetChainDataPath("chain2data")
|
func TestAccountSelectiveSnapshotRecovery(t *testing.T) {
|
||||||
|
height := uint64(32)
|
||||||
|
watchedAddresses, expected := watchedAccountData_chainBblock32()
|
||||||
|
|
||||||
|
runCase := func(t *testing.T, workers uint, interruptAt uint) {
|
||||||
|
params := SnapshotParams{
|
||||||
|
Height: height,
|
||||||
|
Workers: workers,
|
||||||
|
WatchedAddresses: watchedAddresses,
|
||||||
|
}
|
||||||
|
data := doSnapshotWithRecovery(t, fixture.ChainB, params, interruptAt)
|
||||||
|
expected.verify(t, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range subtrieWorkerCases {
|
||||||
|
t.Run(
|
||||||
|
fmt.Sprintf("with %d subtries", tc),
|
||||||
|
func(t *testing.T) { runCase(t, tc, 1) },
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func verify_chainAblock1(t *testing.T, data mocks.IndexerData) {
|
||||||
|
// Extract indexed keys and sort them for comparison
|
||||||
|
var indexedStateKeys []string
|
||||||
|
for _, stateNode := range data.StateNodes {
|
||||||
|
stateKey := common.BytesToHash(stateNode.AccountWrapper.LeafKey).String()
|
||||||
|
indexedStateKeys = append(indexedStateKeys, stateKey)
|
||||||
|
}
|
||||||
|
require.ElementsMatch(t, fixture.ChainA_Block1_StateNodeLeafKeys, indexedStateKeys)
|
||||||
|
|
||||||
|
ipldCids := make(map[string]struct{})
|
||||||
|
for _, ipld := range data.IPLDs {
|
||||||
|
ipldCids[ipld.CID] = struct{}{}
|
||||||
|
}
|
||||||
|
require.Equal(t, chainAblock1IpldCids, ipldCids)
|
||||||
|
}
|
||||||
|
|
||||||
|
func watchedAccountData_chainBblock32() ([]common.Address, selectiveData) {
|
||||||
|
watchedAddresses := []common.Address{
|
||||||
|
// hash 0xcabc5edb305583e33f66322ceee43088aa99277da772feb5053512d03a0a702b
|
||||||
|
common.HexToAddress("0x825a6eec09e44Cb0fa19b84353ad0f7858d7F61a"),
|
||||||
|
// hash 0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d
|
||||||
|
common.HexToAddress("0x0616F59D291a898e796a1FAD044C5926ed2103eC"),
|
||||||
|
}
|
||||||
|
var expected selectiveData
|
||||||
|
expected.StateNodes = make(map[string]*models.StateNodeModel)
|
||||||
|
for _, index := range []int{0, 4} {
|
||||||
|
node := &fixture.ChainB_Block32_StateNodes[index]
|
||||||
|
expected.StateNodes[node.StateKey] = node
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map account leaf keys to corresponding storage
|
||||||
|
expectedStorageNodeIndexes := []struct {
|
||||||
|
address common.Address
|
||||||
|
indexes []int
|
||||||
|
}{
|
||||||
|
{watchedAddresses[0], []int{9, 11}},
|
||||||
|
{watchedAddresses[1], []int{0, 1, 2, 4, 6}},
|
||||||
|
}
|
||||||
|
expected.StorageNodes = make(map[string]map[string]*models.StorageNodeModel)
|
||||||
|
for _, account := range expectedStorageNodeIndexes {
|
||||||
|
leafKey := crypto.Keccak256Hash(account.address[:]).String()
|
||||||
|
storageNodes := make(map[string]*models.StorageNodeModel)
|
||||||
|
for _, index := range account.indexes {
|
||||||
|
node := &fixture.ChainB_Block32_StorageNodes[index]
|
||||||
|
storageNodes[node.StorageKey] = node
|
||||||
|
}
|
||||||
|
expected.StorageNodes[leafKey] = storageNodes
|
||||||
|
}
|
||||||
|
return watchedAddresses, expected
|
||||||
|
}
|
||||||
|
|
||||||
|
func (expected selectiveData) verify(t *testing.T, data mocks.IndexerData) {
	// check that all indexed nodes are expected and correct
	indexedStateKeys := make(map[string]struct{})
	for _, stateNode := range data.StateNodes {
		stateKey := common.BytesToHash(stateNode.AccountWrapper.LeafKey).String()
		indexedStateKeys[stateKey] = struct{}{}
		require.Contains(t, expected.StateNodes, stateKey, "unexpected state node")

		model := expected.StateNodes[stateKey]
		require.Equal(t, model.CID, stateNode.AccountWrapper.CID)
		require.Equal(t, model.Balance, stateNode.AccountWrapper.Account.Balance.String())
		require.Equal(t, model.StorageRoot, stateNode.AccountWrapper.Account.Root.String())

		expectedStorage := expected.StorageNodes[stateKey]
		indexedStorageKeys := make(map[string]struct{})
		for _, storageNode := range stateNode.StorageDiff {
			storageKey := common.BytesToHash(storageNode.LeafKey).String()
			indexedStorageKeys[storageKey] = struct{}{}
			require.Contains(t, expectedStorage, storageKey, "unexpected storage node")

			require.Equal(t, expectedStorage[storageKey].CID, storageNode.CID)
			require.Equal(t, expectedStorage[storageKey].Value, storageNode.Value)
		}
		// check for completeness
		for storageNode := range expectedStorage {
			require.Contains(t, indexedStorageKeys, storageNode, "missing storage node")
		}
	}
	// check for completeness
	for stateNode := range expected.StateNodes {
		require.Contains(t, indexedStateKeys, stateNode, "missing state node")
	}
}

func doSnapshot(t *testing.T, chain *chains.Paths, params SnapshotParams) mocks.IndexerData {
	chainDataPath, ancientDataPath := chain.ChainData, chain.Ancient
	config := testConfig(chainDataPath, ancientDataPath)
	edb, err := NewEthDB(config.Eth)
	require.NoError(t, err)
	defer edb.Close()

	idx := mocks.NewIndexer(t)
	recovery := filepath.Join(t.TempDir(), "recover.csv")
	service, err := NewSnapshotService(edb, idx, recovery)
	require.NoError(t, err)

	err = service.CreateSnapshot(params)
	require.NoError(t, err)
	return idx.IndexerData
}
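A test built on this helper might look like the following sketch; fixture.ChainA and verify_chainAblock1 appear elsewhere in this diff, while the test name and worker count here are illustrative:

// Sketch: exercising doSnapshot against the chain A fixture at block 1.
func TestSnapshotChainA(t *testing.T) {
	params := SnapshotParams{Height: 1, Workers: 4}
	data := doSnapshot(t, fixture.ChainA, params)
	verify_chainAblock1(t, data)
}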
Removed (the gomock publisher-based tests, replaced by the mock-indexer helpers below; the first lines are the tail of the removed account-selective test case):

		config := testConfig(chainDataPath, ancientDataPath)
		edb, err := NewLevelDB(config.Eth)
		if err != nil {
			t.Fatal(err)
		}
		defer edb.Close()

		recovery := filepath.Join(t.TempDir(), "recover.csv")
		service, err := NewSnapshotService(edb, pub, recovery)
		if err != nil {
			t.Fatal(err)
		}

		params := SnapshotParams{Height: snapShotHeight, Workers: uint(workers), WatchedAddresses: watchedAddresses}
		err = service.CreateSnapshot(params)
		if err != nil {
			t.Fatal(err)
		}

		expectedStateNodes.Range(func(key, value any) bool {
			if !value.(indexedNode).isIndexed {
				t.Fatalf(stateNodeNotIndexedErr, []byte(key.(string)))
				return false
			}
			return true
		})
		expectedStorageNodes.Range(func(key, value any) bool {
			if !value.(indexedNode).isIndexed {
				t.Fatalf(storageNodeNotIndexedErr, []byte(key.(storageNodeKey).statePath), []byte(key.(storageNodeKey).storagePath))
				return false
			}
			return true
		})
	}

	testCases := []int{1, 4, 8, 16, 32}
	for _, tc := range testCases {
		t.Run("case", func(t *testing.T) { runCase(t, tc) })
	}
}

func TestRecovery(t *testing.T) {
	maxPathLength := 4
	runCase := func(t *testing.T, workers int, interruptAt int32) {
		// map: expected state path -> number of times it got published
		expectedStateNodePaths := sync.Map{}
		for _, path := range fixt.Block1_StateNodePaths {
			expectedStateNodePaths.Store(string(path), 0)
		}
		var indexedStateNodesCount int32

		pub, tx := makeMocks(t)
		pub.EXPECT().PublishHeader(gomock.Eq(&fixt.Block1_Header))
		pub.EXPECT().BeginTx().Return(tx, nil).MaxTimes(workers)
		pub.EXPECT().PrepareTxForBatch(gomock.Any(), gomock.Any()).Return(tx, nil).AnyTimes()
		tx.EXPECT().Commit().MaxTimes(workers)
		pub.EXPECT().PublishStateNode(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
			DoAndReturn(func(node *snapt.Node, _ string, _ *big.Int, _ snapt.Tx) error {
				// Start throwing an error after a certain number of state nodes have been indexed
				if indexedStateNodesCount >= interruptAt {
					return errors.New("failingPublishStateNode")
				} else {
					if prevCount, ok := expectedStateNodePaths.Load(string(node.Path)); ok {
						expectedStateNodePaths.Store(string(node.Path), prevCount.(int)+1)
						atomic.AddInt32(&indexedStateNodesCount, 1)
					} else {
						t.Fatalf(unexpectedStateNodeErr, node.Path)
					}
				}
				return nil
			}).
			MaxTimes(int(interruptAt) + workers)

		chainDataPath, ancientDataPath := fixt.GetChainDataPath("chaindata")
		config := testConfig(chainDataPath, ancientDataPath)
		edb, err := NewLevelDB(config.Eth)
		if err != nil {
			t.Fatal(err)
		}
		defer edb.Close()

		recovery := filepath.Join(t.TempDir(), "recover.csv")
		service, err := NewSnapshotService(edb, pub, recovery)
		if err != nil {
			t.Fatal(err)
		}

		params := SnapshotParams{Height: 1, Workers: uint(workers)}
		err = service.CreateSnapshot(params)
		if err == nil {
			t.Fatal("expected an error")
		}

		if _, err = os.Stat(recovery); err != nil {
			t.Fatal("cannot stat recovery file:", err)
		}

		// Create new mocks for recovery
		recoveryPub, tx := makeMocks(t)
		recoveryPub.EXPECT().PublishHeader(gomock.Eq(&fixt.Block1_Header))
		recoveryPub.EXPECT().BeginTx().Return(tx, nil).AnyTimes()
		recoveryPub.EXPECT().PrepareTxForBatch(gomock.Any(), gomock.Any()).Return(tx, nil).AnyTimes()
		tx.EXPECT().Commit().AnyTimes()
		recoveryPub.EXPECT().PublishStateNode(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
			DoAndReturn(func(node *snapt.Node, _ string, _ *big.Int, _ snapt.Tx) error {
				if prevCount, ok := expectedStateNodePaths.Load(string(node.Path)); ok {
					expectedStateNodePaths.Store(string(node.Path), prevCount.(int)+1)
					atomic.AddInt32(&indexedStateNodesCount, 1)
				} else {
					t.Fatalf(unexpectedStateNodeErr, node.Path)
				}
				return nil
			}).
			AnyTimes()

		// Create a new snapshot service for recovery
		recoveryService, err := NewSnapshotService(edb, recoveryPub, recovery)
		if err != nil {
			t.Fatal(err)
		}
		err = recoveryService.CreateSnapshot(params)
		if err != nil {
			t.Fatal(err)
		}

		// Check if recovery file has been deleted
		_, err = os.Stat(recovery)
		if err == nil {
			t.Fatal("recovery file still present")
		} else {
			if !os.IsNotExist(err) {
				t.Fatal(err)
			}
		}

		// Check if all state nodes are indexed after recovery
		expectedStateNodePaths.Range(func(key, value any) bool {
			if value.(int) == 0 {
				t.Fatalf(stateNodeNotIndexedErr, []byte(key.(string)))
			}
			return true
		})

		// nodes along the recovery path get reindexed
		maxStateNodesCount := len(fixt.Block1_StateNodePaths) + workers*maxPathLength
		if indexedStateNodesCount > int32(maxStateNodesCount) {
			t.Fatalf(extraNodesIndexedErr, indexedStateNodesCount, maxStateNodesCount)
		}
	}

	testCases := []int{1, 2, 4, 8, 16, 32}
	numInterrupts := 3
	interrupts := make([]int32, numInterrupts)
	for i := 0; i < numInterrupts; i++ {
		rand.Seed(time.Now().UnixNano())
		interrupts[i] = rand.Int31n(int32(len(fixt.Block1_StateNodePaths)))
	}

	for _, tc := range testCases {
		for _, interrupt := range interrupts {
			t.Run(fmt.Sprint("case", tc, interrupt), func(t *testing.T) { runCase(t, tc, interrupt) })
		}
	}
}

func TestAccountSelectiveRecovery(t *testing.T) {
	maxPathLength := 2
	snapShotHeight := uint64(32)
	watchedAddresses := map[common.Address]struct{}{
		common.HexToAddress("0x825a6eec09e44Cb0fa19b84353ad0f7858d7F61a"): {},
		common.HexToAddress("0x0616F59D291a898e796a1FAD044C5926ed2103eC"): {},
	}

	expectedStateNodeIndexes := []int{0, 1, 2, 6}

	runCase := func(t *testing.T, workers int, interruptAt int32) {
		// map: expected state path -> number of times it got published
		expectedStateNodePaths := sync.Map{}
		for _, expectedStateNodeIndex := range expectedStateNodeIndexes {
			path := fixt.Chain2_Block32_StateNodes[expectedStateNodeIndex].Path
			expectedStateNodePaths.Store(string(path), 0)
		}
		var indexedStateNodesCount int32

		pub, tx := makeMocks(t)
		pub.EXPECT().PublishHeader(gomock.Eq(&fixt.Chain2_Block32_Header))
		pub.EXPECT().BeginTx().Return(tx, nil).Times(workers)
		pub.EXPECT().PrepareTxForBatch(gomock.Any(), gomock.Any()).Return(tx, nil).AnyTimes()
		tx.EXPECT().Commit().Times(workers)
		pub.EXPECT().PublishStateNode(
			gomock.Any(),
			gomock.Eq(fixt.Chain2_Block32_Header.Hash().String()),
			gomock.Eq(fixt.Chain2_Block32_Header.Number),
			gomock.Eq(tx)).
			DoAndReturn(func(node *snapt.Node, _ string, _ *big.Int, _ snapt.Tx) error {
				// Start throwing an error after a certain number of state nodes have been indexed
				if indexedStateNodesCount >= interruptAt {
					return errors.New("failingPublishStateNode")
				} else {
					if prevCount, ok := expectedStateNodePaths.Load(string(node.Path)); ok {
						expectedStateNodePaths.Store(string(node.Path), prevCount.(int)+1)
						atomic.AddInt32(&indexedStateNodesCount, 1)
					} else {
						t.Fatalf(unexpectedStateNodeErr, node.Path)
					}
				}
				return nil
			}).
			MaxTimes(int(interruptAt) + workers)
		pub.EXPECT().PublishStorageNode(
			gomock.Any(),
			gomock.Eq(fixt.Chain2_Block32_Header.Hash().String()),
			gomock.Eq(new(big.Int).SetUint64(snapShotHeight)),
			gomock.Any(),
			gomock.Eq(tx)).
			AnyTimes()
		pub.EXPECT().PublishCode(gomock.Eq(fixt.Chain2_Block32_Header.Number), gomock.Any(), gomock.Any(), gomock.Eq(tx)).
			AnyTimes()

		chainDataPath, ancientDataPath := fixt.GetChainDataPath("chain2data")
		config := testConfig(chainDataPath, ancientDataPath)
		edb, err := NewLevelDB(config.Eth)
		if err != nil {
			t.Fatal(err)
		}
		defer edb.Close()

		recovery := filepath.Join(t.TempDir(), "recover.csv")
		service, err := NewSnapshotService(edb, pub, recovery)
		if err != nil {
			t.Fatal(err)
		}

		params := SnapshotParams{Height: snapShotHeight, Workers: uint(workers), WatchedAddresses: watchedAddresses}
		err = service.CreateSnapshot(params)
		if err == nil {
			t.Fatal("expected an error")
		}

		if _, err = os.Stat(recovery); err != nil {
			t.Fatal("cannot stat recovery file:", err)
		}

		// Create new mocks for recovery
		recoveryPub, tx := makeMocks(t)
		recoveryPub.EXPECT().PublishHeader(gomock.Eq(&fixt.Chain2_Block32_Header))
		recoveryPub.EXPECT().BeginTx().Return(tx, nil).MaxTimes(workers)
		recoveryPub.EXPECT().PrepareTxForBatch(gomock.Any(), gomock.Any()).Return(tx, nil).AnyTimes()
		tx.EXPECT().Commit().MaxTimes(workers)
		recoveryPub.EXPECT().PublishStateNode(
			gomock.Any(),
			gomock.Eq(fixt.Chain2_Block32_Header.Hash().String()),
			gomock.Eq(fixt.Chain2_Block32_Header.Number),
			gomock.Eq(tx)).
			DoAndReturn(func(node *snapt.Node, _ string, _ *big.Int, _ snapt.Tx) error {
				if prevCount, ok := expectedStateNodePaths.Load(string(node.Path)); ok {
					expectedStateNodePaths.Store(string(node.Path), prevCount.(int)+1)
					atomic.AddInt32(&indexedStateNodesCount, 1)
				} else {
					t.Fatalf(unexpectedStateNodeErr, node.Path)
				}
				return nil
			}).
			AnyTimes()
		recoveryPub.EXPECT().PublishStorageNode(
			gomock.Any(),
			gomock.Eq(fixt.Chain2_Block32_Header.Hash().String()),
			gomock.Eq(new(big.Int).SetUint64(snapShotHeight)),
			gomock.Any(),
			gomock.Eq(tx)).
			AnyTimes()
		recoveryPub.EXPECT().PublishCode(gomock.Eq(fixt.Chain2_Block32_Header.Number), gomock.Any(), gomock.Any(), gomock.Eq(tx)).
			AnyTimes()

		// Create a new snapshot service for recovery
		recoveryService, err := NewSnapshotService(edb, recoveryPub, recovery)
		if err != nil {
			t.Fatal(err)
		}
		err = recoveryService.CreateSnapshot(params)
		if err != nil {
			t.Fatal(err)
		}

		// Check if recovery file has been deleted
		_, err = os.Stat(recovery)
		if err == nil {
			t.Fatal("recovery file still present")
		} else {
			if !os.IsNotExist(err) {
				t.Fatal(err)
			}
		}

		// Check if all expected state nodes are indexed after recovery
		expectedStateNodePaths.Range(func(key, value any) bool {
			if value.(int) == 0 {
				t.Fatalf(stateNodeNotIndexedErr, []byte(key.(string)))
			}
			return true
		})

		// nodes along the recovery path get reindexed
		maxStateNodesCount := len(expectedStateNodeIndexes) + workers*maxPathLength
		if indexedStateNodesCount > int32(maxStateNodesCount) {
			t.Fatalf(extraNodesIndexedErr, indexedStateNodesCount, maxStateNodesCount)
		}
	}

	testCases := []int{1, 2, 4, 8, 16, 32}
	numInterrupts := 2
	interrupts := make([]int32, numInterrupts)
	for i := 0; i < numInterrupts; i++ {
		rand.Seed(time.Now().UnixNano())
		interrupts[i] = rand.Int31n(int32(len(expectedStateNodeIndexes)))
	}

	for _, tc := range testCases {
		for _, interrupt := range interrupts {
			t.Run(fmt.Sprint("case", tc, interrupt), func(t *testing.T) { runCase(t, tc, interrupt) })
		}
	}
}

Added (recovery test helpers built on the mock indexer):

func doSnapshotWithRecovery(
	t *testing.T,
	chain *chains.Paths,
	params SnapshotParams,
	failAfter uint,
) mocks.IndexerData {
	chainDataPath, ancientDataPath := chain.ChainData, chain.Ancient
	config := testConfig(chainDataPath, ancientDataPath)
	edb, err := NewEthDB(config.Eth)
	require.NoError(t, err)
	defer edb.Close()

	indexer := &mocks.InterruptingIndexer{
		Indexer:        mocks.NewIndexer(t),
		InterruptAfter: failAfter,
	}
	t.Logf("Will interrupt after %d state nodes", failAfter)

	recoveryFile := filepath.Join(t.TempDir(), "recover.csv")
	service, err := NewSnapshotService(edb, indexer, recoveryFile)
	require.NoError(t, err)
	err = service.CreateSnapshot(params)
	require.Error(t, err)

	require.FileExists(t, recoveryFile)
	// We should only have processed nodes up to the break, plus an extra node per worker
	require.LessOrEqual(t, len(indexer.StateNodes), int(indexer.InterruptAfter+params.Workers))

	// use the nested mock indexer, to continue where it left off
	recoveryIndexer := indexer.Indexer
	service, err = NewSnapshotService(edb, recoveryIndexer, recoveryFile)
	require.NoError(t, err)

	err = service.CreateSnapshot(params)
	require.NoError(t, err)

	return recoveryIndexer.IndexerData
}

func sliceToSet[T comparable](slice []T) map[T]struct{} {
	set := make(map[T]struct{})
	for _, v := range slice {
		set[v] = struct{}{}
	}
	return set
}
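The chainAblock1IpldCids set compared in verify_chainAblock1 is presumably derived from the fixture's CID list with the sliceToSet helper above; a sketch of that assumed wiring:

// Sketch (assumed wiring): build the expected CID set once, so
// verify_chainAblock1 can compare it against the indexed IPLDs.
var chainAblock1IpldCids = sliceToSet(fixture.ChainA_Block1_IpldCids)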
@ -1,219 +0,0 @@ (removed file)
package snapshot

import (
	"bytes"
	"context"
	"encoding/csv"
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/trie"
	log "github.com/sirupsen/logrus"

	iter "github.com/vulcanize/go-eth-state-node-iterator"
)

type trackedIter struct {
	trie.NodeIterator
	tracker *iteratorTracker

	seekedPath []byte // latest path seeked from the tracked iterator
	endPath    []byte // endPath for the tracked iterator
}

func (it *trackedIter) Next(descend bool) bool {
	ret := it.NodeIterator.Next(descend)

	if !ret {
		if it.tracker.running {
			it.tracker.stopChan <- it
		} else {
			log.Errorf("iterator stopped after tracker halted: path=%x", it.Path())
		}
	}
	return ret
}

type iteratorTracker struct {
	recoveryFile string

	startChan chan *trackedIter
	stopChan  chan *trackedIter
	started   map[*trackedIter]struct{}
	stopped   []*trackedIter
	running   bool
}

func newTracker(file string, buf int) iteratorTracker {
	return iteratorTracker{
		recoveryFile: file,
		startChan:    make(chan *trackedIter, buf),
		stopChan:     make(chan *trackedIter, buf),
		started:      map[*trackedIter]struct{}{},
		running:      true,
	}
}

func (tr *iteratorTracker) captureSignal(cancelCtx context.CancelFunc) {
	sigChan := make(chan os.Signal, 1)

	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		sig := <-sigChan
		log.Errorf("Signal received (%v), stopping", sig)
		// cancel context on receiving a signal
		// on ctx cancellation, all the iterators complete processing of their current node before stopping
		cancelCtx()
	}()
}

// Wraps an iterator in a trackedIter. This should not be called once halts are possible.
func (tr *iteratorTracker) tracked(it trie.NodeIterator, recoveredPath []byte) (ret *trackedIter) {
	// create seeked path of max capacity (65)
	iterSeekedPath := make([]byte, 0, 65)
	// initially populate seeked path with the recovered path
	// to be used in trie traversal
	if recoveredPath != nil {
		iterSeekedPath = append(iterSeekedPath, recoveredPath...)
	}

	// if the iterator being tracked is a PrefixBoundIterator, capture its end path
	// to be used in trie traversal
	var endPath []byte
	if boundedIter, ok := it.(*iter.PrefixBoundIterator); ok {
		endPath = boundedIter.EndPath
	}

	ret = &trackedIter{it, tr, iterSeekedPath, endPath}
	tr.startChan <- ret
	return
}

// explicitly stops an iterator
func (tr *iteratorTracker) stopIter(it *trackedIter) {
	tr.stopChan <- it
}

// dumps iterator path and bounds to a text file so it can be restored later
func (tr *iteratorTracker) dump() error {
	log.Debug("Dumping recovery state to: ", tr.recoveryFile)
	var rows [][]string
	for it := range tr.started {
		var startPath []byte
		var endPath []byte
		if impl, ok := it.NodeIterator.(*iter.PrefixBoundIterator); ok {
			// if the iterator being tracked is a PrefixBoundIterator,
			// initialize start and end paths with its bounds
			startPath = impl.StartPath
			endPath = impl.EndPath
		}

		// if seeked path and iterator path are non-empty, use iterator's path as startpath
		if !bytes.Equal(it.seekedPath, []byte{}) && !bytes.Equal(it.Path(), []byte{}) {
			startPath = it.Path()
		}

		rows = append(rows, []string{
			fmt.Sprintf("%x", startPath),
			fmt.Sprintf("%x", endPath),
			fmt.Sprintf("%x", it.seekedPath),
		})
	}

	file, err := os.Create(tr.recoveryFile)
	if err != nil {
		return err
	}
	defer file.Close()
	out := csv.NewWriter(file)

	return out.WriteAll(rows)
}

// attempts to read iterator state from file
// if file doesn't exist, returns an empty slice with no error
func (tr *iteratorTracker) restore(tree state.Trie) ([]trie.NodeIterator, error) {
	file, err := os.Open(tr.recoveryFile)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, err
	}
	log.Debug("Restoring recovery state from: ", tr.recoveryFile)
	defer file.Close()
	in := csv.NewReader(file)
	in.FieldsPerRecord = 3
	rows, err := in.ReadAll()
	if err != nil {
		return nil, err
	}

	var ret []trie.NodeIterator
	for _, row := range rows {
		// pick up where each interval left off
		var startPath []byte
		var endPath []byte
		var recoveredPath []byte

		if len(row[0]) != 0 {
			if _, err = fmt.Sscanf(row[0], "%x", &startPath); err != nil {
				return nil, err
			}
		}
		if len(row[1]) != 0 {
			if _, err = fmt.Sscanf(row[1], "%x", &endPath); err != nil {
				return nil, err
			}
		}
		if len(row[2]) != 0 {
			if _, err = fmt.Sscanf(row[2], "%x", &recoveredPath); err != nil {
				return nil, err
			}
		}

		// force the lower bound path to an even length
		// (required by HexToKeyBytes())
		if len(startPath)&0b1 == 1 {
			// decrement first to avoid skipped nodes
			decrementPath(startPath)
			startPath = append(startPath, 0)
		}

		it := iter.NewPrefixBoundIterator(tree.NodeIterator(iter.HexToKeyBytes(startPath)), startPath, endPath)
		ret = append(ret, tr.tracked(it, recoveredPath))
	}
	return ret, nil
}

func (tr *iteratorTracker) haltAndDump() error {
	tr.running = false

	// drain any pending iterators
	close(tr.startChan)
	for start := range tr.startChan {
		tr.started[start] = struct{}{}
	}
	close(tr.stopChan)
	for stop := range tr.stopChan {
		tr.stopped = append(tr.stopped, stop)
	}

	for _, stop := range tr.stopped {
		delete(tr.started, stop)
	}

	if len(tr.started) == 0 {
		// if the tracker state is empty, erase any existing recovery file
		err := os.Remove(tr.recoveryFile)
		if os.IsNotExist(err) {
			err = nil
		}
		return err
	}

	return tr.dump()
}
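For reference, the recovery file written by dump above was a headerless CSV, one row per live iterator: hex-encoded start path, end path, and last seeked path. A hypothetical dump from two bounded workers (values invented for illustration) would look like:

0500,0600,0563
0600,0700,06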
@ -2,87 +2,75 @@ package snapshot

Removed (the publisher factory and the path helpers):

import (
	"bytes"
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"

	"github.com/vulcanize/ipld-eth-state-snapshot/pkg/prom"
	file "github.com/vulcanize/ipld-eth-state-snapshot/pkg/snapshot/file"
	pg "github.com/vulcanize/ipld-eth-state-snapshot/pkg/snapshot/pg"
	snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
)

func NewPublisher(mode SnapshotMode, config *Config) (snapt.Publisher, error) {
	switch mode {
	case PgSnapshot:
		driver, err := postgres.NewPGXDriver(context.Background(), config.DB.ConnConfig, config.Eth.NodeInfo)
		if err != nil {
			return nil, err
		}

		prom.RegisterDBCollector(config.DB.ConnConfig.DatabaseName, driver)

		return pg.NewPublisher(postgres.NewPostgresDB(driver)), nil
	case FileSnapshot:
		return file.NewPublisher(config.File.OutputDir, config.Eth.NodeInfo)
	}
	return nil, fmt.Errorf("invalid snapshot mode: %s", mode)
}

// Subtracts 1 from the last byte in a path slice, carrying if needed.
// Does nothing, returning false, for all-zero inputs.
func decrementPath(path []byte) bool {
	// check for all zeros
	allzero := true
	for i := 0; i < len(path); i++ {
		allzero = allzero && path[i] == 0
	}
	if allzero {
		return false
	}
	for i := len(path) - 1; i >= 0; i-- {
		val := path[i]
		path[i]--
		if val == 0 {
			path[i] = 0xf
		} else {
			return true
		}
	}
	return true
}

// https://github.com/ethereum/go-ethereum/blob/master/trie/encoding.go#L97
func keybytesToHex(str []byte) []byte {
	l := len(str)*2 + 1
	var nibbles = make([]byte, l)
	for i, b := range str {
		nibbles[i*2] = b / 16
		nibbles[i*2+1] = b % 16
	}
	nibbles[l-1] = 16
	return nibbles
}

func updateSeekedPath(seekedPath *[]byte, nodePath []byte) {
	// assumes len(nodePath) <= max len(*seekedPath)
	*seekedPath = (*seekedPath)[:len(nodePath)]
	copy(*seekedPath, nodePath)
}

// checks that the provided node path is before the end path
func checkUpperPathBound(nodePath, endPath []byte) bool {
	// every path is before nil endPath
	if endPath == nil {
		return true
	}

	if len(endPath)%2 == 0 {
		// in case of even length endpath
		// apply open interval filter since the node at endpath will be covered by the next iterator
		return bytes.Compare(nodePath, endPath) < 0
	}

	return bytes.Compare(nodePath, endPath) <= 0
}

Added (a helper to estimate iterator steps between two trie paths):

import (
	"bytes"
)

// Estimate the number of iterations necessary to step from start to end.
func estimateSteps(start []byte, end []byte, depth int) uint64 {
	// We see paths in several forms (nil, 0600, 06, etc.). We need to adjust them to a comparable form.
	// For nil, start and end indicate the extremes of 0x0 and 0x10. For differences in depth, we often see a
	// start/end range on a bounded iterator specified like 0500:0600, while the value returned by it.Path() may
	// be shorter, like 06. Since our goal is to estimate how many steps it would take to move from start to end,
	// we want to perform the comparison at a stable depth, since to move from 05 to 06 is only 1 step, but
	// to move from 0500 to 06 is 16.
	normalizePathRange := func(start []byte, end []byte, depth int) ([]byte, []byte) {
		if 0 == len(start) {
			start = []byte{0x0}
		}
		if 0 == len(end) {
			end = []byte{0x10}
		}
		normalizedStart := make([]byte, depth)
		normalizedEnd := make([]byte, depth)
		for i := 0; i < depth; i++ {
			if i < len(start) {
				normalizedStart[i] = start[i]
			}
			if i < len(end) {
				normalizedEnd[i] = end[i]
			}
		}
		return normalizedStart, normalizedEnd
	}

	// We have no need to handle negative exponents, so uints are fine.
	pow := func(x uint64, y uint) uint64 {
		if 0 == y {
			return 1
		}
		ret := x
		for i := uint(1); i < y; i++ {
			ret *= x
		}
		return ret
	}

	// Fix the paths.
	start, end = normalizePathRange(start, end, depth)

	// No negative distances, if the start is already >= end, the distance is 0.
	if bytes.Compare(start, end) >= 0 {
		return 0
	}

	// Subtract each component, right to left, carrying over if necessary.
	difference := make([]byte, len(start))
	var carry byte = 0
	for i := len(start) - 1; i >= 0; i-- {
		result := end[i] - start[i] - carry
		if result > 0xf && i > 0 {
			result &= 0xf
			carry = 1
		} else {
			carry = 0
		}
		difference[i] = result
	}

	// Calculate the result.
	var ret uint64 = 0
	for i := 0; i < len(difference); i++ {
		ret += uint64(difference[i]) * pow(16, uint(len(difference)-i-1))
	}

	return ret
}
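A worked example of the estimate (not from the source): at depth 2, the range 05 to 07 normalizes to 0500:0700, the nibble-wise difference is [2, 0], so the estimate is 2*16 + 0 = 32 steps.

// Illustration: two full 16-node subtrees lie between paths 0500 and 0700.
steps := estimateSteps([]byte{0x05}, []byte{0x07}, 2)
fmt.Println(steps) // 32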
@ -1,63 +0,0 @@ (removed file)
// Copyright © 2020 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package types

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// node for holding trie node information
type Node struct {
	NodeType nodeType
	Path     []byte
	Key      common.Hash
	Value    []byte
}

// nodeType for explicitly setting type of node
type nodeType int

const (
	Branch nodeType = iota
	Extension
	Leaf
	Removed
	Unknown
)

// CheckKeyType checks what type of key we have
func CheckKeyType(elements []interface{}) (nodeType, error) {
	if len(elements) > 2 {
		return Branch, nil
	}
	if len(elements) < 2 {
		return Unknown, fmt.Errorf("node cannot be less than two elements in length")
	}
	switch elements[0].([]byte)[0] / 16 {
	case '\x00':
		return Extension, nil
	case '\x01':
		return Extension, nil
	case '\x02':
		return Leaf, nil
	case '\x03':
		return Leaf, nil
	default:
		return Unknown, fmt.Errorf("unknown hex prefix")
	}
}
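The switch above keys off the high nibble of the hex-prefix (HP) encoded partial path: 0 and 1 mark extensions, 2 and 3 mark leaves. A small usage sketch (the RLP elements here are invented):

// 0x20 has high nibble 2: an even-length leaf path, so this is a Leaf.
kind, err := CheckKeyType([]interface{}{[]byte{0x20, 0x01}, []byte("value")})
// kind == Leaf, err == nil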
@ -1,22 +0,0 @@ (removed file)
package types

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

type Publisher interface {
	PublishHeader(header *types.Header) error
	PublishStateNode(node *Node, headerID string, height *big.Int, tx Tx) error
	PublishStorageNode(node *Node, headerID string, height *big.Int, statePath []byte, tx Tx) error
	PublishCode(height *big.Int, codeHash common.Hash, codeBytes []byte, tx Tx) error
	BeginTx() (Tx, error)
	PrepareTxForBatch(tx Tx, batchSize uint) (Tx, error)
}

type Tx interface {
	Rollback() error
	Commit() error
}
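For reference, a minimal no-op implementation of the removed interface, useful for dry runs, could have looked like this (a sketch, not code from the repo):

type nopTx struct{}

func (nopTx) Rollback() error { return nil }
func (nopTx) Commit() error   { return nil }

// nopPublisher satisfies Publisher and discards everything it is given.
type nopPublisher struct{}

func (nopPublisher) PublishHeader(*types.Header) error                            { return nil }
func (nopPublisher) PublishStateNode(*Node, string, *big.Int, Tx) error           { return nil }
func (nopPublisher) PublishStorageNode(*Node, string, *big.Int, []byte, Tx) error { return nil }
func (nopPublisher) PublishCode(*big.Int, common.Hash, []byte, Tx) error          { return nil }
func (nopPublisher) BeginTx() (Tx, error)                                         { return nopTx{}, nil }
func (nopPublisher) PrepareTxForBatch(tx Tx, _ uint) (Tx, error)                  { return tx, nil }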
@ -1,76 +0,0 @@ (removed file)
package types

var TableIPLDBlock = Table{
	`public.blocks`,
	[]column{
		{"block_number", bigint},
		{"key", text},
		{"data", bytea},
	},
	"ON CONFLICT (key, block_number) DO NOTHING",
}

var TableNodeInfo = Table{
	Name: `public.nodes`,
	Columns: []column{
		{"genesis_block", varchar},
		{"network_id", varchar},
		{"node_id", varchar},
		{"client_name", varchar},
		{"chain_id", integer},
	},
}

var TableHeader = Table{
	"eth.header_cids",
	[]column{
		{"block_number", bigint},
		{"block_hash", varchar},
		{"parent_hash", varchar},
		{"cid", text},
		{"td", numeric},
		{"node_id", varchar},
		{"reward", numeric},
		{"state_root", varchar},
		{"tx_root", varchar},
		{"receipt_root", varchar},
		{"uncle_root", varchar},
		{"bloom", bytea},
		{"timestamp", numeric},
		{"mh_key", text},
		{"times_validated", integer},
		{"coinbase", varchar},
	},
	"ON CONFLICT (block_hash, block_number) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = (EXCLUDED.parent_hash, EXCLUDED.cid, EXCLUDED.td, EXCLUDED.node_id, EXCLUDED.reward, EXCLUDED.state_root, EXCLUDED.tx_root, EXCLUDED.receipt_root, EXCLUDED.uncle_root, EXCLUDED.bloom, EXCLUDED.timestamp, EXCLUDED.mh_key, eth.header_cids.times_validated + 1, EXCLUDED.coinbase)",
}

var TableStateNode = Table{
	"eth.state_cids",
	[]column{
		{"block_number", bigint},
		{"header_id", varchar},
		{"state_leaf_key", varchar},
		{"cid", text},
		{"state_path", bytea},
		{"node_type", integer},
		{"diff", boolean},
		{"mh_key", text},
	},
	"ON CONFLICT (header_id, state_path, block_number) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = (EXCLUDED.state_leaf_key, EXCLUDED.cid, EXCLUDED.node_type, EXCLUDED.diff, EXCLUDED.mh_key)",
}

var TableStorageNode = Table{
	"eth.storage_cids",
	[]column{
		{"block_number", bigint},
		{"header_id", varchar},
		{"state_path", bytea},
		{"storage_leaf_key", varchar},
		{"cid", text},
		{"storage_path", bytea},
		{"node_type", integer},
		{"diff", boolean},
		{"mh_key", text},
	},
	"ON CONFLICT (header_id, state_path, storage_path, block_number) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = (EXCLUDED.storage_leaf_key, EXCLUDED.cid, EXCLUDED.node_type, EXCLUDED.diff, EXCLUDED.mh_key)",
}
@ -1,79 +0,0 @@ (removed file)
package types

import (
	"fmt"
	"strings"
)

type colType int

const (
	integer colType = iota
	boolean
	bigint
	numeric
	bytea
	varchar
	text
)

type column struct {
	name string
	typ  colType
}
type Table struct {
	Name           string
	Columns        []column
	conflictClause string
}

func (tbl *Table) ToCsvRow(args ...interface{}) []string {
	var row []string
	for i, col := range tbl.Columns {
		row = append(row, col.typ.formatter()(args[i]))
	}
	return row
}

func (tbl *Table) ToInsertStatement() string {
	var colnames, placeholders []string
	for i, col := range tbl.Columns {
		colnames = append(colnames, col.name)
		placeholders = append(placeholders, fmt.Sprintf("$%d", i+1))
	}
	return fmt.Sprintf(
		"INSERT INTO %s (%s) VALUES (%s) %s",
		tbl.Name, strings.Join(colnames, ", "), strings.Join(placeholders, ", "), tbl.conflictClause,
	)
}

type colfmt = func(interface{}) string

func sprintf(f string) colfmt {
	return func(x interface{}) string { return fmt.Sprintf(f, x) }
}

func (typ colType) formatter() colfmt {
	switch typ {
	case integer:
		return sprintf("%d")
	case boolean:
		return func(x interface{}) string {
			if x.(bool) {
				return "t"
			}
			return "f"
		}
	case bigint:
		return sprintf("%s")
	case numeric:
		return sprintf("%d")
	case bytea:
		return sprintf(`\x%x`)
	case varchar:
		return sprintf("%s")
	case text:
		return sprintf("%s")
	}
	panic("unreachable")
}
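As an example of what ToInsertStatement produced, the statement generated for TableIPLDBlock (defined in the previous file) is:

TableIPLDBlock.ToInsertStatement()
// "INSERT INTO public.blocks (block_number, key, data) VALUES ($1, $2, $3) ON CONFLICT (key, block_number) DO NOTHING"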
@ -1,33 +0,0 @@ (removed file)
package types

import (
	"bytes"

	"github.com/sirupsen/logrus"

	"github.com/ethereum/go-ethereum/common"
)

var nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")

func IsNullHash(hash common.Hash) bool {
	return bytes.Equal(hash.Bytes(), nullHash.Bytes())
}

func CommitOrRollback(tx Tx, err error) error {
	var rberr error
	defer func() {
		if rberr != nil {
			logrus.Errorf("rollback failed: %s", rberr)
		}
	}()
	if rec := recover(); rec != nil {
		rberr = tx.Rollback()
		panic(rec)
	} else if err != nil {
		rberr = tx.Rollback()
	} else {
		err = tx.Commit()
	}
	return err
}
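The typical call-site shape for the removed CommitOrRollback helper: run the publish step, then hand the transaction and the resulting error to the helper, which rolls back on error and commits otherwise. A sketch with a hypothetical caller:

// publishOne is hypothetical; it shows the intended CommitOrRollback pattern.
func publishOne(pub Publisher, header *types.Header) error {
	tx, err := pub.BeginTx()
	if err != nil {
		return err
	}
	err = pub.PublishHeader(header)
	return CommitOrRollback(tx, err)
}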
73
scripts/README.md
Normal file
@ -0,0 +1,73 @@
## Data Validation

* For a given table in the `ipld-eth-db` schema, we know the number of columns to be expected in each row of the data dump:

  | Table              | Expected columns |
  |--------------------|:----------------:|
  | `public.nodes`     |        5         |
  | `ipld.blocks`      |        3         |
  | `eth.header_cids`  |        16        |
  | `eth.state_cids`   |        8         |
  | `eth.storage_cids` |        9         |

### Find Bad Data

* Run the following command to find any rows having an unexpected number of columns:

  ```bash
  ./scripts/find-bad-rows.sh -i <input-file> -c <expected-columns> -o [output-file] -d [include-data]
  ```

  * `input-file` `-i`: Input data file path
  * `expected-columns` `-c`: Expected number of columns in each row of the input file
  * `output-file` `-o`: Output destination file path (default: `STDOUT`)
  * `include-data` `-d`: Whether to include the data row in the output (`true | false`) (default: `false`)
  * The output is of the format: row number, number of columns, the data row

  Eg:

  ```bash
  ./scripts/find-bad-rows.sh -i eth.state_cids.csv -c 8 -o res.txt -d true
  ```

  Output:

  ```
  1 9 1500000,xxxxxxxx,0x83952d392f9b0059eea94b10d1a095eefb1943ea91595a16c6698757127d4e1c,,baglacgzasvqcntdahkxhufdnkm7a22s2eetj6mx6nzkarwxtkvy4x3bubdgq,\x0f,0,f,/blocks/,DMQJKYBGZRQDVLT2CRWVGPQNNJNCCJU7GL7G4VAI3LZVK4OL5Q2ARTI
  ```

  Eg:

  ```bash
  ./scripts/find-bad-rows.sh -i public.nodes.csv -c 5 -o res.txt -d true
  ./scripts/find-bad-rows.sh -i ipld.blocks.csv -c 3 -o res.txt -d true
  ./scripts/find-bad-rows.sh -i eth.header_cids.csv -c 16 -o res.txt -d true
  ./scripts/find-bad-rows.sh -i eth.state_cids.csv -c 8 -o res.txt -d true
  ./scripts/find-bad-rows.sh -i eth.storage_cids.csv -c 9 -o res.txt -d true
  ```

## Data Cleanup

* In case of a column count mismatch, data from `file` mode dumps can't be imported readily into `ipld-eth-db`.

### Filter Bad Data

* Run the following command to filter out rows having an unexpected number of columns:

  ```bash
  ./scripts/filter-bad-rows.sh -i <input-file> -c <expected-columns> -o <output-file>
  ```

  * `input-file` `-i`: Input data file path
  * `expected-columns` `-c`: Expected number of columns in each row of the input file
  * `output-file` `-o`: Output destination file path

  Eg:

  ```bash
  ./scripts/filter-bad-rows.sh -i public.nodes.csv -c 5 -o cleaned-public.nodes.csv
  ./scripts/filter-bad-rows.sh -i ipld.blocks.csv -c 3 -o cleaned-ipld.blocks.csv
  ./scripts/filter-bad-rows.sh -i eth.header_cids.csv -c 16 -o cleaned-eth.header_cids.csv
  ./scripts/filter-bad-rows.sh -i eth.state_cids.csv -c 8 -o cleaned-eth.state_cids.csv
  ./scripts/filter-bad-rows.sh -i eth.storage_cids.csv -c 9 -o cleaned-eth.storage_cids.csv
  ```
87
scripts/compare-snapshots.sh
Executable file
@ -0,0 +1,87 @@
#!/bin/bash
# Compare the full snapshot output from two versions of the service
#
# Usage: compare-snapshots.sh [-d <output-dir>] <binary-A> <binary-B>

# Configure the input data using environment vars.
(
  set -u
  : $SNAPSHOT_BLOCK_HEIGHT
  : $ETHDB_PATH
  : $ETHDB_ANCIENT
  : $ETH_GENESIS_BLOCK
)

while getopts d: opt; do
  case $opt in
    d) output_dir="$OPTARG"
  esac
done
shift $((OPTIND - 1))

binary_A=$1
binary_B=$2
shift 2

if [[ -z $output_dir ]]; then
  output_dir=$(mktemp -d)
fi

export SNAPSHOT_MODE=postgres
export SNAPSHOT_WORKERS=32
export SNAPSHOT_RECOVERY_FILE='compare-snapshots-recovery.txt'

export DATABASE_NAME="cerc_testing"
export DATABASE_HOSTNAME="localhost"
export DATABASE_PORT=8077
export DATABASE_USER="vdbm"
export DATABASE_PASSWORD="password"

export ETH_CLIENT_NAME=test-client
export ETH_NODE_ID=test-node
export ETH_NETWORK_ID=test-network
export ETH_CHAIN_ID=4242

dump_table() {
  statement="copy (select * from $1) to stdout with csv"
  docker exec -e PGPASSWORD=password test-ipld-eth-db-1 \
    psql -q cerc_testing -U vdbm -c "$statement" | sort -u > "$2/$1.csv"
}

clear_table() {
  docker exec -e PGPASSWORD=password test-ipld-eth-db-1 \
    psql -q cerc_testing -U vdbm -c "truncate $1"
}

tables=(
  eth.log_cids
  eth.receipt_cids
  eth.state_cids
  eth.storage_cids
  eth.transaction_cids
  eth.uncle_cids
  ipld.blocks
  public.nodes
)

for table in "${tables[@]}"; do
  clear_table $table
done

$binary_A stateSnapshot

mkdir -p $output_dir/A
for table in "${tables[@]}"; do
  dump_table $table $output_dir/A
  clear_table $table
done

$binary_B stateSnapshot

mkdir -p $output_dir/B
for table in "${tables[@]}"; do
  dump_table $table $output_dir/B
  clear_table $table
done

diff -rs $output_dir/A $output_dir/B
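A hypothetical invocation, with the test database from the compose file already up (paths, height, and binary names are examples only):

export SNAPSHOT_BLOCK_HEIGHT=100
export ETHDB_PATH=/data/geth/chaindata
export ETHDB_ANCIENT=/data/geth/chaindata/ancient
export ETH_GENESIS_BLOCK=0x...   # genesis hash of the chain under test
./scripts/compare-snapshots.sh -d ./snap-diff ./bin/snapshot-old ./bin/snapshot-new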
29
scripts/filter-bad-rows.sh
Executable file
@ -0,0 +1,29 @@
#!/bin/bash

# flags
# -i <input-file>: Input data file path
# -c <expected-columns>: Expected number of columns in each row of the input file
# -o [output-file]: Output destination file path

# eg: ./scripts/filter-bad-rows.sh -i eth.state_cids.csv -c 8 -o cleaned-eth.state_cids.csv

while getopts i:c:o: OPTION
do
  case "${OPTION}" in
    i) inputFile=${OPTARG};;
    c) expectedColumns=${OPTARG};;
    o) outputFile=${OPTARG};;
  esac
done

timestamp=$(date +%s)

# select only rows having the expected number of columns
if [ -z "${outputFile}" ]; then
  echo "Invalid destination file arg (-o) ${outputFile}"
else
  awk -F"," "NF==${expectedColumns}" ${inputFile} > ${outputFile}
fi

difference=$(($(date +%s)-timestamp))
echo Time taken: $(date -d@${difference} -u +%H:%M:%S)
43
scripts/find-bad-rows.sh
Executable file
@ -0,0 +1,43 @@
#!/bin/bash

# flags
# -i <input-file>: Input data file path
# -c <expected-columns>: Expected number of columns in each row of the input file
# -o [output-file]: Output destination file path (default: STDOUT)
# -d [include-data]: Whether to include the data row in output (true | false) (default: false)

# eg: ./scripts/find-bad-rows.sh -i eth.state_cids.csv -c 8 -o res.txt -d true
# output: 1 9 1500000,xxxxxxxx,0x83952d392f9b0059eea94b10d1a095eefb1943ea91595a16c6698757127d4e1c,,
# baglacgzasvqcntdahkxhufdnkm7a22s2eetj6mx6nzkarwxtkvy4x3bubdgq,\x0f,0,f,/blocks/,
# DMQJKYBGZRQDVLT2CRWVGPQNNJNCCJU7GL7G4VAI3LZVK4OL5Q2ARTI

while getopts i:c:o:d: OPTION
do
  case "${OPTION}" in
    i) inputFile=${OPTARG};;
    c) expectedColumns=${OPTARG};;
    o) outputFile=${OPTARG};;
    d) data=${OPTARG};;
  esac
done

timestamp=$(date +%s)

# if data requested, dump row number, number of columns and the row
if [ "${data}" = true ] ; then
  if [ -z "${outputFile}" ]; then
    awk -F"," "NF!=${expectedColumns} {print NR, NF, \$0}" < ${inputFile}
  else
    awk -F"," "NF!=${expectedColumns} {print NR, NF, \$0}" < ${inputFile} > ${outputFile}
  fi
# else, dump only row number, number of columns
else
  if [ -z "${outputFile}" ]; then
    awk -F"," "NF!=${expectedColumns} {print NR, NF}" < ${inputFile}
  else
    awk -F"," "NF!=${expectedColumns} {print NR, NF}" < ${inputFile} > ${outputFile}
  fi
fi

difference=$(($(date +%s)-timestamp))
echo Time taken: $(date -d@${difference} -u +%H:%M:%S)
63
startup_script.sh
Executable file
@ -0,0 +1,63 @@
#!/bin/bash
# Exit if the variable tests fail
set -e
set -o pipefail

if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
  env
  set -x
fi

# Check the database variables are set
test "$VDB_COMMAND"

# docker must be run in privileged mode for mounts to work
echo "Setting up /app/geth-rw overlayed /app/geth-ro"
mkdir -p /tmp/overlay
mount -t tmpfs tmpfs /tmp/overlay
mkdir -p /tmp/overlay/upper
mkdir -p /tmp/overlay/work
mkdir -p /app/geth-rw

mount -t overlay overlay -o lowerdir=/app/geth-ro,upperdir=/tmp/overlay/upper,workdir=/tmp/overlay/work /app/geth-rw

mkdir /var/run/statediff
cd /var/run/statediff

SETUID=""
if [[ -n "$TARGET_UID" ]] && [[ -n "$TARGET_GID" ]]; then
  SETUID="su-exec $TARGET_UID:$TARGET_GID"
  chown -R $TARGET_UID:$TARGET_GID /var/run/statediff
fi

START_TIME=`date -u +"%Y-%m-%dT%H:%M:%SZ"`
echo "Running the snapshot service" && \
if [[ -n "$LOG_FILE" ]]; then
  $SETUID /app/ipld-eth-state-snapshot "$VDB_COMMAND" $* |& $SETUID tee ${LOG_FILE}.console
  rc=$?
else
  $SETUID /app/ipld-eth-state-snapshot "$VDB_COMMAND" $*
  rc=$?
fi
STOP_TIME=`date -u +"%Y-%m-%dT%H:%M:%SZ"`

if [ $rc -eq 0 ] && [ "$VDB_COMMAND" == "stateSnapshot" ] && [ -n "$SNAPSHOT_BLOCK_HEIGHT" ]; then
  cat >metadata.json <<EOF
{
  "type": "snapshot",
  "range": { "start": $SNAPSHOT_BLOCK_HEIGHT, "stop": $SNAPSHOT_BLOCK_HEIGHT },
  "nodeId": "$ETH_NODE_ID",
  "genesisBlock": "$ETH_GENESIS_BLOCK",
  "networkId": "$ETH_NETWORK_ID",
  "chainId": "$ETH_CHAIN_ID",
  "time": { "start": "$START_TIME", "stop": "$STOP_TIME" }
}
EOF
  if [[ -n "$TARGET_UID" ]] && [[ -n "$TARGET_GID" ]]; then
    echo 'metadata.json' | cpio -p --owner $TARGET_UID:$TARGET_GID $FILE_OUTPUT_DIR
  else
    cp metadata.json $FILE_OUTPUT_DIR
  fi
fi

exit $rc
23
test/ci-config.toml
Normal file
@ -0,0 +1,23 @@
[database]
  name = "cerc_testing"
  hostname = "127.0.0.1"
  port = 8077
  user = "vdbm"
  password = "password"

[log]
  level = "debug"

[snapshot]
  workers = 4
  recoveryFile = "snapshot_recovery_file"
  # Note: these are overridden in the workflow step
  # mode = "postgres"
  # blockHeight = 0

[ethereum]
  clientName = "test-client"
  nodeID = "test-node"
  networkID = "test-network"
  chainID = 1
  genesisBlock = ""
@ -1,14 +1,12 @@
-version: '3.2'
-
 services:
   migrations:
     restart: on-failure
     depends_on:
       - ipld-eth-db
-    image: vulcanize/ipld-eth-db:v4.2.1-alpha
+    image: git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:v5.3.0-alpha
     environment:
       DATABASE_USER: "vdbm"
-      DATABASE_NAME: "vulcanize_testing"
+      DATABASE_NAME: "cerc_testing"
       DATABASE_PASSWORD: "password"
       DATABASE_HOSTNAME: "ipld-eth-db"
       DATABASE_PORT: 5432
@ -19,9 +17,14 @@ services:
     command: ["postgres", "-c", "log_statement=all"]
     environment:
       POSTGRES_USER: "vdbm"
-      POSTGRES_DB: "vulcanize_testing"
+      POSTGRES_DB: "cerc_testing"
       POSTGRES_PASSWORD: "password"
     ports:
-      - "127.0.0.1:8077:5432"
+      - 0.0.0.0:8077:5432
     volumes:
       - /tmp:/tmp
+    healthcheck:
+      test: ["CMD", "pg_isready", "-U", "vdbm"]
+      interval: 2s
+      timeout: 1s
+      retries: 3
438
test/fixture_chain_A.go
Normal file
438
test/fixture_chain_A.go
Normal file
@ -0,0 +1,438 @@
|
|||||||
|
package test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/cerc-io/eth-testing/chains/premerge2"
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
for _, path := range premerge2.Block1_StateNodeLeafKeys {
|
||||||
|
hex := common.BytesToHash(path).String()
|
||||||
|
ChainA_Block1_StateNodeLeafKeys = append(ChainA_Block1_StateNodeLeafKeys, hex)
|
||||||
|
}
|
||||||
|
// sort it
|
||||||
|
sort.Slice(ChainA_Block1_StateNodeLeafKeys, func(i, j int) bool {
|
||||||
|
return ChainA_Block1_StateNodeLeafKeys[i] < ChainA_Block1_StateNodeLeafKeys[j]
|
||||||
|
})
|
||||||
|
}
var (
	ChainA = premerge2.ChainData

	ChainA_Block1_StateNodeLeafKeys []string
	// ChainA_Block1_StateNodeLeafKeys = small2.Block1_StateNodeLeafKeys

	ChainA_Block1_IpldCids = []string{
		"baglacgzamidvfvv6vdpeagumkeadfy4sek3fwba5wnuegt6mcsrcl2y3qxfq",
		"baglacgzakk2zjdmtcwpduxyzd5accfkyebufm3j3eldwon6e3gosyps4nmia",
		"baglacgzaxt5p24gzgsgqqpd5fyheuufvaex4gfojqntngvewfdhe54poe7jq",
		"baglacgzapngkev2hcarm7bmcwdrvagxu27mgu5tp25y76kzkvjmrggrora4a",
		"baglacgza5fhbdiu6o3ibtl7jahjwagqs27knhtmehxvoyt6qg7wuodaek2qq",
		"baglacgzakho5pd5qpbxs7mo3ujd7ejcjyhstznb3xx3fluukdjyybxn4aexa",
		"baglacgza2dbonmaqxik2vhbnfzd4dhcpyjm47rlbuz35cha3jy7jyxvrsoxa",
		"baglacgza5gn7vz4ksy4go5joxn3zn2hgzf7sudxlq7fthztqhj2ikql3spva",
		"baglacgzas6yxvcp5fqb65gglmrm4bd2rwju5uxhoizsq5bchb5rl7a5uh37a",
		"baglacgzamzsn226lwcfyh6cdetnyzoxsz2zcdze6m2lrg2o5ejl6sr5dwe6q",
		"baglacgzasogvybtxh67x26ob42m56mlgnxwdelfb24oobk3po3te6yysmmca",
		"baglacgzab7rmzczswht4isr63gea5uoww4pmqsxrvgzn74wheqwopl36mela",
		"baglacgza2ovtxz2bp6yccm56iacbpp4kgthyz4k6evyp5lq4rzmp2c23mnhq",
		"baglacgzajf3sy2bvf2vu2d4hqvj3rvq5lblzp4qptxfb4ulcyayhrrdszghq",
		"baglacgza4wczwxeuvdhklly5renpmti4x34ilhhmgdlcro5jjpyhowgvdwpa",
		"baglacgzazikph4bqhr7vgs2xiqpebvoyazj27mftysmy6mzoigkutxdxt7ma",
		"baglacgzasvwqbzd4k6hoheken36oszbb6b6dvfc46acsyhfqssajcqd4xzcq",
		"baglacgzaui2r4k54xxqxadyjt25kzovmlelw4obn3fpda6gecswheklvrhia",
		"baglacgzacq4j5rfibfkuxvwa5ui6zpeq7h6edgmquy3oguz6zxxbdkfw6upa",
		"baglacgzalihtntqwaqxyc5z3olm3odzztqlq6d27rx5mdt4gu2bdxgwwp7xa",
		"baglacgzat5btacphq4ie5kecajgxjfgvooqza4zb47w24ibv5yvz2dy7zyea",
		"baglacgzaet376qv35issfdnd44lpe3xxtmzycg56mibqh3ehd6pxbxj6bpda",
		"baglacgzafkeckix5qfiuuorchl6xdg2o6vis2qknjirq63vryuqcyl24kwxa",
		"baglacgzayesgx5kytkdemwcwmhxd435ka4aqqpwm6qugtirlnpyoyjexg2ka",
		"baglacgzamknqvkqe37lskybr6dimt5ngmihfsmnoe5mi4yvtu7dq7tylh5ua",
		"baglacgzaniotnde2dyyjhdnud5batwqnq3njuh2gotx6hivafivq4qtt22oq",
		"baglacgzaov7f7oz4onncim5hhnlbjlz7ozpom26kfh66vjow3w2s2cok6ska",
		"baglacgzai2u7cil4gzmzbas3pulb7qr4vzlirt5wwiyh57slomwhepqdpfma",
		"baglacgza6twdmxbxie5v7ht5hdb4mqezel5cuwjxk7xwc5vxfepn4wxcwllq",
		"baglacgzanax447kk5lah6ed5gqzg2eefwyygfn3l3w6n7eio3w5ohhluo7ca",
		"baglacgzawxgpzpbsbi43icxcrchpoxxcaugcsvh6eusiswwjrtkdlugveana",
		"baglacgzajshfqz2lgrejfi37nhstsxmjeh7c2jfok4znn4fezhmr2mlwpzhq",
		"baglacgza3ask2jt3sjqfdiuxxx3fjipnxzp2u3in6z5d3qflo5fxh7ihmf6a",
		"baglacgzavtfwj5dsgw4vpplzv3zsw6fwiykcpz2lpclspzq55u42vij2g2pq",
		"baglacgzaelxcuf3wfrqavkk2uunaqjwp3wiuisjreuarwnbiqtdbrq5kwkuq",
		"baglacgzajieha4wgbglqnmt4wbooug3ffnvayz2lqkqpop36elnocsvprkeq",
		"baglacgza424ea7tewjqbcwi5fwcticsbiinwh7ffdf2jeqrmjzrpv7xpo75q",
		"baglacgzajg3cp7yoxohz7luw4hzvg5cnzcduabrogcqy7ilhwhp64nmsn72a",
		"baglacgza6ogjls57pq4k35agbzpeydujoq65lpoimp4iv2d6cegrdjk4frwa",
		"baglacgzaqr6cfr453mxviwkqsjfz3riq3nw3lrh7lmev2nuwoop34mjmgjta",
		"baglacgza5wvocvjvd6bdjteyzt3y7sdimlfxra6c4ndihqlk3oewgwclny3q",
		"baglacgzamxpcef5svw5bshjcmx5dtw3jvdnsqxyqdoystvutgpk3dbxaddsa",
		"baglacgzaihrnrw2zuaucifxzmpyg5kz2evaagrybgq2nm4sif3jhr7mljnka",
		"baglacgzaydqlktfraw5nig2lsjmigudumpo7vzy4mgn2fza5nvl5ukri577a",
		"baglacgzab2orhwmiw5gxfsqb3bwckhf3tf5jztbbdn2i5eyk2kvd3zfi7hlq",
		"baglacgzamfflp7uex2uddjuoly44nywthhnugk4u3tjjvr2542km7rtecsla",
		"baglacgzasfy3a6qvsisuwzgjm3w7vukbubffxx7ei3eqh7f3v2ftrqrfhiwa",
		"baglacgzayrdorxqktwlfykcpqo3uhyfds3rlsjy6rcapz42x2lsc64otdonq",
		"baglacgzajwya3t5k5mqyvipqqlahodjmmsljwe4df42igrc7pdgqzbc725sa",
		"baglacgzalc6y4rmk42q6ix5cxhpinwyhlbnjobwb4knsqr3xe6qv7m6qkibq",
		"baglacgzaidbvljbgsc2tpdyjwzcsqpszjotijnbls37ropeazffsoi2wamkq",
		"baglacgzacuyuir4l6vee5vuf5elh7tvnwzymf44c4qpzu2ipo2tbbyp4e3oq",
		"baglacgza6coc33lehemkv73byblozayqgaclz6xko4kla5pcptbgwhkyoibq",
		"baglacgza7uco7rtze752545y336slgt7pczgdpmkb6j65x3yydfsprerba5a",
		"baglacgza4eanzp6ludjfoqr4h67bzlsxjartrqqeq5t4pv2q3b4x2padxbiq",
		"baglacgzaoocvbederlpqaufwkwso5pl7qkfnrpd76zj6zbwgj5f4qcygis3a",
		"baglacgzavx7pxqr4m7pfzcn6tcc7o5pq4g5tp6qvsykkhe6rugqat4a2kuxq",
		"baglacgzaljiw3say55ek5m3x64e66wcifr5na7vbutyuu3m74gimlh47g44q",
		"baglacgzaqrzyy5uetfwsqgfvv624scsdw7dx7z42pf47p2m3xuhqwuei27ha",
		"baglacgzayxrz3npxgaz2byd4onx5phnjyfwxfovjbztg6ddrhwew7pvynq7q",
		"baglacgzac2cndcn3vq5mnjfoz7kdnboebmshmdmvnb6aatzkwnegyfug3cqq",
		"baglacgza66vjwzsh6wgfv72zygbwgh2vufhfuagmf36q6r3ycnwxx7yaxqnq",
		"baglacgzac5uhfzgshqvvqme5iw5rx4n3g5lij4eapzaejzpgm6njrec45qaa",
		"baglacgza6ta2auxqjgh7o2oj6x3ogcrx4cgfxlupdccrq4j3p5zjnahnq7mq",
		"baglacgzaaokqnkj6sgq57ikquob6w6uhvo6v7ni6uy677pqzr24f3nyll5eq",
		"baglacgzavwymwhn2owqnbm43vvqtxgd3ab5caqalvs4sz2tzc4cs74b43q5q",
		"baglacgzahlzt3rfhisvv5xkcyxc73sm6ijh54n42zfsq76ysi3jisro646fa",
		"baglacgzaqhglxiq5ptweegtm64wuezj7spc2u2g5prw6zdgnwmjwfxdbn5nq",
		"baglacgzadztftc3rxrphupebphkbwuzdtnthtyl4pfxga7wghxthe463ncya",
		"baglacgzaz6agggjviebqoyw3sdos6z3jprjr5fe5vprt7dlarq5gxm2swdvq",
		"baglacgzasdc5a3pa4mtp46bpsru56aojakeucvy57654mq5o2bjp5mop6l3a",
		"baglacgzaqwwwnlav6alcw7r2umugzbxppixu6mqp6w6qyriffo27mummjmca",
		"baglacgzabmrd6yhbgxhmghn5nguatwnzhrondlaxmagzxyzqdm24gooneucq",
		"baglacgzajblmw25dyrzwsfymo74y5h67v4nrfgxs35eevemvqfui3y7rkszq",
		"baglacgzaivgvcrgjwicuf4aremv2hbggrnzntrddmydzud6rkbpb3xrbpdia",
		"baglacgzagpnopg2w6cmfzi3avh7c7ovd6rlwmnpu45kkb3wmlx3etchiggkq",
		"baglacgzaom4zyvyb6kn2hoiyvwg2ywrwgr7o5fe5c3p42z4vuhfzuxmlaoaa",
		"baglacgzawj7icprvylimlisn2p2626vxy7ukwps4t67gvrhduz5hlk4aecyq",
		"baglacgzatnjb6dg7fsz4pesso63i63c3t2agwybbgd3i5u4ezthjuvddspea",
		"baglacgza5oahzgmmqeqqszmqsfbwaq36gbirizq6aii3zm3jyud3pgndchlq",
		"baglacgzaxyyowwmsdsveoyjw7ywj67krm3x77iqyy3gzj7fdc4xnzjyirsfa",
		"baglacgzaew7pv5vcxev3udk3dh4eaezwpjgi2pxwqa3umwmtoiw25q5foqwq",
		"baglacgzapexdm6koz42fosvv4qjbqhnhsuevh7oqmqwonspl63t2vpjqitha",
		"baglacgzaixcais2z6gwyafi6bpptra65xswthhpd5g26yr3d6ahn3bl2uvca",
		"baglacgzaimssao3zceshkgh6gltjqqqh2x5qiodirixcvjqutgvdphog7dma",
		"baglacgzacgrm2zlg4dogiza57lwcti5r7ga6ucswdsp3mp2277jfa7yx77fa",
		"baglacgzapsts4gledg5dyjaileaqdcffv5zcw6qooifqxgl26bxsoi2n4waq",
		"baglacgzagz2qudg5ucppkpoeu5iq5nu6q7527mltt5i5kldaeffx4djhnxoq",
		"baglacgzao3ht5gq4vbud5g5wbwsx5wejlbvgecqqadditqhk5yhbgw4tkbna",
		"baglacgzacuetfnthnppfxkfzgfza3exvy7gselbqv2s5b6czidll5exmqwza",
		"baglacgzaqbgeg6rmbd2zxpucpdd73kb5bmmo6p2p6eonafojtqkwi563ycoq",
		"baglacgzape6j3mhckl4plr42twds57ctqwvwgku5ymjboy33gue7z5xqwaia",
		"baglacgzazy26zckarnz3jfpcwpqo6rwr5r4wy7bonmc3rljbkr77uoiyoxca",
		"baglacgzabadhauzo4lxjpslyal3fb5gfrs55hsycsd5r2mj4mkvcgypcvs4q",
		"baglacgzao7aftivtmdu4sz3inijqfjajstgwhka2vafiigmr3dz5on43ndvq",
		"baglacgzahtfb5mhojo7zknjhyhnf6o6d65wkz22ellgvxvz2cf32dhrno35q",
		"baglacgzasx2czupgncbldxwxkqkxez6tt2oldw4iocqrhc7gk6bgp26g2slq",
		"baglacgzaqeijuarx6vrtycc5267h5g3xzgskgaylrftmyjq7vjouxvkb5cvq",
		"baglacgzalc42jtx44sibtcvjjhz6drbt54y6lcxy6ucmngi7cvdbajiebndq",
		"baglacgzahbvb5fbnx2ddikyx4lulfcrftvw3mxpy4bpziskruce3xhz5tcpq",
		"baglacgzafgf6pv43422ibuujti24hazwtn3ohwylzgo3mt6qu7dven4zlqdq",
		"baglacgzamet5xv7ury7dnkqy5yltgbvalcl4ricsvdduy7hskmyxslvsa5sa",
		"baglacgzakxelvpgmk3loheqewteco3z4pusavgv3cjj4xzylahmsiqkwovxq",
		"baglacgzacqbsc6t7cqligdehacd4kjg2xlpdtbjhd5xtngqswaiiqpdrsj5a",
		"baglacgza72em77piwedfycox3l4y7qbskqlptpcy7r725im2tpsj23si57ga",
		"baglacgza636axkok5ao37hjupoeksmk73f3rpimd745avfcoxzwz53bp3xiq",
		"baglacgza5n7yqni36tyi7clfxxfqciib6j4e3fru6ye3eticdb4b5i6k4m4q",
		"baglacgzanbkitjrv36vsbyxc2fazsncuapltoqi5yxyntfjtp52dfmw5z64a",
		"baglacgzazswo2typlq7izwoll6w4xnd3dszwktreiszh3b7w2kt2ucll5okq",
		"baglacgza44bydaixin7ymaidhsaawjsemc2wkds62ahiaqrtctpvzo6xitaq",
		"baglacgzay2b7jkphp4kufkhmwiriduyg5kgmqyzjojikd6hvib4bycl6fkga",
		"baglacgza245jp2gg7wvxvbuvdxxynbsfzynj767o5dv6tkgsaghgsfsmvfya",
		"baglacgza7hvenpvtima4lqksljjfeiou2lwhy6h7qvmdaxrvp6iglprd5ecq",
		"baglacgzarrbzhd34po574cixc6tk2wd4escxarqzoqnlmplqkirhq2ms6wla",
		"baglacgza6wjkyvgipgaxhclghpthoftpkarjiprp4g2smf5b2foc6nj7e7oq",
		"baglacgzavtod2r5swzrok7fapkssy4mufrtid37trvz2jxzhnifxh7rdgxdq",
		"baglacgzaaju4hfbrfcsgxp2dqrqdjrrfdjwjhbcubmmum3wsveqgsisv5sjq",
		"baglacgzagfnw4qkfwuqlrd7v7nryxergohxb5s6lmw2xxgsl4zikwh6odu4q",
		"baglacgza3ieihinvg2srwi7dupigwsahksvrlhninkyxt4ewb426uqmqtjnq",
		"baglacgzaapcyag7sitbiyxcdbbj5m6l64vhx4gt4hbhvdwgjuhoezwlmw5hq",
		"baglacgzam3qbvtektatlypk7kkdidh6fra67umeugmy7dz77fful7rl6ulia",
		"baglacgzaeifznjadvk52cuv3qvbitazdkkavu4q3detg7xqhmsuykaemme3q",
		"baglacgzaqdcmhkhjwdwatfshq4axfenrhggqceqrz47yiupwweqknnrvqfya",
		"baglacgzanr74m4zutwqp4ybkpgdyborqoccfnigwlv6ze3hyou5jlrrnxchq",
		"baglacgza5zaewwegrxjtaezosakyqpplolmav35eqfdyjju5okk3tmogbtkq",
		"baglacgzavsgqcwu6m2hvq574yoi7vyzzqhaak5yjn4cflnbn6t4oqce6zysa",
		"baglacgzafnsgu5ksxa4sv2kcmn2x62m2e7losf3ljqdlt7akoixyso4wi6kq",
		"baglacgzatcbgkfcnzesrtyfe5hxe2yuqek2hvgmwvla2zjo3i4rvhnb2k7yq",
		"baglacgzavzdzgv2mihwc6qop5hkv37hhx26dmnq75sfg3jf4nkq5vd4pjvja",
		"baglacgza3oids2arkgomy6bblcggrwooaqyj3foxbxiawhckxhyc5phxqzgq",
		"baglacgzaj2yfzqrtpjd6luyv7spcs4xyrmrifsxm663zznegzt2omto7ktgq",
		"baglacgzaegino24jsful2fjnpe3haf3hhztdzzm626rdtmksxauccfzv335a",
		"baglacgzazvm5p6m3ynh74glcwhuxtw7b3hv47ml5y6mtif2whmklebfd2mka",
		"baglacgzak7v5o37lheriih5julg5c37gc3wpxmxudysjo6fttnju65efl4ma",
		"baglacgzafkusmmr2rw7vijysdeldocemzrvwszho6nbvxakcy3buf3ytk4oq",
		"baglacgzafiiwa2wygo4qm76xt3tekscp4ioub4u34vz2aqptp56frudzgjkq",
		"baglacgza5vqm4jugxseggsbniznupli2bivz4drwupzzyfubqmt2cggrk7wa",
		"baglacgzae27ionu7mlu3ojudqd4a2ywhyrenxw7zrshr4jhy4ld2fqpgkkia",
		"baglacgzajdmyteoo6aovcp4w2wfnqlwp7hhncrgkajtqm3fzbxo3zhoko5na",
		"baglacgzaan3c7frug6yo5tyyv7kzn6bzrxtwkwy35bmuvikkq3v4i6suovpa",
		"baglacgza7p3a62673mtcsidsps3ep3atul26nzldgscxv66rvkmqj2gjdejq",
		"baglacgza37tily665vel2tvvcavpqtj7n3qot3zxvpsog63iqkxmfldastva",
		"baglacgzaeuvjvxxqf42qg44zjlnpje3ls7kpu2hx36uho45n27jjikys2jiq",
		"baglacgzab5yedqfwm3pczaqnqfvsondxhdyorywu27q6strjbc4ixq3glizq",
		"baglacgzanynqqlgddfsdtm27kvidm35d75yocvndtsdeijt7z64xkilxin4a",
		"baglacgzai5bxsipie422mzr6u2itm3wgfyg7p425rcqn2hg4453fxnepaa2q",
		"baglacgzaarg23ok2cd5nr6jc4ocetujiqb7nnrft42xvfuh2vbs35dfyqr2a",
		"baglacgza4ztanbjvytkd7462vy5jbgwoqypahkw6gzi6a2h3ktsisf4wajla",
		"baglacgzaqp33qaf7bfj5w6e4k63cbrc3oqemubyxgjmv7wjcroatsqflba3q",
		"baglacgzamwsrbjbo7pyf4ftaizzj2lsqdqhivh7pu2evcgraenjg6sx573oa",
		"baglacgzagf4zu7uebnql22h7pmuxotzjcs2y7y7o3dz3nsogfou4dqxa7pja",
		"baglacgzaaqveulltjfdqenhsig3nzfwdwxso3ndbgovg2gnczkqop7vpbbvq",
		"baglacgza22ifq7h6bot66tpn5xudjfcqtydvk7bcang7lxosyfum4ifhd4cq",
		"baglacgzarr6a6fovyug5em3cqkzmggna2nvjohihdin5ffn4f7k3cm2qc5gq",
		"baglacgzaao5djij6f4x3jp3qszkawqwusvofe2mhloopb55yoyzfqxkezgsq",
		"baglacgzavcbrgucanfxqhbshrz2hv62vfkrtrhlv5qx6swbc3zavqvcn6zta",
		"baglacgzark7ier7445klswjg5eqx5qxoiibq5mrmbctybd2ffu4gwffqkwyq",
		"baglacgzacahqtmufgqhyzdgynhxsezldqc4merrerrf3y4jw5d64umjg24oa",
		"baglacgzasfdhsvcjbujhmmosulzzu3w2xvyccu66qf76rwrkxgrqke7fy3oq",
		"baglacgzast2lxo3sgtk5qtnp64mwxyjuozwyt5v3rg4ytrnleporcqmb62ua",
		"baglacgzauwwnb3h5pxhm2h3tmcxrc3t52jlbpibalnpywnu34p74pbge6wuq",
		"baglacgzasb5vgdsv56jygtmspwoswmezrfnp2kray7xhuszshqa2dfrs3ypa",
		"baglacgzabhaasbte4bwnubvxduslb4am2dotafbel5lxvzki3wn5rs4dl24q",
		"baglacgzaqm53klhsbyfek6wnzmzsah7iz2km2euk75yapvez7fyl73gfxhxa",
		"baglacgzawaf7gawvue34nkiksyyrpizlmtkuu275e2xxhaxiirhsmmoeo5zq",
		"baglacgzaaqtskzrmoaoexhra66tmvdxne353oxcxuzq2dca75ldjrqqhoiaq",
		"baglacgzao4txzget4reg6nj6uwptwdu2n6sohzyfeivkdwdzvziouna2uvua",
		"baglacgzanm2vfedt2eqsljbb3iwri7hu73bnb3rqgrurkmrsacfzejju2nda",
		"baglacgzavxzbb6zhtlf42msx27zozxk4a6twphs4qsxchlrt2ny6t5we2t3q",
		"baglacgza267mwypnyml7gmua2bifcmpndmtwzzw2dfjox3dfixo25uopnmda",
		"baglacgzat2wiom6pryjqdoptciek3ckt3ctgdeujprivuey6ypgfsjypr65a",
		"baglacgzavz4xq4u5fosiyz7ldtzluikmtco4k3mv4xsrnppjz5omgutz6abq",
		"baglacgzacj4uv2ru2opsecdduklxkbxl4vkvyk3ercuunh7nsgfxit3h23mq",
		"baglacgzav3o4q33y7amd7bgpfs5xc3kog57nnhbruh2s36pziymkmv32dpgq",
		"baglacgza7hx5cpakzowq2h26ocionl2t2p6ifhui6pju5xug6wgifi2xkv7a",
		"baglacgzaty5w2ykcxoxf2zfdcr742hzezg32vyanvv2qz6hbox7atjqknqrq",
		"baglacgzaoyoxana7gxkhxwj47iiqjv76y3ktnk3kootf3pzfpxcpmzp6ptma",
		"baglacgza4x65ftjd3telo3eyyzrgosshvnlu7kj7enzezkwiowxsentq2twa",
		"baglacgza2u7imlxl3apzarjovwuegtp52a5h546qnvw3hzumxr6qlx7yd3aa",
		"baglacgzay2imkpytg6m7kmq7oloogxzgfc6t7sm77spappsm2iajkdsqif7a",
		"baglacgza2gxxoee4k2cxdf24whfylc7x2eb6eshvrunugemjp766sxhbx6qq",
		"baglacgzaz6sqay6zefbflfsyrt43nsszivnrywlokmridmcox45ehavr2bxq",
		"baglacgzawx34khb3fvi5s7yxduvtrjg7dj6avtc6wdpenpxp6tih6xwsbymq",
		"baglacgzaxh6czvlet4gmuorror6l6m7qrr4ymkolyr4lzofbme763w2peijq",
		"baglacgzaw7it5iumtdpxyfxvlizcwsthfsemmyjqmb5cq24hemei6dftsjtq",
		"baglacgzapevdnthqwueqltoge7dt2cuxvijmhep7rw6cnp44pemp6sluitka",
		"baglacgzaesu7doagjxn3mknma6nifhvfjoznwlgjqomq6jpxlcejioxu2upq",
		"baglacgzahojkgpcys6csj4cos62mt6fwb32xsoca3l42qci34zqjmtyvd7gq",
		"baglacgzauefudv2ingzufqe36jloewm3xketyjvnc4e4djtpbathwjm66a2a",
		"baglacgza6z2kpaqbk2lezgrkqrznv3c7uaomvab6646z7qo6n3rsbz3qpbka",
		"baglacgzaeqh6atyhyht4qqqvcyuxdg3uqfu5x2mujowput5bjcuor4vnzrla",
		"baglacgzatwt5s5k74dcvrm6d32p5zx47fcxgihzyzf4hwbnxhkzcvzj26pra",
		"baglacgzaszpquuoaaaq3auktxvag6h3fuwpnnrv3chfrymdwb5khdqwfxa7q",
		"baglacgzaf2bu6l5bt57gstxyudjbbrj6jddfac3qmr5jnkt6tgwbj3qpfavq",
		"baglacgzaeph54ay7tbgyox3437nbngzluz2k4kkqmjh6ymgbuakg2c3mf2da",
		"baglacgza2wso6cd6qxxk7kwtcgcx6gg3nztqk7h3kepb7if653mn7magazfq",
		"baglacgzax6ioorxkqyls3kmv2ntmfhsbptavrrtit2vy6zmgbnltjjbyogpa",
		"baglacgzawf46giyla7nssrdtvzl7afycmj4y7dcvdr2vwvtfvtqscxhocdfa",
		"baglacgzamyk5sdzyg2vnuzaqmbwwzqbbh2xxgfcouukhmcjcudy2jdw2dy7q",
		"baglacgzaizfqoqu2aubz4iutcsjnnrrfdkdayamouoiaixkznmnmcg24pktq",
		"baglacgzazcudtwhvet6q264rgjonf6nt2a3omigym5wpabkq23kdeyvxqr6a",
		"baglacgzatymnlewdcj7uqohfdcrcszva7nzezhgib6risqpenllqdfch3i3q",
		"baglacgzat2pxiuhdayqh4ma4ss3wxk2uyipuciqonxig3z6jitc5kdmrozha",
		"baglacgzafokb5hx5vy5ltj4ee6ndad7c5fbak3j34ap2j4u2i3mbt5oeqkzq",
		"baglacgzakuwsijjghgtk4522uhpxad73slbechnou4ug6fmniqebzals2bza",
		"baglacgzaxl62rn4xijbrpvuzkbb5awzhuasuihynltlwwau4lij3rn64rb3a",
		"baglacgzairaleq3xeadqowm7ec7kvxmbjsmqrltobjcqjso545a3zdcge72a",
		"baglacgzao4vipuem6ogey2f73z3qs2cxdk6rn7jygxfzajegxuxfcxktyewq",
		"baglacgzafufkadgo6qcmddvnavloopfzmozwxi3p4h3mjn5jw2xmj5ws2ipq",
		"baglacgzai3dvv53agiud47vx3fs6gpqg5gvjze5xsecatnh5l34e6pgocbia",
		"baglacgzawug56abirtemcm2skgyexstfmmrvivru3xjcgdyxqtj7ef3jxnjq",
		"baglacgzau4tmywowb37dv47edd7pl5af222ba23pfrlukvkbersc6vrv4qwa",
		"baglacgzabqzaabcpgd4pnucu3izbykoognju5kc5qwtfkualy5r6todywowq",
		"baglacgza2g5mo2mblvbfjjrm6xk2ppf6jplupamowaqb4j67szvaytx3wfra",
		"baglacgzaw7ftkn6xzbnwyvievvi5xuoqeodvbdwirel2cvx4a6kracedtiza",
		"baglacgza6anvax7pis7sukuzo6t27drgmckh2ahdork3wmzhqquidlakjpqq",
		"baglacgzaywc4cisesa54dmxrzulfzvg37ldoe3vebiqoncqtrhdxaypepf6q",
		"baglacgza5ndtrasv47fgrnbpuvqyaam4mhrn2ma37yqce3lkotlzl5vqc2ta",
		"baglacgzargpxdk5rrrwjkyiyx5lh7ldctn27p2ksnbz6ikot3cv3nw5vqaqq",
		"baglacgza4rw4nllzvg5j3kvvrsisd3jcwgq7htdege42ris6ddkpiti65ala",
		"baglacgzaoao7i2mmwuopg2gfx5m3xn34fayjdrov2yolscqtz7vi5emdqdna",
		"baglacgzavwgvvyakic262434m7kigrzlmqautwbknymr4fyngjkobh3cyl7a",
		"baglacgza6gta5cebz7fs3riluwgde3gmtjw2qkd4dzpvnuqbovr344aaldca",
		"baglacgzao6ru6zkgi7lknzzc4xogdvi5bkoux6gaoj4rejbazar7yavge5ta",
		"baglacgza2lsx6yk2i5iiy3tasnjvgqult7a4y5lhpi7lr5pxhvq52cvp6x2q",
		"baglacgzatou7j5blylumwrr5hfsck3hqrasegy55ewwgldtwew3uykaszcmq",
		"baglacgzaqi5dqutwokxefveag2nibmfzylw6szglsntiybeh4e2bmb6f2xxa",
		"baglacgzaovkdfxjerufbq24zzqm767juiyt4hcu4ivlpvxh447w66rpfvtka",
		"baglacgzawez7iipzfpgi2jirdwusmbvhdjporhu77ejvoam7duwmequa4isa",
		"baglacgzazlnsvtqu4zd5tjtz5bct7d2aqiotmfsfg4eg62bki6qiti6fdl4q",
		"baglacgzagfqonr7vtlbdofwm34pkoz325axn2v4pxyxbdly5enjbfnwo6eyq",
		"baglacgzaljokkpwqxdoaoyrmsml6b7b7zfiqefbhwxlmexxepy2d5wuyekya",
		"baglacgzabu6rq7xkdr5uoe2eunlx773yg2kk2h2lho53ef3c4adky2jhs6fq",
		"baglacgzab2hdhand5g57pqt4uslpy2mz6rqnkwlvw27bczvsc2tj2m3pr3ba",
		"baglacgzaugsxw7cthfl3fg2rlhemgut2hhitktn3bovkjd5hawrvi5ss7gsa",
		"baglacgza6wtl5yiy32ruo22c75ysjtnxrghptmimp6fp2pq3ilpaxqyn6c2q",
		"baglacgzauokbnjmp7gn4sz7e247j7ift5hrueq4zzq577m557j3bmqnwfixq",
		"baglacgzac2lofvuakrf675xzz6hh2ahgbd3z77gxc3ofrjolqjqj7dqhzopa",
		"baglacgzabsc4xuh7rbvblytwkhn4swzctyu43ba36xoehvuc7cpmbnkd3ska",
		"baglacgzayunrwjhott4rnqk7fniizbsv55apaqalgup2fnf66qip6aartkcq",
		"baglacgza3zbafsnpvwa5xw4xpjmx3ndhmuhynaoxxrzwcnfxi6o4rbwpu2hq",
		"baglacgzaqm4ijihatant626rqycd33xaerqj77zivb5iwmgyaqwgysc3zf6q",
		"baglacgzal6llyltmvocfvqgxq5ltwunaus5ntfhl5ze5f35kd67oj6y5lq6q",
		"baglacgzauyqu2gqzcc2xtmahbe4bnlubzp2thteevnp6bfd3kxpnxozq74rq",
		"baglacgzazklwtf65v4dpdcms6yqh4t3kawlz2b5m5lmwk2afq6eqc7gg2bvq",
		"baglacgzaoyn5xje7zjq52lswouegf3w64k4zhyqp6iclfsyj7wgjfjwyvicq",
		"baglacgzanrcxybniprkx7bhw3ggpwn2uuigb33ifkdxuavbt2niu6mzmo7pq",
		"baglacgzaxxsmknpbqxei7ffyjb7fhqtvfrwxr4t6zloyavtkt3jygvsldlra",
		"baglacgzaaiqagvbyp2jrclsjllilvba5ajksvpj6rsygtcx5suskigolta4q",
		"baglacgzatghruydgf4lodn6vmjtvfpvf755goj3jkeusdwia5pixldcqjmtq",
		"baglacgzamfrwerukgoisehrxqlnefyww7ohkihngxxjnm6pcbpydoxagcwda",
		"baglacgza4ypfm4rxwsoejwhza3housicojqliaimccsupm4nrmjrxhj3n6ca",
		"baglacgzagp3wukeubt7wqrdq5okknvbyh6rueyo5t2np5rg2whot573jq2qq",
		"baglacgzaxjrq5medoijedijmlrkevn32vsthf6vhgtojvtlttxo2ze5brbja",
		"baglacgzarwmkoc2al7nxgjxdysbzdiq4yfcbthxhbs4hkquxxnevsoxnwc7a",
		"baglacgza2jleouo2qqbrfv7uc73q6aw4svm74ltjhzhsqhpmqdcsxmvjxurq",
		"baglacgzajno3x77dsi7inf4voolwgevuslix7ays2u6oh3z5mq2klkwbj6hq",
		"baglacgzar2p263trvudcq3qwjppcpfgzmxc4taacjtekhkfzsqtatl2wp27q",
		"baglacgza5efjepjsmz2y65dfccco56i5jvrkn3wochllzfze6k3o54qkvlaq",
		"baglacgzaxrwu73uyvnvmbfuepvcxeryunic3ozbn6t5uxwypoy4puej6z52a",
		"baglacgza5ux3uey7vxvn5miif5lf77ywz2yar5utavxdcqbai4lma4446hqa",
		"baglacgzaufpcg6e6rm62ybb2a35vwtk2ptqt4z74pj3zmii6rx3a3dwnnw7a",
		"baglacgzabnitw6kehgnmpyrjdk343qnzt4cekjlmypymhnvvylkq5k2ptcdq",
		"baglacgzauckhnf4srmqecrryxiflfpf6kavfhm3d4qmjzkxg27f5dj3546cq",
		"baglacgzapxzpwc5xrysx6y74fs6pybyqlfly3olnv5zaazqsbuztbopuc6jq",
		"baglacgzaqtea7gzv2h3jroibscowoifdm64hvqievgvxg4v6kymat7e22ncq",
		"baglacgzantxg5ciyqddbw2tjz5kwrbh2lmxikruq5ifa4xcfsiwfgs2fheja",
		"baglacgzajv4bm22iarh5ykhneljp2ooi35xyvkqezny5hilsq2cw62et76bq",
		"baglacgzajiyfhc7uqabfypgpvip6dildryb7c4epz3tzxsoejbliwozlbphq",
		"baglacgzahsh7cceh3en65fkgjesotsxs3pqbhflxzv5kdkxnz67jd7c4pczq",
		"baglacgzaz7hm3bnvwozlapazmwe5hu5zxtin37ab6aam32p6hsvudxdkbila",
		"baglacgzaz5yvtye7y27sz7oitmxfgt5yvqdzcn6z6x2vxar7rvluzqoh6dfa",
		"baglacgzafelbojewhho2qlzz2d7txvh7ycbjntfmqkwdxkiw6raesraqfznq",
		"baglacgzawat7pexa2n2lq74lyoq6axky2qzzyf3h6sa6hrucjc3z45elm6zq",
		"baglacgzahwk3er5cckpklgmlw57cna2p5hkwwekjkkh4iz62pm5ybievfqta",
		"baglacgzabi63cfckdctmkqdhbcdwszzatr3bfcyyuaocrgnypedvjmjog2za",
		"baglacgza4fxgurqdgfxs7ja427ikr7e2rxfhzi3hmov6hg4z55l3qow7kaiq",
		"baglacgzaxq3k23qmqsllx7iz2ymhliqz2jewob2nckhdd2wkxtf3rb5drpwq",
		"baglacgza5nzqr7e7b3h2gmbxz24vdcmfcoadnzbie6nbtvigpyfigqerrxja",
		"bagmacgzakvveqidigvmttsk2gqjl3mqscorqcsb63mnwiqbpwzvmt42ygwmq",
		"baglacgzalodtjmdplb7dy2p5arsxk7nyszh6lhsyzxe4lgkdgrp6rymxzela",
		"baglacgzauzvc7x64vjf6wlwaisddf4vf6hjsfmtlypnadtb5i7kbbasizmma",
		"baglacgzaixlti7he2ffvgp6raqotxkdsekh5qy4duv3tmtn6kvn4n6sjuu2a",
		"baglacgzathtbu757wgovtxofbnlsnsyad662vbnn6aqk3oyyx6xixtxsw3oq",
		"baglacgzaz6ajmdnij27zbfrxugyesam5i6m6cezxfveoxjadnolwjelszw4a",
		"baglacgzaxzceixddm72q4dlup2gwlsoxfykcejxavmskrbravtwa5xcvnktq",
		"bagmacgzavl6vwffg5wwncspbcc5go5vgktznx76kgqeqfputhuarce7soubq",
		"baglacgzawksvmxhdtwfx7k5silyip4c3ojz255cast2bmycgzxozpb2rys7a",
		"baglacgzaywze5wn2o5cvdrdekjdjeet3tt36r3wfzwpcop54iumbvrex6zpa",
		"baglacgzakbsr5nin4suyz7r3xxzcxkuel6fghs6zrbw2yi5ez2xo7nloerpa",
		"baglacgzay5ujimrt4qi2ksavtfjysqjsn5m6ysxizi6hg3gqhpnuj362d7nq",
		"baglacgza7q5xdqz6fzvxprpesta5w763wrduopyahwxtpdd2mo5jx47qasoq",
		"baglacgzaisv2zdtclyzxlffct55zevsfb6wxmu462ft7et5qahpdqrnmcsba",
		"baglacgza5yyio2rxxtbrkpk7vvv2iyp7pfp4bkismdma3mk6qkxlhsiy4f2a",
		"bagmacgzaugn6dwvyjeqblgmuhrlxoerqgrzpev6uhsmi5f752q7kfsdiuqxa",
		"baglacgzaq4oyzbuduaeeg3ww6bzspstpbtcb7tiyswmaaymfpvao2hqwxcva",
		"baglacgzabqho5affvmsfef3cnd4xsw66l42d6ena4g2xedujct6qsd7o4a2q",
		"baglacgzapohhuiobc6gsqb2pcv5vb7fil3rfyeswr74os4dnzpg2zn337bka",
		"baglacgzaovc4t2yesyqvzvdsybtp5k2y4tb6xy676gwnwsr5qoztogehxj4q",
		"baglacgzami2ovudshhpsyi6vbuq5fycfgmv3hyx3bjacvlsxqc4chz6vgcda",
		"bagmacgzafb27j6ni6j5vwm7kfxfwfuqau7m4raff5v44ulu77z5wwp2bpnaq",
		"baglacgzaqw7dbrzdyxhjsdn22orpgfzxxwdqcf7hn7ugy4hl665cckc5oxja",
		"baglacgza5psrwfh6u2vklqex6jigq5hjscatynwnge4z5y6xeztn4lo6h7ga",
		"baglacgzauiscf2uzdir25zlogw4qpzwriy6mtgsyzl7omehok3jpmskk3knq",
		"baglacgzas4zhiutice4t5if7jai4vedxkmo3adigxbrdpixm22b7kw5exsya",
		"baglacgza3tax6aemhf6t2lqknaazzsksu2c4fjllgjx2izlkv47qmhzfgtwq",
		"baglacgzakncmprlqvhlj4nfejd7odbude6hmeykm6wspwqpm7bg3xoqi5dxq",
		"baglacgzaa5igkis4qk25v4ko6eryts6watdot3ark5uzlxm3o7j3izolxala",
		"bagmacgzaomwzsxiv5cwrrjquk4ryb6z4u4xhuu5xhpznph2oyb53ixrsvvca",
		"baglacgzafjhvq54vejfj2vrvtidr6nlt3e4azkw5jg6kdnr2dot6edm6mzsa",
		"baglacgzasvs7p7bsxtnrb5fz25cx5gyh43tqja74ywrhwpmt27gnni4z3qda",
		"baglacgzagrolvdnsflcwzcmqnbbyon3enber2hlamdf77kvhwousoyznwika",
		"baglacgzahkj5ojwxjb4hjzi3klmnkngghkrknco7ddr3gb6a23fquoeladzq",
		"baglacgza2zihxbb2gl2daaft5miumsjqbps3xgmip2r52ubrpii5zkpshpvq",
		"baglacgzakhvmbzxior7nsroicglbhkbvts3weihhcrqqz54dhcgosaavgiea",
		"baglacgzaqlswzpybvsbc3fqkr4iekizldlug3ak6qsuthtu5qtybmtij2lia",
		"baglacgzaajspycacn5bhe4dpspprjoayo72z54wmrxz5n7m2g7of3eazijqq",
		"baglacgzax7i3elt7nndzjenb5xkogpgelmcmmtn6lqp5v6kvyfqe7m5k5sya",
		"bagmacgzauubmsoyzddcmmu2niwj24a5fui72cdv4gd73ocalff576jcg4qwq",
		"baglacgzasqqcuuppbzjikphak2gz56fnuysk4vnlq6andul7yvwolmswisiq",
		"baglacgzam2xbzezi7l6vlyicgx6i3kpiqceh5veonhmpa4pjny3eibaeolwq",
		"baglacgzabirgkutruwdjfcpl6bkujicvpsixkwfjh5hmuy7xoamdysl23dsq",
		"bagmacgzayktazfgfoa6a7g5ijetwofgbp4aphqxbok53sqoc7pfydslq2moa",
		"baglacgzalvkdmoxvvqpflgq235nahqiw4xofhxzhuio2eljusr7uhrch7nnq",
		"baglacgzazsxzdrr4wtg24th2crzvzt66fhg7dy3zppagpy2nn5eesdrsaq5a",
		"baglacgza2vpmjbvshqsmj3qfuh2qfcx5kg654uhqbknb3ok25ppmhnfd35sa",
		"baglacgzadcjenr5pr6xnfr6t7b64rnnfdv4h634k2zm2y34roiuuwpp75vga",
		"bagmacgzau7hv4cknn43r7hxusbijdicen3yvpftldneg5zc2xmstgvhft2ra",
		"baglacgza4fxgo45wl7zhyqula5ahuljoi6lreftfcwskipwmhrcejv35j42a",
		"baglacgzasoghibkt6mikv6sjvnvv6zci47gjmnkumjzxhlei4tvq53e4jstq",
		"baglacgzaivd7643lhy6s535ukinqa24onqywzkfnfhhi5r7uvawxtiw7urza",
		"baglacgzaqwe44wrh2zpa7ogoka44yx6hox6w55jnndhymz4nerazqjgxedua",
		"bagmacgzaha7rcryssphnazakbiunmc42bokxd5sgzrbo5cnilp3g2zt3vnxq",
		"baglacgzab7lroi2stb2cmi6awpfpwpsl3bwawwvr64ijpng5dhz5nes5owgq",
		"baglacgza6l4kyy7nsrg2lahabyhvclpuncic2sqtzvmefqofpuq5lnsdhmra",
		"baglacgzacsbz24qw6iy2vviclvzaegksg22ryng66bhuxpj4dl6pcg32wzxq",
		"baglacgzazrli3jvfluavjdjwgkt3qktktnuh6set2t7ib7hzhanobmwxwvla",
		"baglacgzankthcaoqchi4el7hhhxyhmclkikkhyxy4grgexml7wyrnnch5bxq",
		"bagmacgzaf2zl6rp5iq55dx4ln6oas4tkjrrffihxrfvbggqidy42p5sewoeq",
		"baglacgzav7vn47ouq6zebmg3img7nmada6ag4hx25uouzqxttyptyudr46bq",
		"bagmacgzasc5m55cldco577of6ixny4h6fggfrzpfeptodx67pw6g2zl7punq",
		"baglacgzaerhefaw75qz4to3wkfrm53spfzrzaaz2ss3cbvikf7djipv5ql6a",
		"baglacgzahax3xfs4df4ywelodmzk2zgztppqt6hu5vgihyntrd722dxixrra",
		"baglacgzaeqyhcnkoumzym36selclrief3po2p4yj62juga6r7ueszzq7fsaq",
		"baglacgza6oydtjhtene6qxdyfuiwjqmjbzn7c25nzhxez6bh3nvp2irj3xta",
		"bagmacgzae3xnnb2gakf4g2plivvx2pxeowvbn42ol2vazgh55w44lhv4koya",
		"baglacgza3esavhjnlbi5awux74zqkm2n7wybahq6gip4e6osxm6k22x2r7ea",
		"baglacgzatxyuvssxlehlznynti47jiaoyj5kqevfdmu7yj4npmjr6l6uyhfq",
		"bagmacgzattugdfyxhykoayz5xbgor3vdfrkfj3v6svdxsjkwis2fw4l6rbaq",
		"baglacgzaf4sjbg7ya3pq737z7im3pmp5vubrly25hfkvea6n7pfapib63kyq",
		"bagmacgzagkghv6zmldxt7dcbc6uoxuzw6gtb2jczcbt63hc2v2khs3fmtb6q",
		"baglacgzavy2t2fxjdf7pgnx6dzz46eczpnjdwveeiihq5ev42guggtnivpxa",
		"bagmacgzajkxbxnhzvomtm3vz3rtsokavrzinenk3anvvqwog6tg6byve76nq",
		"baglacgzahkjgb63xoh6ke37ztl4npobu2gkyh3ae3jjii4daodh7utnujiqa",
		"baglacgzacthcbn5p3sqfzmpzrndyhbcmneuptrfwr7s5disl54oz5nxm5s2q",
		"baglacgzam24ldzjqb3puomhwshglrtjcyrcpkpva2wybbkltfws6tor5tp7a",
		"baglacgzaqkecamlmyav757mjtk5ecnaglh6qnxy6bidzmkd6yksbcarz63ja",
		"bagmacgzaquqfnzlnbsk5idejdyvjpchlahovlbrt3degno72rl4dc6htsymq",
		"baglacgzaecczvtf4q7l2mhitw2tn4y26ysaolmicnoc542wkyvvrs47o7a3a",
		"baglacgzavs7qjikqvxuxkpz5liqdyqrzaonkllqw6kd4lf2cxjltxxlgz2gq",
		"baglacgzawwi2ftqcgz7numopfulozj6cp7ke3pyims3e5kbftljwnfxlfica",
		"bagmacgzavhhx6zz2bphhn7kagmvp5bqbkqurbnen5jcosojtups6smg2lumq",
		"bagmacgzao5vkivv2triaryb3qk4edkopf7a6qv4m7lgvzeavqbhk4mk7c75q",
		"bagmacgzaolr6fbgupow3wcs4ufbb4elz2pvjbtaqpbnsnn2pxcub6d46qqma",
		"bagmacgza3x3z3mfdnugicnf2cq54wva42r4vvgrlv2fmuc5cjogysy6cu56q",
		"bagmacgzagatdibfm73qqhufragifh7zsid6oim6gtnyjqmlhgkc7uwehzzga",
		"bagmacgzamsaplavqsdtlvhzyovqewgkyk26azgp6tfdbzz5ux3423eajsita",
		"bagmacgzarsrnwni34m76ucixyqhwmzjzdoj4xyqzcepbbxzzg5kim7edr7dq",
		"bagmacgza7dy7xmpxwsbntbqeqd7oxob76vfiw3wb5llbzr6s6joxyalft6oa",
		"bagmacgzaxfz6yd2i64il66pwg2eeqv2vzpuh7hkmnazgxob4e2xwecacvaha",
		"bagmacgzaxrdsjyn4vafqvzadwgre564iakz2owgrueiyjr7nh7evfwksnizq",
		"bagmacgzaxqrzefztg4772fnaxzrwhela4py4iybnsucowa2ybg3jolflfdba",
		"bagmacgza6ccvgsnpnp4ev7elzixnumoi56gfcon6deu65m62jotlncubrsya",
		"bagmacgzayjy6dcno5mo3lvm5p7uh27lde656pt5drfqzafsfsgles7pdztpa",
		"bagmacgza2ved5k3y3gr3yqiixnhlzwelsmbxmyknsvg4ci4jiltww5alcxma",
		"bagmacgzamq3lujnpelx5hm2l6heowtohkwhuliyq6r34yty4hrurctkscnla",
		"bagmacgza45idxjlztz32umn34eyqymjmuf5syw6mr6ry6jtgoxupcvgckfvq",
		"bagmacgzafi3v5u4p4fgckxsrbf4u3zz64gfszz7pyihxhqio7ztn77yjwcqq",
		"bagmacgzatjwpysdg24pamvqso3g4tjchz72pdxsqweyuubc2jrdeusscvmra",
		"bagmacgzasj4lqrtjnu3scovz2iff5nblapntc46ojefc545s6ozwablz7rrq",
		"bagmacgzas7lcbavos6lvsurhbzlpekgh35dgarm7nye26e7wwrooolwfbpnq",
		"bagmacgzasmhzm736xpvahwm6jogaqeuieqsteffkfxfsq4gm6eb4q35a5d5a",
		"bagmacgzaw4bsyt4rnl5koaclh3bkzwk6ez72sj6j5ghsks5a2r675l3tyytq",
		"bagmacgzacmg7rh342shchhjofzwlwxrej2psqkf43jurovkweqpniytdzvha",
		"bagmacgzacy2ji662bc7ppplvkyxlvjxqiwyo4j2ie4xtck6l2zwtbf2w3i7a",
		"bagmacgza5ecbawirj6ojccw6zijnxoq75543fywirgps24qtzurn7zbravqq",
		"bagmacgza2vdmjsrcpith2klzmzbqgjbcg5dcj3iqtm6zjbemlxagxlhk5z3a",
		"bagmacgzae7ci4iimzrxac2dl4lkkdgotl4hb5dpwesunhil4cy56rbq2zvta",
		"bagmacgzai7cz3jllwk7tjde52kror5ktrkjlsbfwmhh6kssctc4fq2f34scq",
		"bagmacgzabu4xfmjm7dg6rf2fjjn62f57ilrchh3v4gbf62erabtzu5wm2gxq",
		"bagmacgzanjgius6avm37j2fq46oahss3cw4g5ntlfjzf5sbtguzppyai6pta",
		"bafkrwibagt3z4drtwcxgx34uquzaeg5m5miwvxzgczdyoa56y2yxgkprzq",
		"baglacgza5n2ivltmbqypzfjptsvbzvlvhpbcbzlr7xj6xb7zaallj3q3bu4a",
		"baglacgzal5gkbdbs4srzs7iostmji3r5gypmlubclwonqxdn5dkxfoyktheq",
		"baglacgzaeggi6pqszfefbd2or7verp6bbz6b7ctkszxi6yalsypnivkrc47a",
		"baglacgzawxfq5gj2pt53idroosz6eahmfmrwxuz5fpciiwmiuts7l4a6k2eq",
		"baglacgzaj46wxqbpstd5eicctecpdxhffmbuenzqmd3bt5jdjykdr7aeo3aa",
		"baglacgza7lwpiwksommncl7ofw4nqxcu7qse2aqhxizwuapds5mtxaa24ypq",
		"baglacgza7wkyigp25224rkrivwellawayv3y3r4mobbqc6xxmgscxgiq3gea",
		"baglacgzazrwcvecxj5bq6pyshnxvp35apsxcdtfzacvfbvsrnaa2vag4wnza",
		"baglacgzabchzwz3pjqtrnx35rjav3gmxeh6sbw3l7mjpwrb6gbiz5r4ltcgq",
		"baglacgzaokokv2ioov6fjlkgkufj4yrplnxdw47r4rqhighqnb354ea4jaaq",
		"baglacgza5gcozkl7fbpnys3d7uqzmawqsuvic5lrti4hznllfferepgxojja",
		"baglacgza34suygwx22xxdd2fynck4x6fjrrhoaxloeni45znn5ewpk3g7lea",
		"baglacgzasrizkrumchv6zypcuhr5fmtz66ej5cnup5sjbapxpj27ttj3u5xq",
		"baglacgzad3w24kle2itl3jm2kxq6cysoj4xoflsrhrw55msc6meagt6laetq",
		"baglacgzazixckhuckariike5abthcbdjgmgz5rcysbuaucijz5d7a3avqvpa",
		"baglacgzapdoq2uowvqcis3dlzxug57bwzas2dyhefu3f4frrqdz3yknzdxtq",
		"baglacgzabbcknaso72duwyoeqd4i2gyghf4avilk565nkzduap6h5jwcosza",
	}
)
525
test/fixture_chain_B.go
Normal file
@ -0,0 +1,525 @@
package test

import (
	"math/big"

	"github.com/cerc-io/eth-testing/chains/premerge1"
	"github.com/cerc-io/plugeth-statediff/indexer/ipld"
	"github.com/cerc-io/plugeth-statediff/indexer/models"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

var ChainB = premerge1.ChainData

var ChainB_block1_Header = types.Header{
	ParentHash:  common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177"),
	UncleHash:   common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
	Coinbase:    common.HexToAddress("0x0000000000000000000000000000000000000000"),
	Root:        common.HexToHash("0x53580584816f617295ea26c0e17641e0120cab2f0a8ffb53a866fd53aa8e8c2d"),
	TxHash:      common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
	ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
	Bloom:       types.Bloom{},
	Difficulty:  big.NewInt(2),
	Number:      big.NewInt(1),
	GasLimit:    4704588,
	GasUsed:     0,
	Time:        1492010458,
	Extra:       []byte{215, 131, 1, 6, 0, 132, 103, 101, 116, 104, 135, 103, 111, 49, 46, 55, 46, 51, 133, 108, 105, 110, 117, 120, 0, 0, 0, 0, 0, 0, 0, 0, 159, 30, 250, 30, 250, 114, 175, 19, 140, 145, 89, 102, 198, 57, 84, 74, 2, 85, 230, 40, 142, 24, 140, 34, 206, 145, 104, 193, 13, 190, 70, 218, 61, 136, 180, 170, 6, 89, 48, 17, 159, 184, 134, 33, 11, 240, 26, 8, 79, 222, 93, 59, 196, 141, 138, 163, 139, 202, 146, 228, 252, 197, 33, 81, 0},
	MixDigest:   common.Hash{},
	Nonce:       types.BlockNonce{},
	BaseFee:     nil,
}

var chainB_block1_stateNodeRLP = []byte{248, 113, 160, 147, 141, 92, 6, 119, 63, 191, 125, 121, 193, 230, 153, 223, 49, 102, 109, 236, 50, 44, 161, 215, 28, 224, 171, 111, 118, 230, 79, 99, 18, 99, 4, 160, 117, 126, 95, 187, 60, 115, 90, 36, 51, 167, 59, 86, 20, 175, 63, 118, 94, 230, 107, 202, 41, 253, 234, 165, 214, 221, 181, 45, 9, 202, 244, 148, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 160, 247, 170, 155, 102, 71, 245, 140, 90, 255, 89, 193, 131, 99, 31, 85, 161, 78, 90, 0, 204, 46, 253, 15, 71, 120, 19, 109, 123, 255, 0, 188, 27, 128}
var chainB_block1_stateNodeCID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(chainB_block1_stateNodeRLP))
var block_stateNodeLeafKey = "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb"

var ChainB_block1_StateNodeIPLD = models.IPLDModel{
	BlockNumber: ChainB_block1_Header.Number.String(),
	Key:         chainB_block1_stateNodeCID.String(),
	Data:        chainB_block1_stateNodeRLP,
}

var ChainB_block1_EmptyRootNodeRLP, _ = rlp.EncodeToBytes([]byte{})

var ChainB_block1_StateNode0 = models.StateNodeModel{
	BlockNumber: ChainB_block1_Header.Number.String(),
	HeaderID:    ChainB_block1_Header.Hash().Hex(),
	CID:         chainB_block1_stateNodeCID.String(),
	Diff:        false,
	Balance:     "1000",
	Nonce:       1,
	CodeHash:    crypto.Keccak256Hash([]byte{}).Hex(),
	StorageRoot: crypto.Keccak256Hash(ChainB_block1_EmptyRootNodeRLP).Hex(),
	Removed:     false,
	StateKey:    block_stateNodeLeafKey,
}
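The CID keys throughout these fixtures follow one pattern: Keccak-256 of the raw trie-node RLP, wrapped in a CIDv1 under the matching Ethereum trie multicodec. A minimal stand-alone sketch of what Keccak256ToCid presumably does, built on go-cid and go-multihash; the helper shape and the 0x96 codec value for eth-state-trie are assumptions taken from the public multicodec table, not from this diff:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

// keccak256ToCid wraps a pre-computed Keccak-256 digest in a CIDv1 under
// the given codec. mh.Encode only prefixes the digest with the multihash
// header; it does not hash again.
func keccak256ToCid(codec uint64, digest []byte) (cid.Cid, error) {
	buf, err := mh.Encode(digest, mh.KECCAK_256)
	if err != nil {
		return cid.Undef, err
	}
	return cid.NewCidV1(codec, buf), nil
}

func main() {
	const ethStateTrie = 0x96 // assumed multicodec code for eth-state-trie
	nodeRLP := []byte{0xc0}   // placeholder; the fixtures use full node RLP
	c, err := keccak256ToCid(ethStateTrie, crypto.Keccak256(nodeRLP))
	if err != nil {
		panic(err)
	}
	fmt.Println(c.String()) // a base32 key in the style of "baglacgza..." above
}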

var chainB_block1_storageNodeRLP = []byte{3, 111, 15, 5, 141, 92, 6, 120, 63, 191, 125, 121, 193, 230, 153, 7, 49, 102, 109, 236, 50, 44, 161, 215, 28, 224, 171, 111, 118, 230, 79, 99, 18, 99, 4, 160, 117, 126, 95, 187, 60, 115, 90, 36, 51, 167, 59, 86, 20, 175, 63, 118, 94, 2, 107, 202, 41, 253, 234, 165, 214, 221, 181, 45, 9, 202, 244, 148, 128, 128, 32, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 160, 247, 170, 155, 102, 245, 71, 140, 90, 255, 89, 131, 99, 99, 31, 85, 161, 78, 90, 0, 204, 46, 253, 15, 71, 120, 19, 109, 123, 255, 0, 188, 27, 128}
var chainB_block1_storageNodeCID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_block1_storageNodeRLP))

var ChainB_block1_StorageNodeIPLD = models.IPLDModel{
	BlockNumber: ChainB_block1_Header.Number.String(),
	Key:         chainB_block1_storageNodeCID.String(),
	Data:        chainB_block1_storageNodeRLP,
}

var ChainB_block1_StorageNode0 = models.StorageNodeModel{
	BlockNumber: ChainB_block1_Header.Number.String(),
	HeaderID:    ChainB_block1_Header.Hash().Hex(),
	StateKey:    block_stateNodeLeafKey,
	StorageKey:  "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
	Removed:     false,
	CID:         chainB_block1_storageNodeCID.String(),
	Diff:        false,
	Value:       []byte{1},
}

// Header for last block at height 32
var ChainB_Block32_Header = types.Header{
	ParentHash:  common.HexToHash("0x6983c921c053d1f637449191379f61ba844013c71e5ebfacaff77f8a8bd97042"),
	UncleHash:   common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
	Coinbase:    common.HexToAddress("0x0000000000000000000000000000000000000000"),
	Root:        common.HexToHash("0xeaa5866eb37e33fc3cfe1376b2ad7f465e7213c14e6834e1cfcef9552b2e5d5d"),
	TxHash:      common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
	ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
	Bloom:       types.Bloom{},
	Difficulty:  big.NewInt(2),
	Number:      big.NewInt(32),
	GasLimit:    8253773,
	GasUsed:     0,
	Time:        1658408469,
	Extra:       []byte{216, 131, 1, 10, 19, 132, 103, 101, 116, 104, 136, 103, 111, 49, 46, 49, 56, 46, 50, 133, 108, 105, 110, 117, 120, 0, 0, 0, 0, 0, 0, 0, 113, 250, 240, 25, 148, 32, 193, 94, 196, 10, 99, 63, 251, 130, 170, 0, 176, 201, 149, 55, 230, 58, 218, 112, 84, 153, 122, 83, 134, 52, 176, 99, 53, 54, 63, 12, 226, 81, 38, 176, 57, 117, 92, 205, 237, 81, 203, 232, 220, 228, 166, 254, 206, 136, 7, 253, 2, 61, 47, 217, 235, 24, 140, 92, 1},
	MixDigest:   common.Hash{},
	Nonce:       types.BlockNonce{},
	BaseFee:     nil,
}

// State nodes for all paths at height 32
// Total 7
var ChainB_Block32_stateNode0RLP = []byte{248, 145, 128, 128, 128, 160, 151, 6, 152, 177, 246, 151, 39, 79, 71, 219, 192, 153, 253, 0, 46, 66, 56, 238, 116, 176, 237, 244, 79, 132, 49, 29, 30, 82, 108, 53, 191, 204, 128, 128, 160, 46, 224, 200, 157, 30, 24, 225, 92, 222, 131, 123, 169, 124, 86, 228, 124, 79, 136, 236, 83, 185, 22, 67, 136, 5, 73, 46, 110, 136, 138, 101, 63, 128, 128, 160, 104, 220, 31, 84, 240, 26, 100, 148, 110, 49, 52, 120, 81, 119, 30, 251, 196, 107, 11, 134, 124, 238, 93, 61, 109, 109, 181, 208, 10, 189, 17, 92, 128, 128, 160, 171, 149, 11, 254, 75, 39, 224, 164, 133, 151, 153, 47, 109, 134, 15, 169, 139, 206, 132, 93, 220, 210, 0, 225, 235, 118, 121, 247, 173, 12, 135, 133, 128, 128, 128, 128}
var ChainB_Block32_stateNode0CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode0RLP))
var ChainB_Block32_stateNode1RLP = []byte{248, 81, 128, 128, 128, 160, 209, 34, 171, 171, 30, 147, 168, 199, 137, 152, 249, 118, 14, 166, 1, 169, 116, 224, 82, 196, 237, 83, 255, 188, 228, 197, 7, 178, 144, 137, 77, 55, 128, 128, 128, 128, 128, 160, 135, 96, 108, 173, 177, 63, 201, 196, 26, 204, 72, 118, 17, 30, 76, 117, 155, 63, 68, 187, 4, 249, 78, 69, 161, 82, 178, 234, 164, 48, 158, 173, 128, 128, 128, 128, 128, 128, 128}
var ChainB_Block32_stateNode1CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode1RLP))
var ChainB_Block32_stateNode2RLP = []byte{248, 105, 160, 32, 21, 58, 188, 102, 126, 135, 59, 96, 54, 200, 164, 107, 221, 132, 126, 42, 222, 63, 137, 185, 51, 28, 120, 239, 37, 83, 254, 161, 148, 197, 13, 184, 70, 248, 68, 1, 128, 160, 168, 127, 48, 6, 204, 116, 51, 247, 216, 182, 191, 182, 185, 124, 223, 202, 239, 15, 67, 91, 253, 165, 42, 2, 54, 10, 211, 250, 242, 149, 205, 139, 160, 224, 22, 140, 8, 116, 27, 79, 113, 64, 185, 215, 180, 38, 38, 236, 164, 5, 87, 211, 15, 88, 153, 138, 185, 94, 186, 125, 137, 164, 198, 141, 192}
var ChainB_Block32_stateNode2CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode2RLP))
var ChainB_Block32_stateNode3RLP = []byte{248, 105, 160, 32, 252, 41, 63, 199, 2, 228, 43, 156, 2, 63, 9, 72, 38, 84, 93, 180, 47, 192, 253, 242, 186, 3, 27, 181, 34, 213, 239, 145, 122, 110, 219, 184, 70, 248, 68, 1, 128, 160, 25, 80, 158, 144, 166, 222, 32, 247, 189, 42, 34, 60, 40, 240, 56, 105, 251, 184, 132, 209, 219, 59, 60, 16, 221, 204, 228, 74, 76, 113, 37, 226, 160, 224, 22, 140, 8, 116, 27, 79, 113, 64, 185, 215, 180, 38, 38, 236, 164, 5, 87, 211, 15, 88, 153, 138, 185, 94, 186, 125, 137, 164, 198, 141, 192}
var ChainB_Block32_stateNode3CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode3RLP))
var ChainB_Block32_stateNode4RLP = []byte{248, 118, 160, 55, 171, 60, 13, 215, 117, 244, 72, 175, 127, 180, 18, 67, 65, 94, 214, 251, 151, 93, 21, 48, 162, 216, 40, 246, 155, 234, 115, 70, 35, 26, 215, 184, 83, 248, 81, 10, 141, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}
var ChainB_Block32_stateNode4CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode4RLP))
var ChainB_Block32_stateNode5RLP = []byte{248, 105, 160, 51, 151, 227, 61, 237, 218, 71, 99, 174, 161, 67, 252, 97, 81, 235, 205, 154, 147, 246, 45, 183, 166, 165, 86, 212, 108, 88, 93, 130, 173, 42, 252, 184, 70, 248, 68, 1, 128, 160, 54, 174, 96, 33, 243, 186, 113, 120, 188, 222, 254, 210, 63, 40, 4, 130, 154, 156, 66, 247, 130, 93, 88, 113, 144, 78, 47, 252, 174, 140, 130, 45, 160, 29, 80, 58, 104, 206, 141, 36, 93, 124, 217, 67, 93, 183, 43, 71, 98, 114, 126, 124, 105, 229, 48, 218, 194, 109, 83, 20, 76, 13, 102, 156, 130}
var ChainB_Block32_stateNode5CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode5RLP))
var ChainB_Block32_stateNode6RLP = []byte{248, 105, 160, 58, 188, 94, 219, 48, 85, 131, 227, 63, 102, 50, 44, 238, 228, 48, 136, 170, 153, 39, 125, 167, 114, 254, 181, 5, 53, 18, 208, 58, 10, 112, 43, 184, 70, 248, 68, 1, 128, 160, 54, 174, 96, 33, 243, 186, 113, 120, 188, 222, 254, 210, 63, 40, 4, 130, 154, 156, 66, 247, 130, 93, 88, 113, 144, 78, 47, 252, 174, 140, 130, 45, 160, 29, 80, 58, 104, 206, 141, 36, 93, 124, 217, 67, 93, 183, 43, 71, 98, 114, 126, 124, 105, 229, 48, 218, 194, 109, 83, 20, 76, 13, 102, 156, 130}
var ChainB_Block32_stateNode6CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode6RLP))

var ChainB_Block32_StateIPLDs = []models.IPLDModel{
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         ChainB_Block32_stateNode0CID.String(),
		Data:        ChainB_Block32_stateNode0RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         ChainB_Block32_stateNode1CID.String(),
		Data:        ChainB_Block32_stateNode1RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         ChainB_Block32_stateNode2CID.String(),
		Data:        ChainB_Block32_stateNode2RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         ChainB_Block32_stateNode3CID.String(),
		Data:        ChainB_Block32_stateNode3RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         ChainB_Block32_stateNode4CID.String(),
		Data:        ChainB_Block32_stateNode4RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         ChainB_Block32_stateNode5CID.String(),
		Data:        ChainB_Block32_stateNode5RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         ChainB_Block32_stateNode6CID.String(),
		Data:        ChainB_Block32_stateNode6RLP,
	},
}

var ChainB_Block32_StateNodes = []models.StateNodeModel{
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		CID:         ChainB_Block32_stateNode2CID.String(),
		Diff:        false,
		Balance:     "0",
		Nonce:       1,
		CodeHash:    common.HexToHash("0xe0168c08741b4f7140b9d7b42626eca40557d30f58998ab95eba7d89a4c68dc0").Hex(),
		StorageRoot: common.HexToHash("0xa87f3006cc7433f7d8b6bfb6b97cdfcaef0f435bfda52a02360ad3faf295cd8b").Hex(),
		Removed:     false,
		StateKey:    "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		CID:         ChainB_Block32_stateNode3CID.String(),
		Diff:        false,
		Balance:     "1000",
		Nonce:       1,
		CodeHash:    crypto.Keccak256Hash([]byte{}).Hex(),
		StorageRoot: crypto.Keccak256Hash(ChainB_block1_EmptyRootNodeRLP).Hex(),
		Removed:     false,
		StateKey:    "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb",
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		CID:         ChainB_Block32_stateNode4CID.String(),
		Diff:        false,
		Balance:     "1000",
		Nonce:       1,
		CodeHash:    crypto.Keccak256Hash([]byte{}).Hex(),
		StorageRoot: crypto.Keccak256Hash(ChainB_block1_EmptyRootNodeRLP).Hex(),
		Removed:     false,
		StateKey:    "0x67ab3c0dd775f448af7fb41243415ed6fb975d1530a2d828f69bea7346231ad7",
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		CID:         ChainB_Block32_stateNode5CID.String(),
		Diff:        false,
		Balance:     "1000",
		Nonce:       1,
		CodeHash:    crypto.Keccak256Hash([]byte{}).Hex(),
		StorageRoot: crypto.Keccak256Hash(ChainB_block1_EmptyRootNodeRLP).Hex(),
		Removed:     false,
		StateKey:    "0x9397e33dedda4763aea143fc6151ebcd9a93f62db7a6a556d46c585d82ad2afc",
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID:    ChainB_Block32_Header.Hash().Hex(),
		CID:         ChainB_Block32_stateNode6CID.String(),
		Diff:        false,
		Balance:     "0",
		Nonce:       1,
		CodeHash:    common.HexToHash("0x1d503a68ce8d245d7cd9435db72b4762727e7c69e530dac26d53144c0d669c82").Hex(),
		StorageRoot: common.HexToHash("0x36ae6021f3ba7178bcdefed23f2804829a9c42f7825d5871904e2ffcae8c822d").Hex(),
		Removed:     false,
		StateKey:    "0xcabc5edb305583e33f66322ceee43088aa99277da772feb5053512d03a0a702b",
	},
}
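The fixed StateKey values above are secure-trie leaf keys, i.e. Keccak-256 of the 20-byte account address. A minimal sketch of that relationship; the address below is hypothetical and chosen only for illustration, since the real fixture keys come from the accounts present in the premerge1 test chain:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Hypothetical account address, for illustration only.
	addr := common.HexToAddress("0x1111111111111111111111111111111111111111")
	// The secure state trie keys its leaves by Keccak-256 of the address,
	// which is what produces fixed keys like "0x33153abc..." above.
	stateKey := crypto.Keccak256Hash(addr.Bytes()).Hex()
	fmt.Println(stateKey)
}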

// Storage nodes for all paths at height 32
// Total 18
var chainB_Block32_storageNode0RLP = []byte{248, 145, 128, 128, 128, 128, 160, 46, 77, 227, 140, 57, 224, 108, 238, 40, 82, 145, 79, 210, 174, 54, 248, 0, 145, 137, 64, 229, 230, 148, 145, 250, 132, 89, 198, 8, 249, 245, 133, 128, 160, 146, 250, 117, 217, 106, 75, 51, 124, 196, 244, 29, 16, 47, 173, 5, 90, 86, 19, 15, 48, 179, 174, 60, 171, 112, 154, 92, 70, 232, 164, 141, 165, 128, 160, 107, 250, 27, 137, 190, 180, 7, 172, 62, 97, 13, 157, 215, 114, 55, 219, 14, 244, 163, 155, 192, 255, 34, 143, 154, 149, 33, 227, 166, 135, 164, 93, 128, 128, 128, 160, 173, 131, 221, 2, 30, 147, 11, 230, 58, 166, 18, 25, 90, 56, 198, 126, 196, 130, 131, 1, 213, 112, 129, 155, 96, 143, 121, 231, 218, 97, 216, 200, 128, 128, 128, 128}
var chainB_Block32_storageNode0CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode0RLP))
var chainB_Block32_storageNode1RLP = []byte{248, 81, 160, 167, 145, 134, 15, 219, 140, 96, 62, 101, 242, 176, 129, 164, 160, 200, 221, 13, 1, 246, 167, 156, 45, 205, 192, 88, 236, 235, 80, 105, 178, 123, 2, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 160, 18, 136, 22, 150, 26, 170, 67, 152, 182, 246, 95, 49, 193, 199, 219, 163, 97, 25, 243, 70, 126, 235, 163, 59, 44, 16, 37, 37, 247, 50, 229, 70, 128, 128}
var chainB_Block32_storageNode1CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode1RLP))
var chainB_Block32_storageNode2RLP = []byte{236, 160, 32, 87, 135, 250, 18, 168, 35, 224, 242, 183, 99, 28, 196, 27, 59, 168, 130, 139, 51, 33, 202, 129, 17, 17, 250, 117, 205, 58, 163, 187, 90, 206, 138, 137, 54, 53, 201, 173, 197, 222, 160, 0, 0}
var chainB_Block32_storageNode2CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode2RLP))
var chainB_Block32_storageNode3RLP = []byte{226, 160, 32, 44, 236, 111, 71, 132, 84, 126, 80, 66, 161, 99, 128, 134, 227, 24, 137, 41, 243, 79, 60, 0, 5, 248, 222, 195, 102, 201, 110, 129, 149, 172, 100}
var chainB_Block32_storageNode3CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode3RLP))
var chainB_Block32_storageNode4RLP = []byte{236, 160, 58, 160, 42, 17, 221, 77, 37, 151, 49, 139, 113, 212, 147, 177, 69, 221, 246, 174, 8, 23, 169, 211, 148, 127, 69, 213, 41, 166, 167, 95, 43, 239, 138, 137, 54, 53, 201, 173, 197, 222, 159, 255, 156}
var chainB_Block32_storageNode4CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode4RLP))
var chainB_Block32_storageNode5RLP = []byte{248, 67, 160, 58, 53, 172, 251, 193, 95, 248, 26, 57, 174, 125, 52, 79, 215, 9, 242, 142, 134, 0, 180, 170, 140, 101, 198, 182, 75, 254, 127, 227, 107, 209, 155, 161, 160, 71, 76, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6}
var chainB_Block32_storageNode5CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode5RLP))
var chainB_Block32_storageNode6RLP = []byte{248, 67, 160, 58, 53, 172, 251, 193, 95, 248, 26, 57, 174, 125, 52, 79, 215, 9, 242, 142, 134, 0, 180, 170, 140, 101, 198, 182, 75, 254, 127, 227, 107, 209, 155, 161, 160, 71, 76, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6}
var chainB_Block32_storageNode6CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode6RLP))
var chainB_Block32_storageNode7RLP = []byte{248, 67, 160, 50, 87, 90, 14, 158, 89, 60, 0, 249, 89, 248, 201, 47, 18, 219, 40, 105, 195, 57, 90, 59, 5, 2, 208, 94, 37, 22, 68, 111, 113, 248, 91, 161, 160, 71, 111, 108, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8}
var chainB_Block32_storageNode7CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode7RLP))
var chainB_Block32_storageNode8RLP = []byte{248, 67, 160, 50, 87, 90, 14, 158, 89, 60, 0, 249, 89, 248, 201, 47, 18, 219, 40, 105, 195, 57, 90, 59, 5, 2, 208, 94, 37, 22, 68, 111, 113, 248, 91, 161, 160, 71, 111, 108, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8}
var chainB_Block32_storageNode8CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode8RLP))
var chainB_Block32_storageNode9RLP = []byte{248, 145, 128, 128, 128, 128, 160, 145, 86, 15, 219, 52, 36, 164, 68, 160, 227, 156, 111, 1, 245, 112, 184, 187, 242, 26, 138, 8, 98, 129, 35, 57, 212, 165, 21, 204, 151, 229, 43, 128, 160, 250, 205, 84, 126, 141, 108, 126, 228, 162, 8, 238, 234, 141, 159, 232, 175, 70, 112, 207, 55, 165, 209, 107, 153, 54, 183, 60, 172, 194, 251, 66, 61, 128, 160, 107, 250, 27, 137, 190, 180, 7, 172, 62, 97, 13, 157, 215, 114, 55, 219, 14, 244, 163, 155, 192, 255, 34, 143, 154, 149, 33, 227, 166, 135, 164, 93, 128, 128, 128, 160, 173, 131, 221, 2, 30, 147, 11, 230, 58, 166, 18, 25, 90, 56, 198, 126, 196, 130, 131, 1, 213, 112, 129, 155, 96, 143, 121, 231, 218, 97, 216, 200, 128, 128, 128, 128}
var chainB_Block32_storageNode9CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode9RLP))
var chainB_Block32_storageNode10RLP = []byte{236, 160, 48, 87, 135, 250, 18, 168, 35, 224, 242, 183, 99, 28, 196, 27, 59, 168, 130, 139, 51, 33, 202, 129, 17, 17, 250, 117, 205, 58, 163, 187, 90, 206, 138, 137, 54, 53, 201, 173, 197, 222, 160, 0, 0}
var chainB_Block32_storageNode10CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode10RLP))
var chainB_Block32_storageNode11RLP = []byte{236, 160, 58, 160, 42, 17, 221, 77, 37, 151, 49, 139, 113, 212, 147, 177, 69, 221, 246, 174, 8, 23, 169, 211, 148, 127, 69, 213, 41, 166, 167, 95, 43, 239, 138, 137, 54, 53, 201, 173, 197, 222, 160, 0, 0}
var chainB_Block32_storageNode11CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode11RLP))
var chainB_Block32_storageNode12RLP = []byte{248, 81, 128, 128, 160, 79, 197, 241, 58, 178, 249, 186, 12, 45, 168, 139, 1, 81, 171, 14, 124, 244, 216, 93, 8, 204, 164, 92, 205, 146, 60, 106, 183, 99, 35, 235, 40, 128, 128, 128, 128, 128, 128, 128, 128, 160, 82, 154, 228, 80, 107, 126, 132, 72, 3, 170, 88, 197, 100, 216, 50, 21, 226, 183, 86, 42, 208, 239, 184, 183, 152, 93, 188, 113, 224, 234, 218, 43, 128, 128, 128, 128, 128}
var chainB_Block32_storageNode12CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode12RLP))
var chainB_Block32_storageNode13RLP = []byte{248, 81, 128, 128, 160, 79, 197, 241, 58, 178, 249, 186, 12, 45, 168, 139, 1, 81, 171, 14, 124, 244, 216, 93, 8, 204, 164, 92, 205, 146, 60, 106, 183, 99, 35, 235, 40, 128, 128, 128, 128, 128, 128, 128, 128, 160, 82, 154, 228, 80, 107, 126, 132, 72, 3, 170, 88, 197, 100, 216, 50, 21, 226, 183, 86, 42, 208, 239, 184, 183, 152, 93, 188, 113, 224, 234, 218, 43, 128, 128, 128, 128, 128}
var chainB_Block32_storageNode13CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode13RLP))
var chainB_Block32_storageNode14RLP = []byte{226, 160, 57, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1}
var chainB_Block32_storageNode14CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode14RLP))
var chainB_Block32_storageNode15RLP = []byte{226, 160, 57, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1}
var chainB_Block32_storageNode15CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode15RLP))
var chainB_Block32_storageNode16RLP = []byte{226, 160, 49, 14, 45, 82, 118, 18, 7, 59, 38, 238, 205, 253, 113, 126, 106, 50, 12, 244, 75, 74, 250, 194, 176, 115, 45, 159, 203, 226, 183, 250, 12, 246, 4}
var chainB_Block32_storageNode16CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode16RLP))
var chainB_Block32_storageNode17RLP = []byte{226, 160, 49, 14, 45, 82, 118, 18, 7, 59, 38, 238, 205, 253, 113, 126, 106, 50, 12, 244, 75, 74, 250, 194, 176, 115, 45, 159, 203, 226, 183, 250, 12, 246, 4}
var chainB_Block32_storageNode17CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode17RLP))

var ChainB_Block32_StorageIPLDs = []models.IPLDModel{
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode0CID.String(),
		Data:        chainB_Block32_storageNode0RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode1CID.String(),
		Data:        chainB_Block32_storageNode1RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode2CID.String(),
		Data:        chainB_Block32_storageNode2RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode3CID.String(),
		Data:        chainB_Block32_storageNode3RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode4CID.String(),
		Data:        chainB_Block32_storageNode4RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode5CID.String(),
		Data:        chainB_Block32_storageNode5RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode6CID.String(),
		Data:        chainB_Block32_storageNode6RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode7CID.String(),
		Data:        chainB_Block32_storageNode7RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode8CID.String(),
		Data:        chainB_Block32_storageNode8RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode9CID.String(),
		Data:        chainB_Block32_storageNode9RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode10CID.String(),
		Data:        chainB_Block32_storageNode10RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode11CID.String(),
		Data:        chainB_Block32_storageNode11RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode12CID.String(),
		Data:        chainB_Block32_storageNode12RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode13CID.String(),
		Data:        chainB_Block32_storageNode13RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode14CID.String(),
		Data:        chainB_Block32_storageNode14RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode15CID.String(),
		Data:        chainB_Block32_storageNode15RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode16CID.String(),
		Data:        chainB_Block32_storageNode16RLP,
	},
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		Key:         chainB_Block32_storageNode17CID.String(),
		Data:        chainB_Block32_storageNode17RLP,
	},
}
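The Value bytes in the StorageNodeModel entries below are the raw RLP encoding of the slot contents. For example, the recurring {137, 54, 53, 201, 173, 197, 222, 160, 0, 0} is the RLP string 0x3635c9adc5dea00000, i.e. 1000 * 10^18 (1000 ether in wei). A small hedged sketch of the decoding:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// A storage-leaf Value from the fixtures below: RLP prefix 0x89 (137)
	// announces a 9-byte string, followed by the big-endian integer bytes.
	val := []byte{137, 54, 53, 201, 173, 197, 222, 160, 0, 0}
	var raw []byte
	if err := rlp.DecodeBytes(val, &raw); err != nil {
		panic(err)
	}
	fmt.Println(new(big.Int).SetBytes(raw)) // 1000000000000000000000
}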
|
||||||
|
|
||||||
|
var ChainB_Block32_StorageNodes = []models.StorageNodeModel{
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID: ChainB_Block32_Header.Hash().Hex(),
		Diff: false,
		Removed: false,
		StorageKey: "0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace",
		CID: chainB_Block32_storageNode2CID.String(),
		Value: []byte{137, 54, 53, 201, 173, 197, 222, 160, 0, 0},
		StateKey: "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
	}, // 0
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID: ChainB_Block32_Header.Hash().Hex(),
		Diff: false,
		Removed: false,
		StorageKey: "0x4e2cec6f4784547e5042a1638086e3188929f34f3c0005f8dec366c96e8195ac",
		CID: chainB_Block32_storageNode3CID.String(),
		Value: []byte{100},
		StateKey: "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
	}, // 1
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID: ChainB_Block32_Header.Hash().Hex(),
		Diff: false,
		Removed: false,
		StorageKey: "0x6aa02a11dd4d2597318b71d493b145ddf6ae0817a9d3947f45d529a6a75f2bef",
		CID: chainB_Block32_storageNode4CID.String(),
		Value: []byte{137, 54, 53, 201, 173, 197, 222, 159, 255, 156},
		StateKey: "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
	}, // 2
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID: ChainB_Block32_Header.Hash().Hex(),
		Diff: false,
		Removed: false,
		StorageKey: "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b",
		CID: chainB_Block32_storageNode5CID.String(),
		Value: []byte{},
		StateKey: "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb",
	}, // 3
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID: ChainB_Block32_Header.Hash().Hex(),
		Diff: false,
		Removed: false,
		StorageKey: "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b",
		CID: chainB_Block32_storageNode6CID.String(),
		Value: []byte{160, 71, 76, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6},
		StateKey: "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
	}, // 4
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID: ChainB_Block32_Header.Hash().Hex(),
		Diff: false,
		Removed: false,
		StorageKey: "0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b",
		CID: chainB_Block32_storageNode7CID.String(),
		Value: []byte{},
		StateKey: "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb",
	}, // 5
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID: ChainB_Block32_Header.Hash().Hex(),
		Diff: false,
		Removed: false,
		StorageKey: "0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b",
		CID: chainB_Block32_storageNode8CID.String(),
		Value: []byte{160, 71, 111, 108, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8},
		StateKey: "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
	}, // 6
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID: ChainB_Block32_Header.Hash().Hex(),
		Diff: false,
		Removed: false,
		StorageKey: "0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace",
		CID: chainB_Block32_storageNode10CID.String(),
		Value: []byte{},
		StateKey: "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb",
	}, // 7
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID: ChainB_Block32_Header.Hash().Hex(),
		Diff: false,
		Removed: false,
		StorageKey: "0x6aa02a11dd4d2597318b71d493b145ddf6ae0817a9d3947f45d529a6a75f2bef",
		CID: chainB_Block32_storageNode11CID.String(),
		Value: []byte{},
		StateKey: "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb",
	}, // 8
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID: ChainB_Block32_Header.Hash().Hex(),
		Diff: false,
		Removed: false,
		StorageKey: "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563",
		CID: chainB_Block32_storageNode14CID.String(),
		Value: []byte{'\x01'},
		StateKey: "0xcabc5edb305583e33f66322ceee43088aa99277da772feb5053512d03a0a702b",
	}, // 9
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID: ChainB_Block32_Header.Hash().Hex(),
		Diff: false,
		Removed: false,
		StorageKey: "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563",
		CID: chainB_Block32_storageNode15CID.String(),
		Value: []byte{},
		StateKey: "0x9397e33dedda4763aea143fc6151ebcd9a93f62db7a6a556d46c585d82ad2afc",
	}, // 10
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID: ChainB_Block32_Header.Hash().Hex(),
		Diff: false,
		Removed: false,
		StorageKey: "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6",
		CID: chainB_Block32_storageNode16CID.String(),
		Value: []byte{'\x04'},
		StateKey: "0xcabc5edb305583e33f66322ceee43088aa99277da772feb5053512d03a0a702b",
	}, // 11
	{
		BlockNumber: ChainB_Block32_Header.Number.String(),
		HeaderID: ChainB_Block32_Header.Hash().Hex(),
		Diff: false,
		Removed: false,
		StorageKey: "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6",
		CID: chainB_Block32_storageNode17CID.String(),
		Value: []byte{},
		StateKey: "0x9397e33dedda4763aea143fc6151ebcd9a93f62db7a6a556d46c585d82ad2afc",
	}, // 12
}

// Contracts used in chainB
/*
pragma solidity ^0.8.0;

contract Test {
    uint256 private count;
    uint256 private count2;

    event Increment(uint256 count);

    constructor() {
        count2 = 4;
    }

    function incrementCount() public returns (uint256) {
        count = count + 1;
        emit Increment(count);

        return count;
    }

    function destroy() public {
        selfdestruct(payable(msg.sender));
    }

    function deleteCount2() public {
        count2 = 0;
    }
}
*/
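// Note: the StorageKey values in the fixtures above are secure-trie keys,
// i.e. keccak256 of the 32-byte big-endian storage slot index. For the Test
// contract, `count` (slot 0) and `count2` (slot 1) hash to the 0x290decd9...
// and 0xb10e2d52... keys seen above, consistent with the stored values 0x01
// (count after one increment) and 0x04 (count2). A minimal, illustrative
// sketch of that derivation using go-ethereum's crypto and common packages
// (not part of these fixtures):
/*
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Each slot index is encoded as a 32-byte big-endian integer and hashed
	// to produce its key in the storage trie.
	for slot := int64(0); slot < 2; slot++ {
		key := crypto.Keccak256Hash(common.BigToHash(big.NewInt(slot)).Bytes())
		fmt.Printf("slot %d -> %s\n", slot, key.Hex())
	}
	// slot 0 -> 0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563
	// slot 1 -> 0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6
}
*/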

/*
pragma solidity ^0.8.0;

import "@openzeppelin/contracts/token/ERC20/ERC20.sol";

contract GLDToken is ERC20 {
    constructor(uint256 initialSupply) ERC20("Gold", "GLD") {
        _mint(msg.sender, initialSupply);
    }
}
*/
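// ERC20 balances live in a Solidity mapping: each holder's balance occupies
// storage slot keccak256(pad32(holder) ++ pad32(slotIndex)), and the trie key
// recorded in StorageKey is then the keccak256 hash of that slot, as in the
// previous sketch. A sketch of the computation; the holder address and the
// _balances slot index below are illustrative assumptions, not values
// recovered from these fixtures:
/*
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// balanceSlot returns the storage slot holding mapping[holder] for a mapping
// declared at the given slot index.
func balanceSlot(holder common.Address, slot int64) common.Hash {
	return crypto.Keccak256Hash(
		common.LeftPadBytes(holder.Bytes(), 32),
		common.BigToHash(big.NewInt(slot)).Bytes(),
	)
}

func main() {
	// Hypothetical holder; OpenZeppelin's ERC20 keeps _balances in slot 0.
	holder := common.HexToAddress("0x1111111111111111111111111111111111111111")
	pos := balanceSlot(holder, 0)
	trieKey := crypto.Keccak256Hash(pos.Bytes()) // hashed again for the secure trie
	fmt.Printf("slot %s -> trie key %s\n", pos.Hex(), trieKey.Hex())
}
*/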
@ -1,59 +0,0 @@
package test

import (
	"bytes"
	"os"
	"reflect"
	"testing"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
	ethnode "github.com/ethereum/go-ethereum/statediff/indexer/node"
)

var (
	DefaultNodeInfo = ethnode.Info{
		ID: "test_nodeid",
		ClientName: "test_client",
		GenesisBlock: "TEST_GENESIS",
		NetworkID: "test_network",
		ChainID: 0,
	}
	DefaultPgConfig = postgres.Config{
		Hostname: "localhost",
		Port: 8077,
		DatabaseName: "vulcanize_testing",
		Username: "vdbm",
		Password: "password",

		MaxIdle: 0,
		MaxConnLifetime: 0,
		MaxConns: 4,
	}
)

func NeedsDB(t *testing.T) {
	t.Helper()
	if os.Getenv("TEST_WITH_DB") == "" {
		t.Skip("set TEST_WITH_DB to enable test")
	}
}

func NoError(t *testing.T, err error) {
	t.Helper()
	if err != nil {
		t.Fatal(err)
	}
}

// ExpectEqual asserts the provided interfaces are deep equal
func ExpectEqual(t *testing.T, want, got interface{}) {
	if !reflect.DeepEqual(want, got) {
		t.Fatalf("Values not equal:\nExpected:\t%v\nActual:\t\t%v", want, got)
	}
}

func ExpectEqualBytes(t *testing.T, want, got []byte) {
	if !bytes.Equal(want, got) {
		t.Fatalf("Bytes not equal:\nExpected:\t%v\nActual:\t\t%v", want, got)
	}
}
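// For reference, a hypothetical test combining these helpers might look like
// the following sketch (someDBQuery is a stand-in, not a function in this
// package):
/*
// someDBQuery stands in for a real database lookup.
func someDBQuery() (string, error) { return "expected", nil }

func TestSomething(t *testing.T) {
	NeedsDB(t) // skipped unless TEST_WITH_DB is set
	got, err := someDBQuery()
	NoError(t, err)
	ExpectEqual(t, "expected", got)
}
*/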