Compare commits
1 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
cb92f8f949 |
@ -1,143 +0,0 @@
|
|||||||
name: Test
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
branches: '*'
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
|
|
||||||
env:
|
|
||||||
CANONICAL_VERSION: v5.0.4-alpha
|
|
||||||
ETH_TESTING_REF: v0.5.1
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build:
|
|
||||||
name: Build Docker image
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
- name: Build docker image
|
|
||||||
run: docker build .
|
|
||||||
|
|
||||||
unit-test:
|
|
||||||
name: Run unit tests
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
- uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version-file: go.mod
|
|
||||||
check-latest: true
|
|
||||||
- name: Install test fixtures
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
repository: cerc-io/eth-testing
|
|
||||||
path: ./fixtures
|
|
||||||
ref: ${{ env.ETH_TESTING_REF }}
|
|
||||||
- name: Run unit tests
|
|
||||||
run: make test
|
|
||||||
|
|
||||||
integration-test:
|
|
||||||
name: Run integration tests
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
- uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version-file: go.mod
|
|
||||||
check-latest: true
|
|
||||||
- name: Install test fixtures
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
repository: cerc-io/eth-testing
|
|
||||||
path: ./fixtures
|
|
||||||
ref: ${{ env.ETH_TESTING_REF }}
|
|
||||||
- name: Build package
|
|
||||||
run: go build .
|
|
||||||
- name: Run DB container
|
|
||||||
run: docker compose -f test/compose.yml up --wait
|
|
||||||
|
|
||||||
# Run a sanity test against the fixture data
|
|
||||||
# Complete integration tests are TODO
|
|
||||||
- name: Run basic integration test
|
|
||||||
env:
|
|
||||||
SNAPSHOT_MODE: postgres
|
|
||||||
ETHDB_PATH: ./fixtures/chains/data/postmerge1/geth/chaindata
|
|
||||||
ETH_GENESIS_BLOCK: 0x66ef6002e201cfdb23bd3f615fcf41e59d8382055e5a836f8d4c2af0d484647c
|
|
||||||
SNAPSHOT_BLOCK_HEIGHT: 170
|
|
||||||
run: |
|
|
||||||
until
|
|
||||||
ready_query='select max(version_id) from goose_db_version;'
|
|
||||||
version=$(docker exec -e PGPASSWORD=password test-ipld-eth-db-1 \
|
|
||||||
psql -tA cerc_testing -U vdbm -c "$ready_query")
|
|
||||||
[[ "$version" -ge 21 ]]
|
|
||||||
do
|
|
||||||
echo "Waiting for ipld-eth-db..."
|
|
||||||
sleep 3
|
|
||||||
done
|
|
||||||
|
|
||||||
./ipld-eth-state-snapshot --config test/ci-config.toml stateSnapshot
|
|
||||||
|
|
||||||
count_results() {
|
|
||||||
query="select count(*) from $1;"
|
|
||||||
docker exec -e PGPASSWORD=password test-ipld-eth-db-1 \
|
|
||||||
psql -tA cerc_testing -U vdbm -c "$query"
|
|
||||||
}
|
|
||||||
set -x
|
|
||||||
[[ "$(count_results eth.header_cids)" = 1 ]]
|
|
||||||
[[ "$(count_results eth.state_cids)" = 264 ]]
|
|
||||||
[[ "$(count_results eth.storage_cids)" = 371 ]]
|
|
||||||
|
|
||||||
compliance-test:
|
|
||||||
name: Run compliance tests (disabled)
|
|
||||||
# Schema has been updated, so compliance tests are disabled until we have a meaningful way to
|
|
||||||
# compare to previous results.
|
|
||||||
if: false
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
path: ./ipld-eth-state-snapshot
|
|
||||||
- uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version-file: ./ipld-eth-state-snapshot/go.mod
|
|
||||||
check-latest: true
|
|
||||||
- name: Install test fixtures
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
repository: cerc-io/eth-testing
|
|
||||||
path: ./fixtures
|
|
||||||
ref: ${{ env.ETH_TESTING_REF }}
|
|
||||||
- name: Build current version
|
|
||||||
working-directory: ./ipld-eth-state-snapshot
|
|
||||||
run: go build -o ../snapshot-current .
|
|
||||||
|
|
||||||
- name: Checkout canonical version
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
path: ./ipld-eth-state-snapshot-canonical
|
|
||||||
ref: ${{ env.CANONICAL_VERSION }}
|
|
||||||
- name: Build canonical version
|
|
||||||
working-directory: ./ipld-eth-state-snapshot-canonical
|
|
||||||
run: go build -o ../snapshot-canonical .
|
|
||||||
|
|
||||||
- name: Run DB container
|
|
||||||
working-directory: ./ipld-eth-state-snapshot
|
|
||||||
run: docker compose -f test/compose.yml up --wait
|
|
||||||
- name: Compare snapshot output
|
|
||||||
env:
|
|
||||||
SNAPSHOT_BLOCK_HEIGHT: 200
|
|
||||||
ETHDB_PATH: ./fixtures/chains/data/premerge2/geth/chaindata
|
|
||||||
ETHDB_ANCIENT: ./fixtures/chains/data/premerge2/geth/chaindata/ancient
|
|
||||||
ETH_GENESIS_BLOCK: "0x8a3c7cddacbd1ab4ec1b03805fa2a287f3a75e43d87f4f987fcc399f5c042614"
|
|
||||||
run: |
|
|
||||||
until
|
|
||||||
ready_query='select max(version_id) from goose_db_version;'
|
|
||||||
version=$(docker exec -e PGPASSWORD=password test-ipld-eth-db-1 \
|
|
||||||
psql -tA cerc_testing -U vdbm -c "$ready_query")
|
|
||||||
[[ "$version" -ge 21 ]]
|
|
||||||
do sleep 1; done
|
|
||||||
|
|
||||||
./ipld-eth-state-snapshot/scripts/compare-snapshots.sh \
|
|
||||||
./snapshot-canonical ./snapshot-current
|
|
29
.github/workflows/issues-notion-sync.yml
vendored
Normal file
29
.github/workflows/issues-notion-sync.yml
vendored
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
name: Notion Sync
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
issues:
|
||||||
|
types:
|
||||||
|
[
|
||||||
|
opened,
|
||||||
|
edited,
|
||||||
|
labeled,
|
||||||
|
unlabeled,
|
||||||
|
assigned,
|
||||||
|
unassigned,
|
||||||
|
milestoned,
|
||||||
|
demilestoned,
|
||||||
|
reopened,
|
||||||
|
closed,
|
||||||
|
]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
notion_job:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
name: Add GitHub Issues to Notion
|
||||||
|
steps:
|
||||||
|
- name: Add GitHub Issues to Notion
|
||||||
|
uses: vulcanize/notion-github-action@v1.2.4-issueid
|
||||||
|
with:
|
||||||
|
notion-token: ${{ secrets.NOTION_TOKEN }}
|
||||||
|
notion-db: ${{ secrets.NOTION_DATABASE }}
|
4
.gitignore
vendored
4
.gitignore
vendored
@ -1,6 +1,4 @@
|
|||||||
.idea/
|
.idea/
|
||||||
.vscode/
|
.vscode/
|
||||||
ipld-eth-state-snapshot
|
ipld-eth-state-snapshot
|
||||||
output_dir*/
|
mocks/
|
||||||
log_file
|
|
||||||
recovery_file
|
|
||||||
|
31
Dockerfile
31
Dockerfile
@ -1,31 +0,0 @@
|
|||||||
FROM golang:1.21-alpine AS builder
|
|
||||||
|
|
||||||
RUN apk add --no-cache git gcc musl-dev binutils-gold
|
|
||||||
# DEBUG
|
|
||||||
RUN apk add busybox-extras
|
|
||||||
|
|
||||||
WORKDIR /ipld-eth-state-snapshot
|
|
||||||
|
|
||||||
ARG GIT_VDBTO_TOKEN
|
|
||||||
|
|
||||||
COPY go.mod go.sum ./
|
|
||||||
RUN if [ -n "$GIT_VDBTO_TOKEN" ]; then git config --global url."https://$GIT_VDBTO_TOKEN:@git.vdb.to/".insteadOf "https://git.vdb.to/"; fi && \
|
|
||||||
go mod download && \
|
|
||||||
rm -f ~/.gitconfig
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
RUN go build -ldflags '-extldflags "-static"' -o ipld-eth-state-snapshot .
|
|
||||||
|
|
||||||
FROM alpine
|
|
||||||
|
|
||||||
RUN apk --no-cache add su-exec bash
|
|
||||||
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
COPY --from=builder /ipld-eth-state-snapshot/startup_script.sh .
|
|
||||||
COPY --from=builder /ipld-eth-state-snapshot/environments environments
|
|
||||||
|
|
||||||
# keep binaries immutable
|
|
||||||
COPY --from=builder /ipld-eth-state-snapshot/ipld-eth-state-snapshot ipld-eth-state-snapshot
|
|
||||||
|
|
||||||
ENTRYPOINT ["/app/startup_script.sh"]
|
|
24
Makefile
24
Makefile
@ -1,13 +1,19 @@
|
|||||||
MOCKGEN ?= mockgen
|
MOCKS_DIR = $(CURDIR)/mocks
|
||||||
MOCKS_DIR := $(CURDIR)/internal/mocks
|
mockgen_cmd=mockgen
|
||||||
|
|
||||||
mocks: $(MOCKS_DIR)/gen_indexer.go
|
.PHONY: mocks test
|
||||||
.PHONY: mocks
|
|
||||||
|
|
||||||
$(MOCKS_DIR)/gen_indexer.go:
|
mocks: mocks/snapshot/publisher.go
|
||||||
$(MOCKGEN) --package mocks --destination $@ \
|
|
||||||
--mock_names Indexer=MockgenIndexer \
|
mocks/snapshot/publisher.go: pkg/types/publisher.go
|
||||||
github.com/cerc-io/plugeth-statediff/indexer Indexer
|
$(mockgen_cmd) -package snapshot_mock -destination $@ -source $< Publisher Tx
|
||||||
|
|
||||||
|
clean:
|
||||||
|
rm -f mocks/snapshot/publisher.go
|
||||||
|
|
||||||
|
build:
|
||||||
|
go fmt ./...
|
||||||
|
go build
|
||||||
|
|
||||||
test: mocks
|
test: mocks
|
||||||
go clean -testcache && go test -p 1 -v ./...
|
go clean -testcache && go test -v ./...
|
||||||
|
200
README.md
200
README.md
@ -1,209 +1,59 @@
|
|||||||
# ipld-eth-state-snapshot
|
# ipld-eth-state-snapshot
|
||||||
|
|
||||||
> Tool for extracting the entire Ethereum state at a particular block height from a cold database into Postgres-backed IPFS
|
> Tool for extracting the entire Ethereum state at a particular block height from leveldb into Postgres-backed IPFS
|
||||||
|
|
||||||
[![Go Report Card](https://goreportcard.com/badge/github.com/vulcanize/ipld-eth-state-snapshot)](https://goreportcard.com/report/github.com/vulcanize/ipld-eth-state-snapshot)
|
[![Go Report Card](https://goreportcard.com/badge/github.com/vulcanize/ipld-eth-state-snapshot)](https://goreportcard.com/report/github.com/vulcanize/ipld-eth-state-snapshot)
|
||||||
|
|
||||||
## Setup
|
## Usage
|
||||||
|
|
||||||
* Build the binary:
|
./ipld-eth-state-snapshot stateSnapshot --config={path to toml config file}
|
||||||
|
|
||||||
```bash
|
### Config
|
||||||
make build
|
|
||||||
```
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
Config format:
|
Config format:
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
[snapshot]
|
[snapshot]
|
||||||
mode = "file" # indicates output mode <postgres | file>
|
mode = "file" # indicates output mode ("postgres" or "file")
|
||||||
workers = 4 # degree of concurrency: the state trie is subdivided into sections that are traversed and processed concurrently
|
workers = 4 # degree of concurrency, the state trie is subdivided into sectiosn that are traversed and processed concurrently
|
||||||
blockHeight = -1 # blockheight to perform the snapshot at (-1 indicates to use the latest blockheight found in ethdb)
|
blockHeight = -1 # blockheight to perform the snapshot at (-1 indicates to use the latest blockheight found in leveldb)
|
||||||
recoveryFile = "recovery_file" # specifies a file to output recovery information on error or premature closure
|
recoveryFile = "recovery_file" # specifies a file to output recovery information on error or premature closure
|
||||||
accounts = [] # list of accounts (addresses) to take the snapshot for # SNAPSHOT_ACCOUNTS
|
|
||||||
|
|
||||||
[ethdb]
|
[leveldb]
|
||||||
# path to geth ethdb
|
path = "/Users/user/Library/Ethereum/geth/chaindata" # path to geth leveldb
|
||||||
path = "/Users/user/Library/Ethereum/geth/chaindata" # ETHDB_PATH
|
ancient = "/Users/user/Library/Ethereum/geth/chaindata/ancient" # path to geth ancient database
|
||||||
# path to geth ancient database
|
|
||||||
ancient = "/Users/user/Library/Ethereum/geth/chaindata/ancient" # ETHDB_ANCIENT
|
|
||||||
|
|
||||||
[database]
|
[database]
|
||||||
# when operating in 'postgres' output mode
|
name = "vulcanize_public" # postgres database name
|
||||||
# db credentials
|
hostname = "localhost" # postgres host
|
||||||
name = "vulcanize_public" # DATABASE_NAME
|
port = 5432 # postgres port
|
||||||
hostname = "localhost" # DATABASE_HOSTNAME
|
user = "postgres" # postgres user
|
||||||
port = 5432 # DATABASE_PORT
|
password = "" # postgres password
|
||||||
user = "postgres" # DATABASE_USER
|
|
||||||
password = "" # DATABASE_PASSWORD
|
|
||||||
|
|
||||||
[file]
|
[file]
|
||||||
# when operating in 'file' output mode
|
outputDir = "output_dir/" # when operating in 'file' output mode, this is the directory the files are written to
|
||||||
# directory the CSV files are written to
|
|
||||||
outputDir = "output_dir/" # FILE_OUTPUT_DIR
|
|
||||||
|
|
||||||
[log]
|
[log]
|
||||||
level = "info" # log level (trace, debug, info, warn, error, fatal, panic) (default: info)
|
level = "info" # log level (trace, debug, info, warn, error, fatal, panic) (default: info)
|
||||||
file = "log_file" # file path for logging, leave unset to log to stdout
|
file = "log_file" # file path for logging
|
||||||
|
|
||||||
[prom]
|
[prom]
|
||||||
# prometheus metrics
|
|
||||||
metrics = true # enable prometheus metrics (default: false)
|
metrics = true # enable prometheus metrics (default: false)
|
||||||
http = true # enable prometheus http service (default: false)
|
http = true # enable prometheus http service (default: false)
|
||||||
httpAddr = "0.0.0.0" # prometheus http host (default: 127.0.0.1)
|
httpAddr = "0.0.0.0" # prometheus http host (default: 127.0.0.1)
|
||||||
httpPort = 9101 # prometheus http port (default: 8086)
|
httpPort = 9101 # prometheus http port (default: 8086)
|
||||||
dbStats = true # enable prometheus db stats (default: false)
|
dbStats = true # enable prometheus db stats (default: false)
|
||||||
|
|
||||||
[ethereum]
|
|
||||||
# node info
|
# node info
|
||||||
clientName = "Geth" # ETH_CLIENT_NAME
|
[ethereum]
|
||||||
nodeID = "arch1" # ETH_NODE_ID
|
clientName = "Geth" # $ETH_CLIENT_NAME
|
||||||
networkID = "1" # ETH_NETWORK_ID
|
nodeID = "arch1" # $ETH_NODE_ID
|
||||||
chainID = "1" # ETH_CHAIN_ID
|
networkID = "1" # $ETH_NETWORK_ID
|
||||||
genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" # ETH_GENESIS_BLOCK
|
chainID = "1" # $ETH_CHAIN_ID
|
||||||
|
genesisBlock = "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" # $ETH_GENESIS_BLOCK
|
||||||
```
|
```
|
||||||
|
|
||||||
> **Note:** previous versions of this service used different variable names. To update, change the following:
|
|
||||||
> * `LVL_DB_PATH`, `LEVELDB_PATH` => `ETHDB_PATH`
|
|
||||||
> * `ANCIENT_DB_PATH`, `LEVELDB_ANCIENT` => `ETHDB_ANCIENT`
|
|
||||||
> * `LOGRUS_LEVEL`, `LOGRUS_FILE` => `LOG_LEVEL`, `LOG_FILE`, etc.
|
|
||||||
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
* For state snapshot from EthDB:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
./ipld-eth-state-snapshot stateSnapshot --config={path to toml config file}
|
|
||||||
```
|
|
||||||
|
|
||||||
* Account selective snapshot: To restrict the snapshot to a list of accounts (addresses), provide the addresses in config parameter `snapshot.accounts` or env variable `SNAPSHOT_ACCOUNTS`. Only nodes related to provided addresses will be indexed.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[snapshot]
|
|
||||||
accounts = [
|
|
||||||
"0x825a6eec09e44Cb0fa19b84353ad0f7858d7F61a"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
## Monitoring
|
|
||||||
|
|
||||||
* Enable metrics using config parameters `prom.metrics` and `prom.http`.
|
|
||||||
* `ipld-eth-state-snapshot` exposes following prometheus metrics at `/metrics` endpoint:
|
|
||||||
* `state_node_count`: Number of state nodes processed.
|
|
||||||
* `storage_node_count`: Number of storage nodes processed.
|
|
||||||
* `code_node_count`: Number of code nodes processed.
|
|
||||||
* DB stats if operating in `postgres` mode.
|
|
||||||
|
|
||||||
## Tests
|
## Tests
|
||||||
|
|
||||||
* Run unit tests:
|
* Install [mockgen](https://github.com/golang/mock#installation)
|
||||||
|
* `make test`
|
||||||
```bash
|
|
||||||
# setup db
|
|
||||||
docker-compose up -d
|
|
||||||
|
|
||||||
# run tests after db migrations are run
|
|
||||||
make dbtest
|
|
||||||
|
|
||||||
# tear down db
|
|
||||||
docker-compose down -v --remove-orphans
|
|
||||||
```
|
|
||||||
|
|
||||||
## Import output data in file mode into a database
|
|
||||||
|
|
||||||
* When `ipld-eth-state-snapshot stateSnapshot` is run in file mode (`database.type`), the output is in form of CSV files.
|
|
||||||
|
|
||||||
* Assuming the output files are located in host's `./output_dir` directory.
|
|
||||||
|
|
||||||
* Data post-processing:
|
|
||||||
|
|
||||||
* Create a directory to store post-processed output:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
mkdir -p output_dir/processed_output
|
|
||||||
```
|
|
||||||
|
|
||||||
* Combine output from multiple workers and copy to post-processed output directory:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# ipld.blocks
|
|
||||||
cat {output_dir,output_dir/*}/ipld.blocks.csv > output_dir/processed_output/combined-ipld.blocks.csv
|
|
||||||
|
|
||||||
# eth.state_cids
|
|
||||||
cat output_dir/*/eth.state_cids.csv > output_dir/processed_output/combined-eth.state_cids.csv
|
|
||||||
|
|
||||||
# eth.storage_cids
|
|
||||||
cat output_dir/*/eth.storage_cids.csv > output_dir/processed_output/combined-eth.storage_cids.csv
|
|
||||||
|
|
||||||
# public.nodes
|
|
||||||
cp output_dir/public.nodes.csv output_dir/processed_output/public.nodes.csv
|
|
||||||
|
|
||||||
# eth.header_cids
|
|
||||||
cp output_dir/eth.header_cids.csv output_dir/processed_output/eth.header_cids.csv
|
|
||||||
```
|
|
||||||
|
|
||||||
* De-duplicate data:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# ipld.blocks
|
|
||||||
sort -u output_dir/processed_output/combined-ipld.blocks.csv -o output_dir/processed_output/deduped-combined-ipld.blocks.csv
|
|
||||||
|
|
||||||
# eth.header_cids
|
|
||||||
sort -u output_dir/processed_output/eth.header_cids.csv -o output_dir/processed_output/deduped-eth.header_cids.csv
|
|
||||||
|
|
||||||
# eth.state_cids
|
|
||||||
sort -u output_dir/processed_output/combined-eth.state_cids.csv -o output_dir/processed_output/deduped-combined-eth.state_cids.csv
|
|
||||||
|
|
||||||
# eth.storage_cids
|
|
||||||
sort -u output_dir/processed_output/combined-eth.storage_cids.csv -o output_dir/processed_output/deduped-combined-eth.storage_cids.csv
|
|
||||||
```
|
|
||||||
|
|
||||||
* Copy over the post-processed output files to the DB server (say in `/output_dir`).
|
|
||||||
|
|
||||||
* Start `psql` to run the import commands:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
psql -U <DATABASE_USER> -h <DATABASE_HOSTNAME> -p <DATABASE_PORT> <DATABASE_NAME>
|
|
||||||
```
|
|
||||||
|
|
||||||
* Run the following to import data:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# public.nodes
|
|
||||||
COPY public.nodes FROM '/output_dir/processed_output/public.nodes.csv' CSV;
|
|
||||||
|
|
||||||
# ipld.blocks
|
|
||||||
COPY ipld.blocks FROM '/output_dir/processed_output/deduped-combined-ipld.blocks.csv' CSV;
|
|
||||||
|
|
||||||
# eth.header_cids
|
|
||||||
COPY eth.header_cids FROM '/output_dir/processed_output/deduped-eth.header_cids.csv' CSV;
|
|
||||||
|
|
||||||
# eth.state_cids
|
|
||||||
COPY eth.state_cids FROM '/output_dir/processed_output/deduped-combined-eth.state_cids.csv' CSV FORCE NOT NULL state_leaf_key;
|
|
||||||
|
|
||||||
# eth.storage_cids
|
|
||||||
COPY eth.storage_cids FROM '/output_dir/processed_output/deduped-combined-eth.storage_cids.csv' CSV FORCE NOT NULL storage_leaf_key;
|
|
||||||
```
|
|
||||||
|
|
||||||
* NOTE: `COPY` command on CSVs inserts empty strings as `NULL` in the DB. Passing `FORCE_NOT_NULL <COLUMN_NAME>` forces it to insert empty strings instead. This is required to maintain compatibility of the imported snapshot data with the data generated by statediffing. Reference: https://www.postgresql.org/docs/14/sql-copy.html
|
|
||||||
|
|
||||||
### Troubleshooting
|
|
||||||
|
|
||||||
* Run the following command to find any rows (in data dumps in `file` mode) having unexpected number of columns:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
./scripts/find-bad-rows.sh -i <input-file> -c <expected-columns> -o [output-file] -d true
|
|
||||||
```
|
|
||||||
|
|
||||||
* Run the following command to select rows (from data dumps in `file` mode) other than the ones having unexpected number of columns:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
./scripts/filter-bad-rows.sh -i <input-file> -c <expected-columns> -o <output-file>
|
|
||||||
```
|
|
||||||
|
|
||||||
* See [scripts](./scripts) for more details.
|
|
||||||
|
21
cmd/root.go
21
cmd/root.go
@ -25,8 +25,8 @@ import (
|
|||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"github.com/spf13/viper"
|
"github.com/spf13/viper"
|
||||||
|
|
||||||
"github.com/cerc-io/ipld-eth-state-snapshot/pkg/prom"
|
"github.com/vulcanize/ipld-eth-state-snapshot/pkg/prom"
|
||||||
"github.com/cerc-io/ipld-eth-state-snapshot/pkg/snapshot"
|
"github.com/vulcanize/ipld-eth-state-snapshot/pkg/snapshot"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -42,13 +42,14 @@ var rootCmd = &cobra.Command{
|
|||||||
|
|
||||||
// Execute executes root Command.
|
// Execute executes root Command.
|
||||||
func Execute() {
|
func Execute() {
|
||||||
|
log.Info("----- Starting vDB -----")
|
||||||
if err := rootCmd.Execute(); err != nil {
|
if err := rootCmd.Execute(); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func initFuncs(cmd *cobra.Command, args []string) {
|
func initFuncs(cmd *cobra.Command, args []string) {
|
||||||
logfile := viper.GetString(snapshot.LOG_FILE_TOML)
|
logfile := viper.GetString(snapshot.LOGRUS_FILE_TOML)
|
||||||
if logfile != "" {
|
if logfile != "" {
|
||||||
file, err := os.OpenFile(logfile,
|
file, err := os.OpenFile(logfile,
|
||||||
os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
|
os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
|
||||||
@ -67,7 +68,7 @@ func initFuncs(cmd *cobra.Command, args []string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if viper.GetBool(snapshot.PROM_METRICS_TOML) {
|
if viper.GetBool(snapshot.PROM_METRICS_TOML) {
|
||||||
log.Info("Initializing prometheus metrics")
|
log.Info("initializing prometheus metrics")
|
||||||
prom.Init()
|
prom.Init()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -83,7 +84,7 @@ func initFuncs(cmd *cobra.Command, args []string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func logLevel() error {
|
func logLevel() error {
|
||||||
lvl, err := log.ParseLevel(viper.GetString(snapshot.LOG_LEVEL_TOML))
|
lvl, err := log.ParseLevel(viper.GetString(snapshot.LOGRUS_LEVEL_TOML))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -102,13 +103,13 @@ func init() {
|
|||||||
viper.AutomaticEnv()
|
viper.AutomaticEnv()
|
||||||
|
|
||||||
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file location")
|
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file location")
|
||||||
rootCmd.PersistentFlags().String(snapshot.LOG_FILE_CLI, "", "file path for logging")
|
rootCmd.PersistentFlags().String(snapshot.LOGRUS_FILE_CLI, "", "file path for logging")
|
||||||
rootCmd.PersistentFlags().String(snapshot.DATABASE_NAME_CLI, "vulcanize_public", "database name")
|
rootCmd.PersistentFlags().String(snapshot.DATABASE_NAME_CLI, "vulcanize_public", "database name")
|
||||||
rootCmd.PersistentFlags().Int(snapshot.DATABASE_PORT_CLI, 5432, "database port")
|
rootCmd.PersistentFlags().Int(snapshot.DATABASE_PORT_CLI, 5432, "database port")
|
||||||
rootCmd.PersistentFlags().String(snapshot.DATABASE_HOSTNAME_CLI, "localhost", "database hostname")
|
rootCmd.PersistentFlags().String(snapshot.DATABASE_HOSTNAME_CLI, "localhost", "database hostname")
|
||||||
rootCmd.PersistentFlags().String(snapshot.DATABASE_USER_CLI, "", "database user")
|
rootCmd.PersistentFlags().String(snapshot.DATABASE_USER_CLI, "", "database user")
|
||||||
rootCmd.PersistentFlags().String(snapshot.DATABASE_PASSWORD_CLI, "", "database password")
|
rootCmd.PersistentFlags().String(snapshot.DATABASE_PASSWORD_CLI, "", "database password")
|
||||||
rootCmd.PersistentFlags().String(snapshot.LOG_LEVEL_CLI, log.InfoLevel.String(), "log level (trace, debug, info, warn, error, fatal, panic)")
|
rootCmd.PersistentFlags().String(snapshot.LOGRUS_LEVEL_CLI, log.InfoLevel.String(), "log level (trace, debug, info, warn, error, fatal, panic)")
|
||||||
|
|
||||||
rootCmd.PersistentFlags().Bool(snapshot.PROM_METRICS_CLI, false, "enable prometheus metrics")
|
rootCmd.PersistentFlags().Bool(snapshot.PROM_METRICS_CLI, false, "enable prometheus metrics")
|
||||||
rootCmd.PersistentFlags().Bool(snapshot.PROM_HTTP_CLI, false, "enable prometheus http service")
|
rootCmd.PersistentFlags().Bool(snapshot.PROM_HTTP_CLI, false, "enable prometheus http service")
|
||||||
@ -116,13 +117,13 @@ func init() {
|
|||||||
rootCmd.PersistentFlags().String(snapshot.PROM_HTTP_PORT_CLI, "8086", "prometheus http port")
|
rootCmd.PersistentFlags().String(snapshot.PROM_HTTP_PORT_CLI, "8086", "prometheus http port")
|
||||||
rootCmd.PersistentFlags().Bool(snapshot.PROM_DB_STATS_CLI, false, "enables prometheus db stats")
|
rootCmd.PersistentFlags().Bool(snapshot.PROM_DB_STATS_CLI, false, "enables prometheus db stats")
|
||||||
|
|
||||||
viper.BindPFlag(snapshot.LOG_FILE_TOML, rootCmd.PersistentFlags().Lookup(snapshot.LOG_FILE_CLI))
|
viper.BindPFlag(snapshot.LOGRUS_FILE_TOML, rootCmd.PersistentFlags().Lookup(snapshot.LOGRUS_FILE_CLI))
|
||||||
viper.BindPFlag(snapshot.DATABASE_NAME_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_NAME_CLI))
|
viper.BindPFlag(snapshot.DATABASE_NAME_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_NAME_CLI))
|
||||||
viper.BindPFlag(snapshot.DATABASE_PORT_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_PORT_CLI))
|
viper.BindPFlag(snapshot.DATABASE_PORT_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_PORT_CLI))
|
||||||
viper.BindPFlag(snapshot.DATABASE_HOSTNAME_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_HOSTNAME_CLI))
|
viper.BindPFlag(snapshot.DATABASE_HOSTNAME_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_HOSTNAME_CLI))
|
||||||
viper.BindPFlag(snapshot.DATABASE_USER_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_USER_CLI))
|
viper.BindPFlag(snapshot.DATABASE_USER_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_USER_CLI))
|
||||||
viper.BindPFlag(snapshot.DATABASE_PASSWORD_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_PASSWORD_CLI))
|
viper.BindPFlag(snapshot.DATABASE_PASSWORD_TOML, rootCmd.PersistentFlags().Lookup(snapshot.DATABASE_PASSWORD_CLI))
|
||||||
viper.BindPFlag(snapshot.LOG_LEVEL_TOML, rootCmd.PersistentFlags().Lookup(snapshot.LOG_LEVEL_CLI))
|
viper.BindPFlag(snapshot.LOGRUS_LEVEL_TOML, rootCmd.PersistentFlags().Lookup(snapshot.LOGRUS_LEVEL_CLI))
|
||||||
|
|
||||||
viper.BindPFlag(snapshot.PROM_METRICS_TOML, rootCmd.PersistentFlags().Lookup(snapshot.PROM_METRICS_CLI))
|
viper.BindPFlag(snapshot.PROM_METRICS_TOML, rootCmd.PersistentFlags().Lookup(snapshot.PROM_METRICS_CLI))
|
||||||
viper.BindPFlag(snapshot.PROM_HTTP_TOML, rootCmd.PersistentFlags().Lookup(snapshot.PROM_HTTP_CLI))
|
viper.BindPFlag(snapshot.PROM_HTTP_TOML, rootCmd.PersistentFlags().Lookup(snapshot.PROM_HTTP_CLI))
|
||||||
@ -137,7 +138,7 @@ func initConfig() {
|
|||||||
if err := viper.ReadInConfig(); err == nil {
|
if err := viper.ReadInConfig(); err == nil {
|
||||||
log.Printf("Using config file: %s", viper.ConfigFileUsed())
|
log.Printf("Using config file: %s", viper.ConfigFileUsed())
|
||||||
} else {
|
} else {
|
||||||
log.Fatalf("Couldn't read config file: %s", err)
|
log.Fatal(fmt.Sprintf("Couldn't read config file: %s", err.Error()))
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.Warn("No config file passed with --config flag")
|
log.Warn("No config file passed with --config flag")
|
||||||
|
@ -16,21 +16,19 @@
|
|||||||
package cmd
|
package cmd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"github.com/spf13/viper"
|
"github.com/spf13/viper"
|
||||||
|
|
||||||
"github.com/cerc-io/ipld-eth-state-snapshot/pkg/snapshot"
|
"github.com/vulcanize/ipld-eth-state-snapshot/pkg/snapshot"
|
||||||
"github.com/cerc-io/plugeth-statediff/indexer"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// stateSnapshotCmd represents the stateSnapshot command
|
// stateSnapshotCmd represents the stateSnapshot command
|
||||||
var stateSnapshotCmd = &cobra.Command{
|
var stateSnapshotCmd = &cobra.Command{
|
||||||
Use: "stateSnapshot",
|
Use: "stateSnapshot",
|
||||||
Short: "Extract the entire Ethereum state from Ethdb and publish into PG-IPFS",
|
Short: "Extract the entire Ethereum state from leveldb and publish into PG-IPFS",
|
||||||
Long: `Usage
|
Long: `Usage
|
||||||
|
|
||||||
./ipld-eth-state-snapshot stateSnapshot --config={path to toml config file}`,
|
./ipld-eth-state-snapshot stateSnapshot --config={path to toml config file}`,
|
||||||
@ -42,14 +40,15 @@ var stateSnapshotCmd = &cobra.Command{
|
|||||||
}
|
}
|
||||||
|
|
||||||
func stateSnapshot() {
|
func stateSnapshot() {
|
||||||
mode := snapshot.SnapshotMode(viper.GetString(snapshot.SNAPSHOT_MODE_TOML))
|
modeStr := viper.GetString(snapshot.SNAPSHOT_MODE_TOML)
|
||||||
|
mode := snapshot.SnapshotMode(modeStr)
|
||||||
config, err := snapshot.NewConfig(mode)
|
config, err := snapshot.NewConfig(mode)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logWithCommand.Fatalf("unable to initialize config: %v", err)
|
logWithCommand.Fatalf("unable to initialize config: %v", err)
|
||||||
}
|
}
|
||||||
logWithCommand.Infof("opening ethdb and ancient data at %s and %s",
|
logWithCommand.Infof("opening levelDB and ancient data at %s and %s",
|
||||||
config.Eth.DBPath, config.Eth.AncientDBPath)
|
config.Eth.LevelDBPath, config.Eth.AncientDBPath)
|
||||||
edb, err := snapshot.NewEthDB(config.Eth)
|
edb, err := snapshot.NewLevelDB(config.Eth)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logWithCommand.Fatal(err)
|
logWithCommand.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -60,60 +59,46 @@ func stateSnapshot() {
|
|||||||
logWithCommand.Infof("no recovery file set, using default: %s", recoveryFile)
|
logWithCommand.Infof("no recovery file set, using default: %s", recoveryFile)
|
||||||
}
|
}
|
||||||
|
|
||||||
var idxconfig indexer.Config
|
pub, err := snapshot.NewPublisher(mode, config)
|
||||||
switch mode {
|
|
||||||
case snapshot.PgSnapshot:
|
|
||||||
idxconfig = *config.DB
|
|
||||||
case snapshot.FileSnapshot:
|
|
||||||
idxconfig = *config.File
|
|
||||||
}
|
|
||||||
_, indexer, err := indexer.NewStateDiffIndexer(
|
|
||||||
context.Background(),
|
|
||||||
nil, // ChainConfig is only used in PushBlock, which we don't call
|
|
||||||
config.Eth.NodeInfo,
|
|
||||||
idxconfig,
|
|
||||||
false,
|
|
||||||
)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logWithCommand.Fatal(err)
|
logWithCommand.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
snapshotService, err := snapshot.NewSnapshotService(edb, indexer, recoveryFile)
|
snapshotService, err := snapshot.NewSnapshotService(edb, pub, recoveryFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logWithCommand.Fatal(err)
|
logWithCommand.Fatal(err)
|
||||||
}
|
}
|
||||||
workers := viper.GetUint(snapshot.SNAPSHOT_WORKERS_TOML)
|
workers := viper.GetUint(snapshot.SNAPSHOT_WORKERS_TOML)
|
||||||
|
|
||||||
if height < 0 {
|
if height < 0 {
|
||||||
if err := snapshotService.CreateLatestSnapshot(workers, config.Service.AllowedAccounts); err != nil {
|
if err := snapshotService.CreateLatestSnapshot(workers); err != nil {
|
||||||
logWithCommand.Fatal(err)
|
logWithCommand.Fatal(err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
params := snapshot.SnapshotParams{Workers: workers, Height: uint64(height), WatchedAddresses: config.Service.AllowedAccounts}
|
params := snapshot.SnapshotParams{Workers: workers, Height: uint64(height)}
|
||||||
if err := snapshotService.CreateSnapshot(params); err != nil {
|
if err := snapshotService.CreateSnapshot(params); err != nil {
|
||||||
logWithCommand.Fatal(err)
|
logWithCommand.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
logWithCommand.Infof("State snapshot at height %d is complete", height)
|
logWithCommand.Infof("state snapshot at height %d is complete", height)
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
rootCmd.AddCommand(stateSnapshotCmd)
|
rootCmd.AddCommand(stateSnapshotCmd)
|
||||||
|
|
||||||
stateSnapshotCmd.PersistentFlags().String(snapshot.ETHDB_PATH_CLI, "", "path to primary datastore")
|
stateSnapshotCmd.PersistentFlags().String(snapshot.LVL_DB_PATH_CLI, "", "path to primary datastore")
|
||||||
stateSnapshotCmd.PersistentFlags().String(snapshot.ETHDB_ANCIENT_CLI, "", "path to ancient datastore")
|
stateSnapshotCmd.PersistentFlags().String(snapshot.ANCIENT_DB_PATH_CLI, "", "path to ancient datastore")
|
||||||
stateSnapshotCmd.PersistentFlags().String(snapshot.SNAPSHOT_BLOCK_HEIGHT_CLI, "", "block height to extract state at")
|
stateSnapshotCmd.PersistentFlags().String(snapshot.SNAPSHOT_BLOCK_HEIGHT_CLI, "", "block height to extract state at")
|
||||||
stateSnapshotCmd.PersistentFlags().Int(snapshot.SNAPSHOT_WORKERS_CLI, 1, "number of concurrent workers to use")
|
stateSnapshotCmd.PersistentFlags().Int(snapshot.SNAPSHOT_WORKERS_CLI, 1, "number of concurrent workers to use")
|
||||||
stateSnapshotCmd.PersistentFlags().String(snapshot.SNAPSHOT_RECOVERY_FILE_CLI, "", "file to recover from a previous iteration")
|
stateSnapshotCmd.PersistentFlags().String(snapshot.SNAPSHOT_RECOVERY_FILE_CLI, "", "file to recover from a previous iteration")
|
||||||
stateSnapshotCmd.PersistentFlags().String(snapshot.SNAPSHOT_MODE_CLI, "postgres", "output mode for snapshot ('file' or 'postgres')")
|
stateSnapshotCmd.PersistentFlags().String(snapshot.SNAPSHOT_MODE_CLI, "postgres", "output mode for snapshot ('file' or 'postgres')")
|
||||||
stateSnapshotCmd.PersistentFlags().String(snapshot.FILE_OUTPUT_DIR_CLI, "", "directory for writing ouput to while operating in 'file' mode")
|
stateSnapshotCmd.PersistentFlags().String(snapshot.FILE_OUTPUT_DIR_CLI, "", "directory for writing ouput to while operating in 'file' mode")
|
||||||
stateSnapshotCmd.PersistentFlags().StringArray(snapshot.SNAPSHOT_ACCOUNTS_CLI, nil, "list of account addresses to limit snapshot to")
|
|
||||||
|
|
||||||
viper.BindPFlag(snapshot.ETHDB_PATH_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.ETHDB_PATH_CLI))
|
viper.BindPFlag(snapshot.LVL_DB_PATH_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.LVL_DB_PATH_CLI))
|
||||||
viper.BindPFlag(snapshot.ETHDB_ANCIENT_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.ETHDB_ANCIENT_CLI))
|
viper.BindPFlag(snapshot.ANCIENT_DB_PATH_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.ANCIENT_DB_PATH_CLI))
|
||||||
viper.BindPFlag(snapshot.SNAPSHOT_BLOCK_HEIGHT_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_BLOCK_HEIGHT_CLI))
|
viper.BindPFlag(snapshot.SNAPSHOT_BLOCK_HEIGHT_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_BLOCK_HEIGHT_CLI))
|
||||||
viper.BindPFlag(snapshot.SNAPSHOT_WORKERS_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_WORKERS_CLI))
|
viper.BindPFlag(snapshot.SNAPSHOT_WORKERS_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_WORKERS_CLI))
|
||||||
viper.BindPFlag(snapshot.SNAPSHOT_RECOVERY_FILE_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_RECOVERY_FILE_CLI))
|
viper.BindPFlag(snapshot.SNAPSHOT_RECOVERY_FILE_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_RECOVERY_FILE_CLI))
|
||||||
viper.BindPFlag(snapshot.SNAPSHOT_MODE_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_MODE_CLI))
|
viper.BindPFlag(snapshot.SNAPSHOT_MODE_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_MODE_CLI))
|
||||||
viper.BindPFlag(snapshot.FILE_OUTPUT_DIR_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.FILE_OUTPUT_DIR_CLI))
|
viper.BindPFlag(snapshot.FILE_OUTPUT_DIR_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.FILE_OUTPUT_DIR_CLI))
|
||||||
viper.BindPFlag(snapshot.SNAPSHOT_ACCOUNTS_TOML, stateSnapshotCmd.PersistentFlags().Lookup(snapshot.SNAPSHOT_ACCOUNTS_CLI))
|
|
||||||
}
|
}
|
||||||
|
8
db/migrations/00001_create_ipfs_blocks_table.sql
Normal file
8
db/migrations/00001_create_ipfs_blocks_table.sql
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
-- +goose Up
|
||||||
|
CREATE TABLE IF NOT EXISTS public.blocks (
|
||||||
|
key TEXT UNIQUE NOT NULL,
|
||||||
|
data BYTEA NOT NULL
|
||||||
|
);
|
||||||
|
|
||||||
|
-- +goose Down
|
||||||
|
DROP TABLE public.blocks;
|
12
db/migrations/00002_create_nodes_table.sql
Normal file
12
db/migrations/00002_create_nodes_table.sql
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
-- +goose Up
|
||||||
|
CREATE TABLE nodes (
|
||||||
|
id SERIAL PRIMARY KEY,
|
||||||
|
client_name VARCHAR,
|
||||||
|
genesis_block VARCHAR(66),
|
||||||
|
network_id VARCHAR,
|
||||||
|
node_id VARCHAR(128),
|
||||||
|
CONSTRAINT node_uc UNIQUE (genesis_block, network_id, node_id)
|
||||||
|
);
|
||||||
|
|
||||||
|
-- +goose Down
|
||||||
|
DROP TABLE nodes;
|
5
db/migrations/00003_create_eth_schema.sql
Normal file
5
db/migrations/00003_create_eth_schema.sql
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
-- +goose Up
|
||||||
|
CREATE SCHEMA eth;
|
||||||
|
|
||||||
|
-- +goose Down
|
||||||
|
DROP SCHEMA eth;
|
23
db/migrations/00004_create_eth_header_cids_table.sql
Normal file
23
db/migrations/00004_create_eth_header_cids_table.sql
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
-- +goose Up
|
||||||
|
CREATE TABLE eth.header_cids (
|
||||||
|
id SERIAL PRIMARY KEY,
|
||||||
|
block_number BIGINT NOT NULL,
|
||||||
|
block_hash VARCHAR(66) NOT NULL,
|
||||||
|
parent_hash VARCHAR(66) NOT NULL,
|
||||||
|
cid TEXT NOT NULL,
|
||||||
|
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
|
||||||
|
td NUMERIC NOT NULL,
|
||||||
|
node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
|
||||||
|
reward NUMERIC NOT NULL,
|
||||||
|
state_root VARCHAR(66) NOT NULL,
|
||||||
|
tx_root VARCHAR(66) NOT NULL,
|
||||||
|
receipt_root VARCHAR(66) NOT NULL,
|
||||||
|
uncle_root VARCHAR(66) NOT NULL,
|
||||||
|
bloom BYTEA NOT NULL,
|
||||||
|
timestamp NUMERIC NOT NULL,
|
||||||
|
times_validated INTEGER NOT NULL DEFAULT 1,
|
||||||
|
UNIQUE (block_number, block_hash)
|
||||||
|
);
|
||||||
|
|
||||||
|
-- +goose Down
|
||||||
|
DROP TABLE eth.header_cids;
|
14
db/migrations/00005_create_eth_uncle_cids_table.sql
Normal file
14
db/migrations/00005_create_eth_uncle_cids_table.sql
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
-- +goose Up
|
||||||
|
CREATE TABLE eth.uncle_cids (
|
||||||
|
id SERIAL PRIMARY KEY,
|
||||||
|
header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
|
||||||
|
block_hash VARCHAR(66) NOT NULL,
|
||||||
|
parent_hash VARCHAR(66) NOT NULL,
|
||||||
|
cid TEXT NOT NULL,
|
||||||
|
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
|
||||||
|
reward NUMERIC NOT NULL,
|
||||||
|
UNIQUE (header_id, block_hash)
|
||||||
|
);
|
||||||
|
|
||||||
|
-- +goose Down
|
||||||
|
DROP TABLE eth.uncle_cids;
|
15
db/migrations/00006_create_eth_transaction_cids_table.sql
Normal file
15
db/migrations/00006_create_eth_transaction_cids_table.sql
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
-- +goose Up
|
||||||
|
CREATE TABLE eth.transaction_cids (
|
||||||
|
id SERIAL PRIMARY KEY,
|
||||||
|
header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
|
||||||
|
tx_hash VARCHAR(66) NOT NULL,
|
||||||
|
index INTEGER NOT NULL,
|
||||||
|
cid TEXT NOT NULL,
|
||||||
|
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
|
||||||
|
dst VARCHAR(66) NOT NULL,
|
||||||
|
src VARCHAR(66) NOT NULL,
|
||||||
|
UNIQUE (header_id, tx_hash)
|
||||||
|
);
|
||||||
|
|
||||||
|
-- +goose Down
|
||||||
|
DROP TABLE eth.transaction_cids;
|
18
db/migrations/00007_create_eth_receipt_cids_table.sql
Normal file
18
db/migrations/00007_create_eth_receipt_cids_table.sql
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
-- +goose Up
|
||||||
|
CREATE TABLE eth.receipt_cids (
|
||||||
|
id SERIAL PRIMARY KEY,
|
||||||
|
tx_id INTEGER NOT NULL REFERENCES eth.transaction_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
|
||||||
|
cid TEXT NOT NULL,
|
||||||
|
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
|
||||||
|
contract VARCHAR(66),
|
||||||
|
contract_hash VARCHAR(66),
|
||||||
|
topic0s VARCHAR(66)[],
|
||||||
|
topic1s VARCHAR(66)[],
|
||||||
|
topic2s VARCHAR(66)[],
|
||||||
|
topic3s VARCHAR(66)[],
|
||||||
|
log_contracts VARCHAR(66)[],
|
||||||
|
UNIQUE (tx_id)
|
||||||
|
);
|
||||||
|
|
||||||
|
-- +goose Down
|
||||||
|
DROP TABLE eth.receipt_cids;
|
15
db/migrations/00008_create_eth_state_cids_table.sql
Normal file
15
db/migrations/00008_create_eth_state_cids_table.sql
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
-- +goose Up
|
||||||
|
CREATE TABLE eth.state_cids (
|
||||||
|
id SERIAL PRIMARY KEY,
|
||||||
|
header_id INTEGER NOT NULL REFERENCES eth.header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
|
||||||
|
state_leaf_key VARCHAR(66),
|
||||||
|
cid TEXT NOT NULL,
|
||||||
|
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
|
||||||
|
state_path BYTEA,
|
||||||
|
node_type INTEGER,
|
||||||
|
diff BOOLEAN NOT NULL DEFAULT FALSE,
|
||||||
|
UNIQUE (header_id, state_path)
|
||||||
|
);
|
||||||
|
|
||||||
|
-- +goose Down
|
||||||
|
DROP TABLE eth.state_cids;
|
15
db/migrations/00009_create_eth_storage_cids_table.sql
Normal file
15
db/migrations/00009_create_eth_storage_cids_table.sql
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
-- +goose Up
|
||||||
|
CREATE TABLE eth.storage_cids (
|
||||||
|
id SERIAL PRIMARY KEY,
|
||||||
|
state_id INTEGER NOT NULL REFERENCES eth.state_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
|
||||||
|
storage_leaf_key VARCHAR(66),
|
||||||
|
cid TEXT NOT NULL,
|
||||||
|
mh_key TEXT NOT NULL REFERENCES public.blocks (key) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
|
||||||
|
storage_path BYTEA,
|
||||||
|
node_type INTEGER NOT NULL,
|
||||||
|
diff BOOLEAN NOT NULL DEFAULT FALSE,
|
||||||
|
UNIQUE (state_id, storage_path)
|
||||||
|
);
|
||||||
|
|
||||||
|
-- +goose Down
|
||||||
|
DROP TABLE eth.storage_cids;
|
13
db/migrations/00010_create_eth_state_accouts_table.sql
Normal file
13
db/migrations/00010_create_eth_state_accouts_table.sql
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
-- +goose Up
|
||||||
|
CREATE TABLE eth.state_accounts (
|
||||||
|
id SERIAL PRIMARY KEY,
|
||||||
|
state_id INTEGER NOT NULL REFERENCES eth.state_cids (id) ON DELETE CASCADE,
|
||||||
|
balance NUMERIC NOT NULL,
|
||||||
|
nonce INTEGER NOT NULL,
|
||||||
|
code_hash BYTEA NOT NULL,
|
||||||
|
storage_root VARCHAR(66) NOT NULL,
|
||||||
|
UNIQUE (state_id)
|
||||||
|
);
|
||||||
|
|
||||||
|
-- +goose Down
|
||||||
|
DROP TABLE eth.state_accounts;
|
@ -1,17 +1,16 @@
|
|||||||
[database]
|
[database]
|
||||||
name = "cerc_testing"
|
name = "vulcanize_public"
|
||||||
hostname = "localhost"
|
hostname = "localhost"
|
||||||
port = 8077
|
port = 5432
|
||||||
user = "vdbm"
|
user = "postgres"
|
||||||
password = "password"
|
|
||||||
|
|
||||||
[ethdb]
|
[leveldb]
|
||||||
path = "/Users/user/go/src/github.com/cerc-io/ipld-eth-state-snapshot/fixture/chain2data"
|
path = "/Users/iannorden/Library/Ethereum/geth/chaindata"
|
||||||
ancient = "/Users/user/go/src/github.com/cerc-io/ipld-eth-state-snapshot/fixture/chain2data/ancient"
|
ancient = "/Users/iannorden/Library/Ethereum/geth/chaindata/ancient"
|
||||||
|
|
||||||
[log]
|
[log]
|
||||||
level = "info"
|
level = "info"
|
||||||
file = "" # Leave blank to output to stdout
|
file = "log_file"
|
||||||
|
|
||||||
[prom]
|
[prom]
|
||||||
metrics = true
|
metrics = true
|
||||||
@ -23,7 +22,7 @@
|
|||||||
[snapshot]
|
[snapshot]
|
||||||
mode = "file"
|
mode = "file"
|
||||||
workers = 4
|
workers = 4
|
||||||
blockHeight = 32
|
blockHeight = -1
|
||||||
recoveryFile = "recovery_file"
|
recoveryFile = "recovery_file"
|
||||||
|
|
||||||
[file]
|
[file]
|
||||||
|
27
fixture/chaindata.go
Normal file
27
fixture/chaindata.go
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
package fixture
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TODO: embed some mainnet data
|
||||||
|
// import "embed"
|
||||||
|
//_go:embed mainnet_data.tar.gz
|
||||||
|
|
||||||
|
var (
|
||||||
|
ChaindataPath, AncientdataPath string
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
_, path, _, _ := runtime.Caller(0)
|
||||||
|
wd := filepath.Dir(path)
|
||||||
|
|
||||||
|
ChaindataPath = filepath.Join(wd, "..", "fixture", "chaindata")
|
||||||
|
AncientdataPath = filepath.Join(ChaindataPath, "ancient")
|
||||||
|
|
||||||
|
if _, err := os.Stat(ChaindataPath); err != nil {
|
||||||
|
panic("must populate chaindata at " + ChaindataPath)
|
||||||
|
}
|
||||||
|
}
|
6
fixture/chaindata/.gitignore
vendored
Normal file
6
fixture/chaindata/.gitignore
vendored
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
*.log
|
||||||
|
CURRENT*
|
||||||
|
LOCK
|
||||||
|
LOG
|
||||||
|
MANIFEST-*
|
||||||
|
ancient/FLOCK
|
BIN
fixture/chaindata/000002.ldb
Normal file
BIN
fixture/chaindata/000002.ldb
Normal file
Binary file not shown.
BIN
fixture/chaindata/ancient/bodies.0000.cdat
Normal file
BIN
fixture/chaindata/ancient/bodies.0000.cdat
Normal file
Binary file not shown.
BIN
fixture/chaindata/ancient/bodies.cidx
Normal file
BIN
fixture/chaindata/ancient/bodies.cidx
Normal file
Binary file not shown.
BIN
fixture/chaindata/ancient/diffs.0000.rdat
Normal file
BIN
fixture/chaindata/ancient/diffs.0000.rdat
Normal file
Binary file not shown.
BIN
fixture/chaindata/ancient/diffs.ridx
Normal file
BIN
fixture/chaindata/ancient/diffs.ridx
Normal file
Binary file not shown.
BIN
fixture/chaindata/ancient/hashes.0000.rdat
Normal file
BIN
fixture/chaindata/ancient/hashes.0000.rdat
Normal file
Binary file not shown.
BIN
fixture/chaindata/ancient/hashes.ridx
Normal file
BIN
fixture/chaindata/ancient/hashes.ridx
Normal file
Binary file not shown.
BIN
fixture/chaindata/ancient/headers.0000.cdat
Normal file
BIN
fixture/chaindata/ancient/headers.0000.cdat
Normal file
Binary file not shown.
BIN
fixture/chaindata/ancient/headers.cidx
Normal file
BIN
fixture/chaindata/ancient/headers.cidx
Normal file
Binary file not shown.
BIN
fixture/chaindata/ancient/receipts.0000.cdat
Normal file
BIN
fixture/chaindata/ancient/receipts.0000.cdat
Normal file
Binary file not shown.
BIN
fixture/chaindata/ancient/receipts.cidx
Normal file
BIN
fixture/chaindata/ancient/receipts.cidx
Normal file
Binary file not shown.
359
fixture/node_paths.go
Normal file
359
fixture/node_paths.go
Normal file
@ -0,0 +1,359 @@
|
|||||||
|
package fixture
|
||||||
|
|
||||||
|
var Block1_StateNodePaths = [][]byte{
|
||||||
|
[]byte{},
|
||||||
|
[]byte{0},
|
||||||
|
[]byte{0, 0},
|
||||||
|
[]byte{0, 2},
|
||||||
|
[]byte{0, 2, 1},
|
||||||
|
[]byte{0, 2, 8},
|
||||||
|
[]byte{0, 2, 12},
|
||||||
|
[]byte{0, 3},
|
||||||
|
[]byte{0, 4},
|
||||||
|
[]byte{0, 6},
|
||||||
|
[]byte{0, 6, 3},
|
||||||
|
[]byte{0, 6, 13},
|
||||||
|
[]byte{0, 7},
|
||||||
|
[]byte{0, 8},
|
||||||
|
[]byte{0, 8, 7},
|
||||||
|
[]byte{0, 8, 11},
|
||||||
|
[]byte{0, 9},
|
||||||
|
[]byte{0, 9, 9},
|
||||||
|
[]byte{0, 9, 10},
|
||||||
|
[]byte{0, 12},
|
||||||
|
[]byte{0, 13},
|
||||||
|
[]byte{0, 14},
|
||||||
|
[]byte{1},
|
||||||
|
[]byte{1, 2},
|
||||||
|
[]byte{1, 2, 5},
|
||||||
|
[]byte{1, 2, 7},
|
||||||
|
[]byte{1, 3},
|
||||||
|
[]byte{1, 3, 1},
|
||||||
|
[]byte{1, 3, 11},
|
||||||
|
[]byte{1, 4},
|
||||||
|
[]byte{1, 5},
|
||||||
|
[]byte{1, 5, 11},
|
||||||
|
[]byte{1, 5, 12},
|
||||||
|
[]byte{1, 5, 15},
|
||||||
|
[]byte{1, 6},
|
||||||
|
[]byte{1, 8},
|
||||||
|
[]byte{1, 10},
|
||||||
|
[]byte{1, 13},
|
||||||
|
[]byte{1, 14},
|
||||||
|
[]byte{1, 14, 2},
|
||||||
|
[]byte{1, 14, 11},
|
||||||
|
[]byte{1, 15},
|
||||||
|
[]byte{1, 15, 9},
|
||||||
|
[]byte{1, 15, 15},
|
||||||
|
[]byte{2},
|
||||||
|
[]byte{2, 0},
|
||||||
|
[]byte{2, 0, 9},
|
||||||
|
[]byte{2, 0, 14},
|
||||||
|
[]byte{2, 1},
|
||||||
|
[]byte{2, 1, 1},
|
||||||
|
[]byte{2, 1, 3},
|
||||||
|
[]byte{2, 1, 14},
|
||||||
|
[]byte{2, 5},
|
||||||
|
[]byte{2, 6},
|
||||||
|
[]byte{2, 9},
|
||||||
|
[]byte{2, 9, 1},
|
||||||
|
[]byte{2, 9, 7},
|
||||||
|
[]byte{2, 11},
|
||||||
|
[]byte{2, 11, 7},
|
||||||
|
[]byte{2, 11, 13},
|
||||||
|
[]byte{2, 13},
|
||||||
|
[]byte{2, 13, 1},
|
||||||
|
[]byte{2, 13, 15},
|
||||||
|
[]byte{2, 15},
|
||||||
|
[]byte{3},
|
||||||
|
[]byte{3, 0},
|
||||||
|
[]byte{3, 0, 0},
|
||||||
|
[]byte{3, 0, 1},
|
||||||
|
[]byte{3, 2},
|
||||||
|
[]byte{3, 2, 3},
|
||||||
|
[]byte{3, 2, 15},
|
||||||
|
[]byte{3, 3},
|
||||||
|
[]byte{3, 4},
|
||||||
|
[]byte{3, 4, 2},
|
||||||
|
[]byte{3, 4, 4},
|
||||||
|
[]byte{3, 4, 5},
|
||||||
|
[]byte{3, 6},
|
||||||
|
[]byte{3, 8},
|
||||||
|
[]byte{3, 9},
|
||||||
|
[]byte{3, 10},
|
||||||
|
[]byte{3, 10, 2},
|
||||||
|
[]byte{3, 10, 8},
|
||||||
|
[]byte{3, 10, 12},
|
||||||
|
[]byte{3, 11},
|
||||||
|
[]byte{3, 12},
|
||||||
|
[]byte{3, 13},
|
||||||
|
[]byte{3, 14},
|
||||||
|
[]byte{3, 14, 4},
|
||||||
|
[]byte{3, 14, 9},
|
||||||
|
[]byte{3, 14, 14},
|
||||||
|
[]byte{3, 14, 14, 10},
|
||||||
|
[]byte{3, 14, 14, 15},
|
||||||
|
[]byte{4},
|
||||||
|
[]byte{4, 0},
|
||||||
|
[]byte{4, 0, 6},
|
||||||
|
[]byte{4, 0, 15},
|
||||||
|
[]byte{4, 1},
|
||||||
|
[]byte{4, 2},
|
||||||
|
[]byte{4, 2, 1},
|
||||||
|
[]byte{4, 2, 11},
|
||||||
|
[]byte{4, 3},
|
||||||
|
[]byte{4, 5},
|
||||||
|
[]byte{4, 6},
|
||||||
|
[]byte{4, 7},
|
||||||
|
[]byte{4, 8},
|
||||||
|
[]byte{4, 11},
|
||||||
|
[]byte{4, 11, 6},
|
||||||
|
[]byte{4, 11, 9},
|
||||||
|
[]byte{4, 11, 12},
|
||||||
|
[]byte{4, 14},
|
||||||
|
[]byte{5},
|
||||||
|
[]byte{5, 0},
|
||||||
|
[]byte{5, 0, 3},
|
||||||
|
[]byte{5, 0, 9},
|
||||||
|
[]byte{5, 0, 15},
|
||||||
|
[]byte{5, 1},
|
||||||
|
[]byte{5, 1, 14},
|
||||||
|
[]byte{5, 1, 15},
|
||||||
|
[]byte{5, 2},
|
||||||
|
[]byte{5, 2, 8},
|
||||||
|
[]byte{5, 2, 10},
|
||||||
|
[]byte{5, 3},
|
||||||
|
[]byte{5, 4},
|
||||||
|
[]byte{5, 4, 6},
|
||||||
|
[]byte{5, 4, 12},
|
||||||
|
[]byte{5, 6},
|
||||||
|
[]byte{5, 8},
|
||||||
|
[]byte{5, 8, 3},
|
||||||
|
[]byte{5, 8, 11},
|
||||||
|
[]byte{5, 10},
|
||||||
|
[]byte{5, 11},
|
||||||
|
[]byte{5, 12},
|
||||||
|
[]byte{5, 13},
|
||||||
|
[]byte{5, 15},
|
||||||
|
[]byte{6},
|
||||||
|
[]byte{6, 0},
|
||||||
|
[]byte{6, 2},
|
||||||
|
[]byte{6, 2, 3},
|
||||||
|
[]byte{6, 2, 9},
|
||||||
|
[]byte{6, 4},
|
||||||
|
[]byte{6, 4, 0},
|
||||||
|
[]byte{6, 4, 0, 0},
|
||||||
|
[]byte{6, 4, 0, 5},
|
||||||
|
[]byte{6, 5},
|
||||||
|
[]byte{6, 5, 4},
|
||||||
|
[]byte{6, 5, 10},
|
||||||
|
[]byte{6, 5, 12},
|
||||||
|
[]byte{6, 5, 13},
|
||||||
|
[]byte{6, 6},
|
||||||
|
[]byte{6, 6, 0},
|
||||||
|
[]byte{6, 6, 8},
|
||||||
|
[]byte{6, 8},
|
||||||
|
[]byte{6, 8, 4},
|
||||||
|
[]byte{6, 8, 4, 2},
|
||||||
|
[]byte{6, 8, 4, 9},
|
||||||
|
[]byte{6, 8, 9},
|
||||||
|
[]byte{6, 10},
|
||||||
|
[]byte{6, 10, 1},
|
||||||
|
[]byte{6, 10, 14},
|
||||||
|
[]byte{6, 11},
|
||||||
|
[]byte{6, 11, 2},
|
||||||
|
[]byte{6, 11, 12},
|
||||||
|
[]byte{6, 11, 14},
|
||||||
|
[]byte{6, 13},
|
||||||
|
[]byte{6, 13, 2},
|
||||||
|
[]byte{6, 13, 12},
|
||||||
|
[]byte{7},
|
||||||
|
[]byte{7, 1},
|
||||||
|
[]byte{7, 5},
|
||||||
|
[]byte{7, 7},
|
||||||
|
[]byte{7, 8},
|
||||||
|
[]byte{7, 8, 2},
|
||||||
|
[]byte{7, 8, 5},
|
||||||
|
[]byte{7, 9},
|
||||||
|
[]byte{7, 13},
|
||||||
|
[]byte{7, 13, 1},
|
||||||
|
[]byte{7, 13, 1, 0},
|
||||||
|
[]byte{7, 13, 1, 13},
|
||||||
|
[]byte{7, 13, 7},
|
||||||
|
[]byte{7, 14},
|
||||||
|
[]byte{7, 14, 8},
|
||||||
|
[]byte{7, 14, 11},
|
||||||
|
[]byte{8},
|
||||||
|
[]byte{8, 0},
|
||||||
|
[]byte{8, 0, 3},
|
||||||
|
[]byte{8, 0, 11},
|
||||||
|
[]byte{8, 2},
|
||||||
|
[]byte{8, 4},
|
||||||
|
[]byte{8, 8},
|
||||||
|
[]byte{8, 9},
|
||||||
|
[]byte{8, 9, 3},
|
||||||
|
[]byte{8, 9, 13},
|
||||||
|
[]byte{8, 10},
|
||||||
|
[]byte{8, 12},
|
||||||
|
[]byte{8, 12, 3},
|
||||||
|
[]byte{8, 12, 15},
|
||||||
|
[]byte{8, 13},
|
||||||
|
[]byte{8, 15},
|
||||||
|
[]byte{8, 15, 8},
|
||||||
|
[]byte{8, 15, 13},
|
||||||
|
[]byte{9},
|
||||||
|
[]byte{9, 0},
|
||||||
|
[]byte{9, 5},
|
||||||
|
[]byte{9, 6},
|
||||||
|
[]byte{9, 6, 10},
|
||||||
|
[]byte{9, 6, 14},
|
||||||
|
[]byte{9, 7},
|
||||||
|
[]byte{9, 9},
|
||||||
|
[]byte{9, 14},
|
||||||
|
[]byte{9, 15},
|
||||||
|
[]byte{9, 15, 0},
|
||||||
|
[]byte{9, 15, 4},
|
||||||
|
[]byte{9, 15, 10},
|
||||||
|
[]byte{10},
|
||||||
|
[]byte{10, 0},
|
||||||
|
[]byte{10, 0, 9},
|
||||||
|
[]byte{10, 0, 10},
|
||||||
|
[]byte{10, 0, 15},
|
||||||
|
[]byte{10, 2},
|
||||||
|
[]byte{10, 3},
|
||||||
|
[]byte{10, 6},
|
||||||
|
[]byte{10, 8},
|
||||||
|
[]byte{10, 9},
|
||||||
|
[]byte{10, 10},
|
||||||
|
[]byte{10, 10, 5},
|
||||||
|
[]byte{10, 10, 8},
|
||||||
|
[]byte{10, 13},
|
||||||
|
[]byte{10, 13, 0},
|
||||||
|
[]byte{10, 13, 13},
|
||||||
|
[]byte{10, 14},
|
||||||
|
[]byte{10, 14, 4},
|
||||||
|
[]byte{10, 14, 11},
|
||||||
|
[]byte{10, 14, 11, 8},
|
||||||
|
[]byte{10, 14, 11, 14},
|
||||||
|
[]byte{10, 15},
|
||||||
|
[]byte{11},
|
||||||
|
[]byte{11, 0},
|
||||||
|
[]byte{11, 0, 2},
|
||||||
|
[]byte{11, 0, 15},
|
||||||
|
[]byte{11, 1},
|
||||||
|
[]byte{11, 2},
|
||||||
|
[]byte{11, 3},
|
||||||
|
[]byte{11, 4},
|
||||||
|
[]byte{11, 5},
|
||||||
|
[]byte{11, 7},
|
||||||
|
[]byte{11, 7, 12},
|
||||||
|
[]byte{11, 7, 15},
|
||||||
|
[]byte{11, 8},
|
||||||
|
[]byte{11, 8, 8},
|
||||||
|
[]byte{11, 8, 15},
|
||||||
|
[]byte{11, 9},
|
||||||
|
[]byte{11, 11},
|
||||||
|
[]byte{11, 12},
|
||||||
|
[]byte{11, 13},
|
||||||
|
[]byte{11, 14},
|
||||||
|
[]byte{11, 14, 0},
|
||||||
|
[]byte{11, 14, 0, 1},
|
||||||
|
[]byte{11, 14, 0, 3},
|
||||||
|
[]byte{11, 14, 8},
|
||||||
|
[]byte{11, 14, 13},
|
||||||
|
[]byte{12},
|
||||||
|
[]byte{12, 0},
|
||||||
|
[]byte{12, 0, 0},
|
||||||
|
[]byte{12, 0, 1},
|
||||||
|
[]byte{12, 0, 1, 3},
|
||||||
|
[]byte{12, 0, 1, 11},
|
||||||
|
[]byte{12, 0, 15},
|
||||||
|
[]byte{12, 2},
|
||||||
|
[]byte{12, 2, 9},
|
||||||
|
[]byte{12, 2, 12},
|
||||||
|
[]byte{12, 4},
|
||||||
|
[]byte{12, 5},
|
||||||
|
[]byte{12, 6},
|
||||||
|
[]byte{12, 6, 0},
|
||||||
|
[]byte{12, 6, 4},
|
||||||
|
[]byte{12, 6, 14},
|
||||||
|
[]byte{12, 7},
|
||||||
|
[]byte{12, 7, 0},
|
||||||
|
[]byte{12, 7, 12},
|
||||||
|
[]byte{12, 7, 13},
|
||||||
|
[]byte{12, 9},
|
||||||
|
[]byte{12, 11},
|
||||||
|
[]byte{12, 12},
|
||||||
|
[]byte{13},
|
||||||
|
[]byte{13, 2},
|
||||||
|
[]byte{13, 2, 0},
|
||||||
|
[]byte{13, 2, 2},
|
||||||
|
[]byte{13, 2, 4},
|
||||||
|
[]byte{13, 3},
|
||||||
|
[]byte{13, 3, 7},
|
||||||
|
[]byte{13, 3, 10},
|
||||||
|
[]byte{13, 5},
|
||||||
|
[]byte{13, 8},
|
||||||
|
[]byte{13, 8, 1},
|
||||||
|
[]byte{13, 8, 15},
|
||||||
|
[]byte{13, 9},
|
||||||
|
[]byte{13, 9, 0},
|
||||||
|
[]byte{13, 9, 14},
|
||||||
|
[]byte{13, 10},
|
||||||
|
[]byte{13, 12},
|
||||||
|
[]byte{13, 12, 8},
|
||||||
|
[]byte{13, 12, 11},
|
||||||
|
[]byte{13, 13},
|
||||||
|
[]byte{13, 13, 7},
|
||||||
|
[]byte{13, 13, 12},
|
||||||
|
[]byte{13, 14},
|
||||||
|
[]byte{14},
|
||||||
|
[]byte{14, 0},
|
||||||
|
[]byte{14, 1},
|
||||||
|
[]byte{14, 2},
|
||||||
|
[]byte{14, 2, 2},
|
||||||
|
[]byte{14, 2, 12},
|
||||||
|
[]byte{14, 3},
|
||||||
|
[]byte{14, 4},
|
||||||
|
[]byte{14, 5},
|
||||||
|
[]byte{14, 6},
|
||||||
|
[]byte{14, 6, 9},
|
||||||
|
[]byte{14, 6, 12},
|
||||||
|
[]byte{14, 7},
|
||||||
|
[]byte{14, 7, 4},
|
||||||
|
[]byte{14, 7, 12},
|
||||||
|
[]byte{14, 8},
|
||||||
|
[]byte{14, 8, 3},
|
||||||
|
[]byte{14, 8, 12},
|
||||||
|
[]byte{14, 8, 12, 0},
|
||||||
|
[]byte{14, 8, 12, 6},
|
||||||
|
[]byte{14, 10},
|
||||||
|
[]byte{14, 10, 6},
|
||||||
|
[]byte{14, 10, 12},
|
||||||
|
[]byte{14, 11},
|
||||||
|
[]byte{14, 11, 8},
|
||||||
|
[]byte{14, 11, 13},
|
||||||
|
[]byte{14, 12},
|
||||||
|
[]byte{14, 14},
|
||||||
|
[]byte{14, 14, 3},
|
||||||
|
[]byte{14, 14, 9},
|
||||||
|
[]byte{15},
|
||||||
|
[]byte{15, 0},
|
||||||
|
[]byte{15, 5},
|
||||||
|
[]byte{15, 6},
|
||||||
|
[]byte{15, 9},
|
||||||
|
[]byte{15, 9, 0},
|
||||||
|
[]byte{15, 9, 2},
|
||||||
|
[]byte{15, 9, 3},
|
||||||
|
[]byte{15, 11},
|
||||||
|
[]byte{15, 11, 1},
|
||||||
|
[]byte{15, 11, 6},
|
||||||
|
[]byte{15, 12},
|
||||||
|
[]byte{15, 12, 3},
|
||||||
|
[]byte{15, 12, 14},
|
||||||
|
[]byte{15, 12, 14, 7},
|
||||||
|
[]byte{15, 12, 14, 13},
|
||||||
|
[]byte{15, 13},
|
||||||
|
[]byte{15, 14},
|
||||||
|
[]byte{15, 15},
|
||||||
|
}
|
36
fixture/service.go
Normal file
36
fixture/service.go
Normal file
@ -0,0 +1,36 @@
package fixture

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"

	snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
)

var Block1_Header = types.Header{
	ParentHash:  common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177"),
	UncleHash:   common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
	Coinbase:    common.HexToAddress("0x0000000000000000000000000000000000000000"),
	Root:        common.HexToHash("0x53580584816f617295ea26c0e17641e0120cab2f0a8ffb53a866fd53aa8e8c2d"),
	TxHash:      common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
	ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
	Bloom:       types.Bloom{},
	Difficulty:  big.NewInt(2),
	Number:      big.NewInt(1),
	GasLimit:    4704588,
	GasUsed:     0,
	Time:        1492010458,
	Extra:       []byte{215, 131, 1, 6, 0, 132, 103, 101, 116, 104, 135, 103, 111, 49, 46, 55, 46, 51, 133, 108, 105, 110, 117, 120, 0, 0, 0, 0, 0, 0, 0, 0, 159, 30, 250, 30, 250, 114, 175, 19, 140, 145, 89, 102, 198, 57, 84, 74, 2, 85, 230, 40, 142, 24, 140, 34, 206, 145, 104, 193, 13, 190, 70, 218, 61, 136, 180, 170, 6, 89, 48, 17, 159, 184, 134, 33, 11, 240, 26, 8, 79, 222, 93, 59, 196, 141, 138, 163, 139, 202, 146, 228, 252, 197, 33, 81, 0},
	MixDigest:   common.Hash{},
	Nonce:       types.BlockNonce{},
	BaseFee:     nil,
}

var Block1_StateNode0 = snapt.Node{
	NodeType: 0,
	Path:     []byte{12, 0},
	Key:      common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
	Value:    []byte{248, 113, 160, 147, 141, 92, 6, 119, 63, 191, 125, 121, 193, 230, 153, 223, 49, 102, 109, 236, 50, 44, 161, 215, 28, 224, 171, 111, 118, 230, 79, 99, 18, 99, 4, 160, 117, 126, 95, 187, 60, 115, 90, 36, 51, 167, 59, 86, 20, 175, 63, 118, 94, 230, 107, 202, 41, 253, 234, 165, 214, 221, 181, 45, 9, 202, 244, 148, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 160, 247, 170, 155, 102, 71, 245, 140, 90, 255, 89, 193, 131, 99, 31, 85, 161, 78, 90, 0, 204, 46, 253, 15, 71, 120, 19, 109, 123, 255, 0, 188, 27, 128},
}
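A minimal sketch of how a test might sanity-check these fixtures before using them, assuming they live in a fixture package at the module root as the import path in the file suggests; the test itself is illustrative, not part of the change:

package fixture_test

import (
	"testing"

	fixture "github.com/vulcanize/ipld-eth-state-snapshot/fixture"
)

// Illustrative sanity check: the canned header is block #1 of an empty
// chain segment, and the state node fixture sits at nibble path [12, 0].
func TestBlock1Fixtures(t *testing.T) {
	h := fixture.Block1_Header
	if h.Number.Uint64() != 1 || h.GasUsed != 0 {
		t.Fatalf("unexpected header fixture: number=%v gasUsed=%d", h.Number, h.GasUsed)
	}
	if got := fixture.Block1_StateNode0.Path; len(got) != 2 || got[0] != 12 || got[1] != 0 {
		t.Fatalf("unexpected state node path: %v", got)
	}
}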
go.mod (184 lines changed)
@ -1,58 +1,53 @@
-module github.com/cerc-io/ipld-eth-state-snapshot
+module github.com/vulcanize/ipld-eth-state-snapshot
 
-go 1.21
+go 1.18
 
 require (
-	github.com/cerc-io/eth-iterator-utils v0.3.1
-	github.com/cerc-io/eth-testing v0.5.1
-	github.com/cerc-io/plugeth-statediff v0.3.1
-	github.com/ethereum/go-ethereum v1.14.5
+	github.com/ethereum/go-ethereum v1.10.18
 	github.com/golang/mock v1.6.0
-	github.com/prometheus/client_golang v1.16.0
-	github.com/sirupsen/logrus v1.9.3
-	github.com/spf13/cobra v1.5.0
-	github.com/spf13/viper v1.12.0
-	github.com/stretchr/testify v1.8.4
+	github.com/ipfs/go-cid v0.1.0
+	github.com/ipfs/go-ipfs-blockstore v1.1.2
+	github.com/ipfs/go-ipfs-ds-help v1.1.0
+	github.com/jackc/pgx/v4 v4.15.0
+	github.com/multiformats/go-multihash v0.1.0
+	github.com/prometheus/client_golang v1.3.0
+	github.com/sirupsen/logrus v1.6.0
+	github.com/spf13/cobra v1.0.0
+	github.com/spf13/viper v1.7.0
+	github.com/vulcanize/go-eth-state-node-iterator v1.1.0
 )
 
 require (
-	github.com/DataDog/zstd v1.5.5 // indirect
-	github.com/Microsoft/go-winio v0.6.2 // indirect
-	github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
+	github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
+	github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/bits-and-blooms/bitset v1.10.0 // indirect
-	github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
-	github.com/cespare/xxhash/v2 v2.3.0 // indirect
-	github.com/cockroachdb/errors v1.11.1 // indirect
-	github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
-	github.com/cockroachdb/pebble v1.1.0 // indirect
-	github.com/cockroachdb/redact v1.1.5 // indirect
-	github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
-	github.com/consensys/bavard v0.1.13 // indirect
-	github.com/consensys/gnark-crypto v0.12.1 // indirect
-	github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c // indirect
-	github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
+	github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
+	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/deckarep/golang-set/v2 v2.6.0 // indirect
-	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
-	github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
-	github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 // indirect
-	github.com/fsnotify/fsnotify v1.6.0 // indirect
-	github.com/georgysavva/scany v1.2.1 // indirect
-	github.com/getsentry/sentry-go v0.22.0 // indirect
-	github.com/go-ole/go-ole v1.3.0 // indirect
-	github.com/go-stack/stack v1.8.1 // indirect
-	github.com/gofrs/flock v0.8.1 // indirect
+	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
+	github.com/fsnotify/fsnotify v1.5.1 // indirect
+	github.com/georgysavva/scany v0.2.9 // indirect
+	github.com/go-kit/kit v0.10.0 // indirect
+	github.com/go-ole/go-ole v1.2.1 // indirect
+	github.com/go-stack/stack v1.8.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
-	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/golang/snappy v0.0.4 // indirect
+	github.com/google/go-cmp v0.5.6 // indirect
+	github.com/google/uuid v1.3.0 // indirect
+	github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c // indirect
+	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
-	github.com/holiman/uint256 v1.2.4 // indirect
-	github.com/inconshreveable/log15 v2.16.0+incompatible // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
-	github.com/ipfs/go-cid v0.4.1 // indirect
+	github.com/ipfs/bbloom v0.0.4 // indirect
+	github.com/ipfs/go-block-format v0.0.3 // indirect
+	github.com/ipfs/go-datastore v0.5.1 // indirect
+	github.com/ipfs/go-ipfs-util v0.0.2 // indirect
+	github.com/ipfs/go-ipld-format v0.2.0 // indirect
+	github.com/ipfs/go-log v1.0.5 // indirect
+	github.com/ipfs/go-log/v2 v2.4.0 // indirect
+	github.com/ipfs/go-metrics-interface v0.0.1 // indirect
 	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
 	github.com/jackc/pgconn v1.11.0 // indirect
 	github.com/jackc/pgio v1.0.0 // indirect
@ -60,64 +55,63 @@ require (
 	github.com/jackc/pgproto3/v2 v2.2.0 // indirect
 	github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
 	github.com/jackc/pgtype v1.10.0 // indirect
-	github.com/jackc/pgx/v4 v4.15.0 // indirect
 	github.com/jackc/puddle v1.2.1 // indirect
-	github.com/jmoiron/sqlx v1.3.5 // indirect
-	github.com/klauspost/compress v1.16.7 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
-	github.com/kr/pretty v0.3.1 // indirect
-	github.com/kr/text v0.2.0 // indirect
-	github.com/lib/pq v1.10.9 // indirect
-	github.com/magiconair/properties v1.8.6 // indirect
-	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.20 // indirect
-	github.com/mattn/go-runewidth v0.0.14 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
-	github.com/minio/sha256-simd v1.0.1 // indirect
-	github.com/mitchellh/mapstructure v1.5.0 // indirect
-	github.com/mmcloughlin/addchain v0.4.0 // indirect
+	github.com/jbenet/goprocess v0.1.4 // indirect
+	github.com/jmoiron/sqlx v1.2.0 // indirect
+	github.com/klauspost/cpuid/v2 v2.0.9 // indirect
+	github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
+	github.com/kr/pretty v0.3.0 // indirect
+	github.com/lib/pq v1.10.2 // indirect
+	github.com/magiconair/properties v1.8.1 // indirect
+	github.com/mattn/go-isatty v0.0.14 // indirect
+	github.com/mattn/go-runewidth v0.0.9 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+	github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
+	github.com/minio/sha256-simd v1.0.0 // indirect
+	github.com/mitchellh/mapstructure v1.4.1 // indirect
 	github.com/mr-tron/base58 v1.2.0 // indirect
-	github.com/multiformats/go-base32 v0.1.0 // indirect
-	github.com/multiformats/go-base36 v0.2.0 // indirect
-	github.com/multiformats/go-multibase v0.2.0 // indirect
-	github.com/multiformats/go-multihash v0.2.3 // indirect
-	github.com/multiformats/go-varint v0.0.7 // indirect
+	github.com/multiformats/go-base32 v0.0.4 // indirect
+	github.com/multiformats/go-base36 v0.1.0 // indirect
+	github.com/multiformats/go-multibase v0.0.3 // indirect
+	github.com/multiformats/go-varint v0.0.6 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
-	github.com/openrelayxyz/plugeth-utils v1.5.0 // indirect
-	github.com/pelletier/go-toml v1.9.5 // indirect
-	github.com/pelletier/go-toml/v2 v2.0.5 // indirect
-	github.com/pganalyze/pg_query_go/v4 v4.2.1 // indirect
+	github.com/onsi/ginkgo v1.16.5 // indirect
+	github.com/onsi/gomega v1.13.0 // indirect
+	github.com/opentracing/opentracing-go v1.2.0 // indirect
+	github.com/pelletier/go-toml v1.2.0 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_model v0.4.0 // indirect
-	github.com/prometheus/common v0.44.0 // indirect
-	github.com/prometheus/procfs v0.11.0 // indirect
-	github.com/rivo/uniseg v0.4.4 // indirect
-	github.com/rogpeppe/go-internal v1.12.0 // indirect
-	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
-	github.com/shopspring/decimal v1.2.0 // indirect
+	github.com/prometheus/client_model v0.1.0 // indirect
+	github.com/prometheus/common v0.7.0 // indirect
+	github.com/prometheus/procfs v0.0.8 // indirect
+	github.com/prometheus/tsdb v0.7.1 // indirect
+	github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect
+	github.com/smartystreets/assertions v1.0.0 // indirect
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
-	github.com/spf13/afero v1.8.2 // indirect
-	github.com/spf13/cast v1.5.0 // indirect
-	github.com/spf13/jwalterweatherman v1.1.0 // indirect
-	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/subosito/gotenv v1.3.0 // indirect
-	github.com/supranational/blst v0.3.11 // indirect
-	github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect
-	github.com/thoas/go-funk v0.9.3 // indirect
-	github.com/tklauser/go-sysconf v0.3.12 // indirect
-	github.com/tklauser/numcpus v0.6.1 // indirect
-	github.com/yusufpapurcu/wmi v1.2.3 // indirect
-	golang.org/x/crypto v0.22.0 // indirect
-	golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
-	golang.org/x/sync v0.7.0 // indirect
-	golang.org/x/sys v0.20.0 // indirect
-	golang.org/x/term v0.19.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
-	google.golang.org/protobuf v1.33.0 // indirect
-	gopkg.in/ini.v1 v1.67.0 // indirect
+	github.com/spf13/afero v1.1.2 // indirect
+	github.com/spf13/cast v1.3.0 // indirect
+	github.com/spf13/jwalterweatherman v1.0.0 // indirect
+	github.com/spf13/pflag v1.0.3 // indirect
+	github.com/stretchr/objx v0.2.0 // indirect
+	github.com/stretchr/testify v1.7.0 // indirect
+	github.com/subosito/gotenv v1.2.0 // indirect
+	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
+	github.com/tklauser/go-sysconf v0.3.5 // indirect
+	github.com/tklauser/numcpus v0.2.2 // indirect
+	go.uber.org/atomic v1.9.0 // indirect
+	go.uber.org/goleak v1.1.11 // indirect
+	go.uber.org/multierr v1.7.0 // indirect
+	go.uber.org/zap v1.19.1 // indirect
+	golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b // indirect
+	golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
+	golang.org/x/sys v0.0.0-20211209171907-798191bca915 // indirect
+	golang.org/x/text v0.3.7 // indirect
+	google.golang.org/appengine v1.6.6 // indirect
+	google.golang.org/protobuf v1.27.1 // indirect
+	gopkg.in/ini.v1 v1.51.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
-	lukechampine.com/blake3 v1.2.1 // indirect
-	rsc.io/tmplfunc v0.0.3 // indirect
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+	lukechampine.com/blake3 v1.1.7 // indirect
 )
 
+replace github.com/ethereum/go-ethereum v1.10.18 => github.com/vulcanize/go-ethereum v1.10.18-statediff-3.2.2
@ -1,256 +0,0 @@
|
|||||||
// Code generated by MockGen. DO NOT EDIT.
|
|
||||||
// Source: github.com/cerc-io/plugeth-statediff/indexer (interfaces: Indexer)
|
|
||||||
|
|
||||||
// Package mocks is a generated GoMock package.
|
|
||||||
package mocks
|
|
||||||
|
|
||||||
import (
|
|
||||||
context "context"
|
|
||||||
big "math/big"
|
|
||||||
reflect "reflect"
|
|
||||||
time "time"
|
|
||||||
|
|
||||||
interfaces "github.com/cerc-io/plugeth-statediff/indexer/interfaces"
|
|
||||||
models "github.com/cerc-io/plugeth-statediff/indexer/models"
|
|
||||||
types "github.com/cerc-io/plugeth-statediff/types"
|
|
||||||
common "github.com/ethereum/go-ethereum/common"
|
|
||||||
types0 "github.com/ethereum/go-ethereum/core/types"
|
|
||||||
gomock "github.com/golang/mock/gomock"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MockgenIndexer is a mock of Indexer interface.
|
|
||||||
type MockgenIndexer struct {
|
|
||||||
ctrl *gomock.Controller
|
|
||||||
recorder *MockgenIndexerMockRecorder
|
|
||||||
}
|
|
||||||
|
|
||||||
// MockgenIndexerMockRecorder is the mock recorder for MockgenIndexer.
|
|
||||||
type MockgenIndexerMockRecorder struct {
|
|
||||||
mock *MockgenIndexer
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMockgenIndexer creates a new mock instance.
|
|
||||||
func NewMockgenIndexer(ctrl *gomock.Controller) *MockgenIndexer {
|
|
||||||
mock := &MockgenIndexer{ctrl: ctrl}
|
|
||||||
mock.recorder = &MockgenIndexerMockRecorder{mock}
|
|
||||||
return mock
|
|
||||||
}
|
|
||||||
|
|
||||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
|
||||||
func (m *MockgenIndexer) EXPECT() *MockgenIndexerMockRecorder {
|
|
||||||
return m.recorder
|
|
||||||
}
|
|
||||||
|
|
||||||
// BeginTx mocks base method.
|
|
||||||
func (m *MockgenIndexer) BeginTx(arg0 *big.Int, arg1 context.Context) interfaces.Batch {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "BeginTx", arg0, arg1)
|
|
||||||
ret0, _ := ret[0].(interfaces.Batch)
|
|
||||||
return ret0
|
|
||||||
}
|
|
||||||
|
|
||||||
// BeginTx indicates an expected call of BeginTx.
|
|
||||||
func (mr *MockgenIndexerMockRecorder) BeginTx(arg0, arg1 interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginTx", reflect.TypeOf((*MockgenIndexer)(nil).BeginTx), arg0, arg1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClearWatchedAddresses mocks base method.
|
|
||||||
func (m *MockgenIndexer) ClearWatchedAddresses() error {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "ClearWatchedAddresses")
|
|
||||||
ret0, _ := ret[0].(error)
|
|
||||||
return ret0
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClearWatchedAddresses indicates an expected call of ClearWatchedAddresses.
|
|
||||||
func (mr *MockgenIndexerMockRecorder) ClearWatchedAddresses() *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClearWatchedAddresses", reflect.TypeOf((*MockgenIndexer)(nil).ClearWatchedAddresses))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close mocks base method.
|
|
||||||
func (m *MockgenIndexer) Close() error {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "Close")
|
|
||||||
ret0, _ := ret[0].(error)
|
|
||||||
return ret0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close indicates an expected call of Close.
|
|
||||||
func (mr *MockgenIndexerMockRecorder) Close() *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockgenIndexer)(nil).Close))
|
|
||||||
}
|
|
||||||
|
|
||||||
// CurrentBlock mocks base method.
|
|
||||||
func (m *MockgenIndexer) CurrentBlock() (*models.HeaderModel, error) {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "CurrentBlock")
|
|
||||||
ret0, _ := ret[0].(*models.HeaderModel)
|
|
||||||
ret1, _ := ret[1].(error)
|
|
||||||
return ret0, ret1
|
|
||||||
}
|
|
||||||
|
|
||||||
// CurrentBlock indicates an expected call of CurrentBlock.
|
|
||||||
func (mr *MockgenIndexerMockRecorder) CurrentBlock() *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentBlock", reflect.TypeOf((*MockgenIndexer)(nil).CurrentBlock))
|
|
||||||
}
|
|
||||||
|
|
||||||
// DetectGaps mocks base method.
|
|
||||||
func (m *MockgenIndexer) DetectGaps(arg0, arg1 uint64) ([]*interfaces.BlockGap, error) {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "DetectGaps", arg0, arg1)
|
|
||||||
ret0, _ := ret[0].([]*interfaces.BlockGap)
|
|
||||||
ret1, _ := ret[1].(error)
|
|
||||||
return ret0, ret1
|
|
||||||
}
|
|
||||||
|
|
||||||
// DetectGaps indicates an expected call of DetectGaps.
|
|
||||||
func (mr *MockgenIndexerMockRecorder) DetectGaps(arg0, arg1 interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetectGaps", reflect.TypeOf((*MockgenIndexer)(nil).DetectGaps), arg0, arg1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasBlock mocks base method.
|
|
||||||
func (m *MockgenIndexer) HasBlock(arg0 common.Hash, arg1 uint64) (bool, error) {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "HasBlock", arg0, arg1)
|
|
||||||
ret0, _ := ret[0].(bool)
|
|
||||||
ret1, _ := ret[1].(error)
|
|
||||||
return ret0, ret1
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasBlock indicates an expected call of HasBlock.
|
|
||||||
func (mr *MockgenIndexerMockRecorder) HasBlock(arg0, arg1 interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasBlock", reflect.TypeOf((*MockgenIndexer)(nil).HasBlock), arg0, arg1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertWatchedAddresses mocks base method.
|
|
||||||
func (m *MockgenIndexer) InsertWatchedAddresses(arg0 []types.WatchAddressArg, arg1 *big.Int) error {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "InsertWatchedAddresses", arg0, arg1)
|
|
||||||
ret0, _ := ret[0].(error)
|
|
||||||
return ret0
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertWatchedAddresses indicates an expected call of InsertWatchedAddresses.
|
|
||||||
func (mr *MockgenIndexerMockRecorder) InsertWatchedAddresses(arg0, arg1 interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertWatchedAddresses", reflect.TypeOf((*MockgenIndexer)(nil).InsertWatchedAddresses), arg0, arg1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadWatchedAddresses mocks base method.
|
|
||||||
func (m *MockgenIndexer) LoadWatchedAddresses() ([]common.Address, error) {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "LoadWatchedAddresses")
|
|
||||||
ret0, _ := ret[0].([]common.Address)
|
|
||||||
ret1, _ := ret[1].(error)
|
|
||||||
return ret0, ret1
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadWatchedAddresses indicates an expected call of LoadWatchedAddresses.
|
|
||||||
func (mr *MockgenIndexerMockRecorder) LoadWatchedAddresses() *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadWatchedAddresses", reflect.TypeOf((*MockgenIndexer)(nil).LoadWatchedAddresses))
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushBlock mocks base method.
|
|
||||||
func (m *MockgenIndexer) PushBlock(arg0 *types0.Block, arg1 types0.Receipts, arg2 *big.Int) (interfaces.Batch, error) {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "PushBlock", arg0, arg1, arg2)
|
|
||||||
ret0, _ := ret[0].(interfaces.Batch)
|
|
||||||
ret1, _ := ret[1].(error)
|
|
||||||
return ret0, ret1
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushBlock indicates an expected call of PushBlock.
|
|
||||||
func (mr *MockgenIndexerMockRecorder) PushBlock(arg0, arg1, arg2 interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushBlock", reflect.TypeOf((*MockgenIndexer)(nil).PushBlock), arg0, arg1, arg2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushHeader mocks base method.
|
|
||||||
func (m *MockgenIndexer) PushHeader(arg0 interfaces.Batch, arg1 *types0.Header, arg2, arg3 *big.Int) (string, error) {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "PushHeader", arg0, arg1, arg2, arg3)
|
|
||||||
ret0, _ := ret[0].(string)
|
|
||||||
ret1, _ := ret[1].(error)
|
|
||||||
return ret0, ret1
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushHeader indicates an expected call of PushHeader.
|
|
||||||
func (mr *MockgenIndexerMockRecorder) PushHeader(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushHeader", reflect.TypeOf((*MockgenIndexer)(nil).PushHeader), arg0, arg1, arg2, arg3)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushIPLD mocks base method.
|
|
||||||
func (m *MockgenIndexer) PushIPLD(arg0 interfaces.Batch, arg1 types.IPLD) error {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "PushIPLD", arg0, arg1)
|
|
||||||
ret0, _ := ret[0].(error)
|
|
||||||
return ret0
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushIPLD indicates an expected call of PushIPLD.
|
|
||||||
func (mr *MockgenIndexerMockRecorder) PushIPLD(arg0, arg1 interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushIPLD", reflect.TypeOf((*MockgenIndexer)(nil).PushIPLD), arg0, arg1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushStateNode mocks base method.
|
|
||||||
func (m *MockgenIndexer) PushStateNode(arg0 interfaces.Batch, arg1 types.StateLeafNode, arg2 string) error {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "PushStateNode", arg0, arg1, arg2)
|
|
||||||
ret0, _ := ret[0].(error)
|
|
||||||
return ret0
|
|
||||||
}
|
|
||||||
|
|
||||||
// PushStateNode indicates an expected call of PushStateNode.
|
|
||||||
func (mr *MockgenIndexerMockRecorder) PushStateNode(arg0, arg1, arg2 interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushStateNode", reflect.TypeOf((*MockgenIndexer)(nil).PushStateNode), arg0, arg1, arg2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveWatchedAddresses mocks base method.
|
|
||||||
func (m *MockgenIndexer) RemoveWatchedAddresses(arg0 []types.WatchAddressArg) error {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "RemoveWatchedAddresses", arg0)
|
|
||||||
ret0, _ := ret[0].(error)
|
|
||||||
return ret0
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveWatchedAddresses indicates an expected call of RemoveWatchedAddresses.
|
|
||||||
func (mr *MockgenIndexerMockRecorder) RemoveWatchedAddresses(arg0 interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveWatchedAddresses", reflect.TypeOf((*MockgenIndexer)(nil).RemoveWatchedAddresses), arg0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReportDBMetrics mocks base method.
|
|
||||||
func (m *MockgenIndexer) ReportDBMetrics(arg0 time.Duration, arg1 <-chan bool) {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
m.ctrl.Call(m, "ReportDBMetrics", arg0, arg1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReportDBMetrics indicates an expected call of ReportDBMetrics.
|
|
||||||
func (mr *MockgenIndexerMockRecorder) ReportDBMetrics(arg0, arg1 interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportDBMetrics", reflect.TypeOf((*MockgenIndexer)(nil).ReportDBMetrics), arg0, arg1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetWatchedAddresses mocks base method.
|
|
||||||
func (m *MockgenIndexer) SetWatchedAddresses(arg0 []types.WatchAddressArg, arg1 *big.Int) error {
|
|
||||||
m.ctrl.T.Helper()
|
|
||||||
ret := m.ctrl.Call(m, "SetWatchedAddresses", arg0, arg1)
|
|
||||||
ret0, _ := ret[0].(error)
|
|
||||||
return ret0
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetWatchedAddresses indicates an expected call of SetWatchedAddresses.
|
|
||||||
func (mr *MockgenIndexerMockRecorder) SetWatchedAddresses(arg0, arg1 interface{}) *gomock.Call {
|
|
||||||
mr.mock.ctrl.T.Helper()
|
|
||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWatchedAddresses", reflect.TypeOf((*MockgenIndexer)(nil).SetWatchedAddresses), arg0, arg1)
|
|
||||||
}
|
|
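For readers unfamiliar with GoMock, a short sketch of driving this generated mock directly from a test; the import path for the mocks package is an assumption, since the file path is not shown in this view:

package mocks_test

import (
	"testing"

	"github.com/golang/mock/gomock"

	// assumed import path for the generated mocks package above
	mocks "github.com/vulcanize/ipld-eth-state-snapshot/mocks"
)

func TestMockgenIndexerExpectations(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	m := mocks.NewMockgenIndexer(ctrl)
	// Expect exactly one CurrentBlock call and stub its return values.
	m.EXPECT().CurrentBlock().Return(nil, nil).Times(1)

	if _, err := m.CurrentBlock(); err != nil {
		t.Fatal(err)
	}
}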
@ -1,88 +0,0 @@
package mocks

import (
	"context"
	"fmt"
	"math/big"
	"sync"
	"testing"

	"github.com/cerc-io/plugeth-statediff/indexer"
	sdtypes "github.com/cerc-io/plugeth-statediff/types"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/golang/mock/gomock"
)

// Indexer just caches data but wraps a gomock instance, so we can mock other methods if needed
type Indexer struct {
	*MockgenIndexer
	sync.RWMutex

	IndexerData
}

type IndexerData struct {
	Headers    map[uint64]*types.Header
	StateNodes []sdtypes.StateLeafNode
	IPLDs      []sdtypes.IPLD
}

// no-op mock Batch
type Batch struct{}

// NewIndexer returns a mock indexer that caches data in lists
func NewIndexer(t *testing.T) *Indexer {
	ctl := gomock.NewController(t)
	return &Indexer{
		MockgenIndexer: NewMockgenIndexer(ctl),
		IndexerData: IndexerData{
			Headers: make(map[uint64]*types.Header),
		},
	}
}

func (i *Indexer) PushHeader(_ indexer.Batch, header *types.Header, _, _ *big.Int) (string, error) {
	i.Lock()
	defer i.Unlock()
	i.Headers[header.Number.Uint64()] = header
	return header.Hash().String(), nil
}

func (i *Indexer) PushStateNode(_ indexer.Batch, stateNode sdtypes.StateLeafNode, _ string) error {
	i.Lock()
	defer i.Unlock()
	i.StateNodes = append(i.StateNodes, stateNode)
	return nil
}

func (i *Indexer) PushIPLD(_ indexer.Batch, ipld sdtypes.IPLD) error {
	i.Lock()
	defer i.Unlock()
	i.IPLDs = append(i.IPLDs, ipld)
	return nil
}

func (i *Indexer) BeginTx(_ *big.Int, _ context.Context) indexer.Batch {
	return Batch{}
}

func (Batch) Submit() error           { return nil }
func (Batch) BlockNumber() string     { return "0" }
func (Batch) RollbackOnFailure(error) {}

// InterruptingIndexer triggers an artificial failure at a specific node count
type InterruptingIndexer struct {
	*Indexer

	InterruptAfter uint
}

func (i *InterruptingIndexer) PushStateNode(b indexer.Batch, stateNode sdtypes.StateLeafNode, h string) error {
	i.RLock()
	indexedCount := len(i.StateNodes)
	i.RUnlock()
	if indexedCount >= int(i.InterruptAfter) {
		return fmt.Errorf("mock interrupt")
	}
	return i.Indexer.PushStateNode(b, stateNode, h)
}
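The InterruptingIndexer exists to simulate a mid-snapshot failure so that recovery can be exercised. A rough sketch of the intended pattern, with the snapshot-service invocation elided since its constructor is defined elsewhere in the repository; the import path is again an assumption:

package mocks_test

import (
	"testing"

	mocks "github.com/vulcanize/ipld-eth-state-snapshot/mocks"
)

// Sketch: fail after 42 cached state nodes, then restart with the plain
// Indexer and a recovery file to verify the snapshot resumes cleanly.
func TestInterruptedSnapshot(t *testing.T) {
	idx := &mocks.InterruptingIndexer{
		Indexer:        mocks.NewIndexer(t),
		InterruptAfter: 42,
	}
	if idx.InterruptAfter != 42 {
		t.Fatal("unexpected interrupt threshold")
	}
	// ... pass idx to the snapshot service; expect a "mock interrupt" error
	// once 42 state nodes have been indexed ...
}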
main.go (2 lines changed)
@ -18,7 +18,7 @@ package main
 import (
 	"github.com/sirupsen/logrus"
 
-	"github.com/cerc-io/ipld-eth-state-snapshot/cmd"
+	"github.com/vulcanize/ipld-eth-state-snapshot/cmd"
 )
 
 func main() {
@ -19,12 +19,12 @@ package prom
 import (
 	"github.com/prometheus/client_golang/prometheus"
 
-	mets "github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
+	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
 )
 
 // DBStatsGetter is an interface that gets sql.DBStats.
 type DBStatsGetter interface {
-	Stats() mets.DbStats
+	Stats() sql.Stats
 }
 
 // DBStatsCollector implements the prometheus.Collector interface.
@ -33,6 +33,7 @@ var (
 
 	stateNodeCount   prometheus.Counter
 	storageNodeCount prometheus.Counter
+	codeNodeCount    prometheus.Counter
 )
 
 func Init() {
@ -51,16 +52,13 @@ func Init() {
 		Name: "storage_node_count",
 		Help: "Number of storage nodes processed",
 	})
-}
-
-func RegisterGaugeFunc(name string, function func() float64) {
-	promauto.NewGaugeFunc(
-		prometheus.GaugeOpts{
-			Namespace: namespace,
-			Subsystem: statsSubsystem,
-			Name:      name,
-			Help:      name,
-		}, function)
+
+	codeNodeCount = promauto.NewCounter(prometheus.CounterOpts{
+		Namespace: namespace,
+		Subsystem: statsSubsystem,
+		Name:      "code_node_count",
+		Help:      "Number of code nodes processed",
+	})
 }
 
 // RegisterDBCollector create metric collector for given connection
@ -77,13 +75,16 @@ func IncStateNodeCount() {
 	}
 }
 
-// AddStorageNodeCount increments the number of storage nodes processed
-func AddStorageNodeCount(count int) {
-	if metrics && count > 0 {
-		storageNodeCount.Add(float64(count))
+// IncStorageNodeCount increments the number of storage nodes processed
+func IncStorageNodeCount() {
+	if metrics {
+		storageNodeCount.Inc()
 	}
 }
 
-func Enabled() bool {
-	return metrics
+// IncCodeNodeCount increments the number of code nodes processed
+func IncCodeNodeCount() {
+	if metrics {
+		codeNodeCount.Inc()
+	}
 }
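For orientation, a sketch of an illustrative call site for the counters on the new side of this hunk; the surrounding switch is hypothetical, but the three Inc* functions are the ones defined above, and each is a no-op unless metrics are enabled:

package example

import "github.com/vulcanize/ipld-eth-state-snapshot/pkg/prom"

// processLeaf is a hypothetical call site: each node kind encountered during
// trie iteration bumps its own Prometheus counter.
func processLeaf(isStorage, isCode bool) {
	switch {
	case isCode:
		prom.IncCodeNodeCount()
	case isStorage:
		prom.IncStorageNodeCount()
	default:
		prom.IncStateNodeCount()
	}
}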
@ -1,174 +0,0 @@
package prom

import (
	"bytes"
	"fmt"
	"sync"
	"sync/atomic"

	iterutil "github.com/cerc-io/eth-iterator-utils"
	"github.com/cerc-io/eth-iterator-utils/tracker"
	"github.com/ethereum/go-ethereum/trie"
)

var trackedIterCount atomic.Int32

// Tracker which wraps tracked iterators in metrics-reporting iterators
type MetricsTracker struct {
	*tracker.TrackerImpl
}

type metricsIterator struct {
	trie.NodeIterator
	id int32
	// count uint
	done     bool
	lastPath []byte
	sync.RWMutex
}

func NewTracker(file string, bufsize uint) *MetricsTracker {
	return &MetricsTracker{TrackerImpl: tracker.NewImpl(file, bufsize)}
}

func (t *MetricsTracker) wrap(tracked *tracker.Iterator) *metricsIterator {
	startPath, endPath := tracked.Bounds()
	pathDepth := max(max(len(startPath), len(endPath)), 1)
	totalSteps := estimateSteps(startPath, endPath, pathDepth)

	ret := &metricsIterator{
		NodeIterator: tracked,
		id:           trackedIterCount.Add(1),
	}

	RegisterGaugeFunc(
		fmt.Sprintf("tracked_iterator_%d", ret.id),
		func() float64 {
			ret.RLock()
			done := ret.done
			lastPath := ret.lastPath
			ret.RUnlock()

			if done {
				return 100.0
			}

			if lastPath == nil {
				return 0.0
			}

			// estimate remaining distance based on current position and node count
			remainingSteps := estimateSteps(lastPath, endPath, pathDepth)
			return (float64(totalSteps) - float64(remainingSteps)) / float64(totalSteps) * 100.0
		})
	return ret
}

func (t *MetricsTracker) Restore(ctor iterutil.IteratorConstructor) (
	[]trie.NodeIterator, []trie.NodeIterator, error,
) {
	iters, bases, err := t.TrackerImpl.Restore(ctor)
	if err != nil {
		return nil, nil, err
	}
	ret := make([]trie.NodeIterator, len(iters))
	for i, tracked := range iters {
		ret[i] = t.wrap(tracked)
	}
	return ret, bases, nil
}

func (t *MetricsTracker) Tracked(it trie.NodeIterator) trie.NodeIterator {
	tracked := t.TrackerImpl.Tracked(it)
	return t.wrap(tracked)
}

func (it *metricsIterator) Next(descend bool) bool {
	ret := it.NodeIterator.Next(descend)
	it.Lock()
	defer it.Unlock()
	if ret {
		it.lastPath = it.Path()
	} else {
		it.done = true
	}
	return ret
}

// Estimate the number of iterations necessary to step from start to end.
func estimateSteps(start []byte, end []byte, depth int) uint64 {
	// We see paths in several forms (nil, 0600, 06, etc.). We need to adjust them to a comparable form.
	// For nil, start and end indicate the extremes of 0x0 and 0x10. For differences in depth, we often see a
	// start/end range on a bounded iterator specified like 0500:0600, while the value returned by it.Path() may
	// be shorter, like 06. Since our goal is to estimate how many steps it would take to move from start to end,
	// we want to perform the comparison at a stable depth, since to move from 05 to 06 is only 1 step, but
	// to move from 0500 to 06 is 16.
	normalizePathRange := func(start []byte, end []byte, depth int) ([]byte, []byte) {
		if 0 == len(start) {
			start = []byte{0x0}
		}
		if 0 == len(end) {
			end = []byte{0x10}
		}
		normalizedStart := make([]byte, depth)
		normalizedEnd := make([]byte, depth)
		for i := 0; i < depth; i++ {
			if i < len(start) {
				normalizedStart[i] = start[i]
			}
			if i < len(end) {
				normalizedEnd[i] = end[i]
			}
		}
		return normalizedStart, normalizedEnd
	}

	// We have no need to handle negative exponents, so uints are fine.
	pow := func(x uint64, y uint) uint64 {
		ret := uint64(1)
		for i := uint(0); i < y; i++ {
			ret *= x
		}
		return ret
	}

	// Fix the paths.
	start, end = normalizePathRange(start, end, depth)

	// No negative distances, if the start is already >= end, the distance is 0.
	if bytes.Compare(start, end) >= 0 {
		return 0
	}

	// Subtract each component, right to left, carrying over if necessary.
	difference := make([]byte, len(start))
	var carry byte = 0
	for i := len(start) - 1; i >= 0; i-- {
		result := end[i] - start[i] - carry
		if result > 0xf && i > 0 {
			result &= 0xf
			carry = 1
		} else {
			carry = 0
		}
		difference[i] = result
	}

	// Calculate the result.
	var ret uint64 = 0
	for i := 0; i < len(difference); i++ {
		ret += uint64(difference[i]) * pow(16, uint(len(difference)-i-1))
	}

	return ret
}

func max(a int, b int) int {
	if a > b {
		return a
	}
	return b
}
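To make the estimateSteps arithmetic concrete: at depth 1 the step from path [5] to [6] counts as 1, while at depth 2 the step from [5, 0] to [6] (padded to [6, 0]) counts as 16, since each trailing nibble position contributes a factor of 16. A standalone sketch re-implementing the same calculation for illustration; this is not the package's exported API:

package main

import (
	"bytes"
	"fmt"
)

// nibbleDistance re-implements the estimateSteps idea above: pad both paths
// to a fixed depth, subtract nibble-wise with borrow, and weight each
// position by 16^(depth-position-1).
func nibbleDistance(start, end []byte, depth int) uint64 {
	s := make([]byte, depth)
	e := make([]byte, depth)
	copy(s, start)
	copy(e, end)
	if bytes.Compare(s, e) >= 0 {
		return 0
	}
	var dist uint64
	var borrow byte
	for i := depth - 1; i >= 0; i-- {
		d := e[i] - s[i] - borrow
		if d > 0xf && i > 0 {
			d &= 0xf
			borrow = 1
		} else {
			borrow = 0
		}
		weight := uint64(1)
		for j := 0; j < depth-i-1; j++ {
			weight *= 16
		}
		dist += uint64(d) * weight
	}
	return dist
}

func main() {
	fmt.Println(nibbleDistance([]byte{5}, []byte{6}, 1))    // 1
	fmt.Println(nibbleDistance([]byte{5, 0}, []byte{6}, 2)) // 16
}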
@ -19,13 +19,10 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/ethereum/go-ethereum/common"
-
 	"github.com/sirupsen/logrus"
 
-	"github.com/cerc-io/plugeth-statediff/indexer/database/file"
-	"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
-	ethNode "github.com/cerc-io/plugeth-statediff/indexer/node"
+	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
+	ethNode "github.com/ethereum/go-ethereum/statediff/indexer/node"
 	"github.com/spf13/viper"
 )
 
@ -41,55 +38,39 @@
 
 // Config contains params for both databases the service uses
 type Config struct {
-	Eth     *EthDBConfig
+	Eth     *EthConfig
 	DB      *DBConfig
 	File    *FileConfig
-	Service *ServiceConfig
 }
 
-// EthDBConfig is config parameters for the chain DB.
-type EthDBConfig struct {
-	DBPath        string
+// EthConfig is config parameters for the chain.
+type EthConfig struct {
+	LevelDBPath   string
 	AncientDBPath string
 	NodeInfo      ethNode.Info
 }
 
-// DBConfig contains options for DB output mode.
-type DBConfig = postgres.Config
+// DBConfig is config parameters for DB.
+type DBConfig struct {
+	URI        string
+	ConnConfig postgres.Config
+}
 
-// FileConfig contains options for file output mode. Note that this service currently only supports
-// CSV output, and does not record watched addresses, so not all fields are used.
-type FileConfig = file.Config
-
-type ServiceConfig struct {
-	AllowedAccounts []common.Address
-}
+type FileConfig struct {
+	OutputDir string
+}
 
 func NewConfig(mode SnapshotMode) (*Config, error) {
 	ret := &Config{
-		&EthDBConfig{},
+		&EthConfig{},
 		&DBConfig{},
 		&FileConfig{},
-		&ServiceConfig{},
 	}
 	return ret, ret.Init(mode)
 }
 
-func NewInPlaceSnapshotConfig() *Config {
-	ret := &Config{
-		&EthDBConfig{},
-		&DBConfig{},
-		&FileConfig{},
-		&ServiceConfig{},
-	}
-	InitDB(ret.DB)
-
-	return ret
-}
-
 // Init Initialises config
 func (c *Config) Init(mode SnapshotMode) error {
-	viper.BindEnv(LOG_FILE_TOML, LOG_FILE)
 	viper.BindEnv(ETH_NODE_ID_TOML, ETH_NODE_ID)
 	viper.BindEnv(ETH_CLIENT_NAME_TOML, ETH_CLIENT_NAME)
 	viper.BindEnv(ETH_GENESIS_BLOCK_TOML, ETH_GENESIS_BLOCK)
@ -104,27 +85,24 @@ func (c *Config) Init(mode SnapshotMode) error {
 		ChainID: viper.GetUint64(ETH_CHAIN_ID_TOML),
 	}
 
-	viper.BindEnv(ETHDB_ANCIENT_TOML, ETHDB_ANCIENT)
-	viper.BindEnv(ETHDB_PATH_TOML, ETHDB_PATH)
+	viper.BindEnv(ANCIENT_DB_PATH_TOML, ANCIENT_DB_PATH)
+	viper.BindEnv(LVL_DB_PATH_TOML, LVL_DB_PATH)
 
-	c.Eth.DBPath = viper.GetString(ETHDB_PATH_TOML)
-	c.Eth.AncientDBPath = viper.GetString(ETHDB_ANCIENT_TOML)
-	if len(c.Eth.AncientDBPath) == 0 {
-		c.Eth.AncientDBPath = c.Eth.DBPath + "/ancient"
-	}
+	c.Eth.AncientDBPath = viper.GetString(ANCIENT_DB_PATH_TOML)
+	c.Eth.LevelDBPath = viper.GetString(LVL_DB_PATH_TOML)
 
 	switch mode {
 	case FileSnapshot:
-		InitFile(c.File)
+		c.File.Init()
 	case PgSnapshot:
-		InitDB(c.DB)
+		c.DB.Init()
 	default:
 		return fmt.Errorf("no output mode specified")
 	}
-	return c.Service.Init()
+	return nil
 }
 
-func InitDB(c *DBConfig) {
+func (c *DBConfig) Init() {
 	viper.BindEnv(DATABASE_NAME_TOML, DATABASE_NAME)
 	viper.BindEnv(DATABASE_HOSTNAME_TOML, DATABASE_HOSTNAME)
 	viper.BindEnv(DATABASE_PORT_TOML, DATABASE_PORT)
@ -134,55 +112,28 @@ func InitDB(c *DBConfig) {
 	viper.BindEnv(DATABASE_MAX_OPEN_CONNECTIONS_TOML, DATABASE_MAX_OPEN_CONNECTIONS)
 	viper.BindEnv(DATABASE_MAX_CONN_LIFETIME_TOML, DATABASE_MAX_CONN_LIFETIME)
 
+	dbParams := postgres.Config{}
 	// DB params
-	c.DatabaseName = viper.GetString(DATABASE_NAME_TOML)
-	c.Hostname = viper.GetString(DATABASE_HOSTNAME_TOML)
-	c.Port = viper.GetInt(DATABASE_PORT_TOML)
-	c.Username = viper.GetString(DATABASE_USER_TOML)
-	c.Password = viper.GetString(DATABASE_PASSWORD_TOML)
+	dbParams.DatabaseName = viper.GetString(DATABASE_NAME_TOML)
+	dbParams.Hostname = viper.GetString(DATABASE_HOSTNAME_TOML)
+	dbParams.Port = viper.GetInt(DATABASE_PORT_TOML)
+	dbParams.Username = viper.GetString(DATABASE_USER_TOML)
+	dbParams.Password = viper.GetString(DATABASE_PASSWORD_TOML)
 	// Connection config
-	c.MaxIdle = viper.GetInt(DATABASE_MAX_IDLE_CONNECTIONS_TOML)
-	c.MaxConns = viper.GetInt(DATABASE_MAX_OPEN_CONNECTIONS_TOML)
-	c.MaxConnLifetime = time.Duration(viper.GetInt(DATABASE_MAX_CONN_LIFETIME_TOML)) * time.Second
+	dbParams.MaxIdle = viper.GetInt(DATABASE_MAX_IDLE_CONNECTIONS_TOML)
+	dbParams.MaxConns = viper.GetInt(DATABASE_MAX_OPEN_CONNECTIONS_TOML)
+	dbParams.MaxConnLifetime = time.Duration(viper.GetInt(DATABASE_MAX_CONN_LIFETIME_TOML)) * time.Second
 
-	c.Driver = postgres.SQLX
+	c.ConnConfig = dbParams
+	c.URI = dbParams.DbConnectionString()
 }
 
-func InitFile(c *FileConfig) error {
+func (c *FileConfig) Init() error {
 	viper.BindEnv(FILE_OUTPUT_DIR_TOML, FILE_OUTPUT_DIR)
 	c.OutputDir = viper.GetString(FILE_OUTPUT_DIR_TOML)
 	if c.OutputDir == "" {
 		logrus.Infof("no output directory set, using default: %s", defaultOutputDir)
 		c.OutputDir = defaultOutputDir
 	}
-	// Only support CSV for now
-	c.Mode = file.CSV
-	return nil
-}
-
-func (c *ServiceConfig) Init() error {
-	viper.BindEnv(SNAPSHOT_BLOCK_HEIGHT_TOML, SNAPSHOT_BLOCK_HEIGHT)
-	viper.BindEnv(SNAPSHOT_MODE_TOML, SNAPSHOT_MODE)
-	viper.BindEnv(SNAPSHOT_WORKERS_TOML, SNAPSHOT_WORKERS)
-	viper.BindEnv(SNAPSHOT_RECOVERY_FILE_TOML, SNAPSHOT_RECOVERY_FILE)
-
-	viper.BindEnv(PROM_DB_STATS_TOML, PROM_DB_STATS)
-	viper.BindEnv(PROM_HTTP_TOML, PROM_HTTP)
-	viper.BindEnv(PROM_HTTP_ADDR_TOML, PROM_HTTP_ADDR)
-	viper.BindEnv(PROM_HTTP_PORT_TOML, PROM_HTTP_PORT)
-	viper.BindEnv(PROM_METRICS_TOML, PROM_METRICS)
-
-	viper.BindEnv(SNAPSHOT_ACCOUNTS_TOML, SNAPSHOT_ACCOUNTS)
-	var allowedAccounts []string
-	viper.UnmarshalKey(SNAPSHOT_ACCOUNTS_TOML, &allowedAccounts)
-	accountsLen := len(allowedAccounts)
-	if accountsLen != 0 {
-		c.AllowedAccounts = make([]common.Address, 0, accountsLen)
-		for _, allowedAccount := range allowedAccounts {
-			c.AllowedAccounts = append(c.AllowedAccounts, common.HexToAddress(allowedAccount))
-		}
-	} else {
-		logrus.Infof("no snapshot addresses specified, will perform snapshot of entire trie(s)")
-	}
 	return nil
 }
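The BindEnv calls above are what let every TOML key be overridden from the environment: the env var, when set, wins over the value in the config file. A minimal standalone sketch of the pattern, using the leveldb key/env pair from this diff:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/viper"
)

// Minimal sketch of the BindEnv pattern used in Config.Init: the TOML key
// "leveldb.path" is bound to the LVL_DB_PATH environment variable, so the
// env var takes precedence over any value read from the config file.
func main() {
	os.Setenv("LVL_DB_PATH", "/data/geth/chaindata")
	viper.BindEnv("leveldb.path", "LVL_DB_PATH")
	fmt.Println(viper.GetString("leveldb.path")) // /data/geth/chaindata
}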
@ -1,27 +0,0 @@
package snapshot_test

import (
	"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
	ethnode "github.com/cerc-io/plugeth-statediff/indexer/node"
)

var (
	DefaultNodeInfo = ethnode.Info{
		ID:           "test_nodeid",
		ClientName:   "test_client",
		GenesisBlock: "TEST_GENESIS",
		NetworkID:    "test_network",
		ChainID:      0,
	}
	DefaultPgConfig = postgres.Config{
		Hostname:     "localhost",
		Port:         8077,
		DatabaseName: "cerc_testing",
		Username:     "vdbm",
		Password:     "password",

		MaxIdle:         0,
		MaxConnLifetime: 0,
		MaxConns:        4,
	}
)
@ -21,10 +21,9 @@ const (
 	SNAPSHOT_WORKERS       = "SNAPSHOT_WORKERS"
 	SNAPSHOT_RECOVERY_FILE = "SNAPSHOT_RECOVERY_FILE"
 	SNAPSHOT_MODE          = "SNAPSHOT_MODE"
-	SNAPSHOT_ACCOUNTS      = "SNAPSHOT_ACCOUNTS"
 
-	LOG_LEVEL = "LOG_LEVEL"
-	LOG_FILE  = "LOG_FILE"
+	LOGRUS_LEVEL = "LOGRUS_LEVEL"
+	LOGRUS_FILE  = "LOGRUS_FILE"
 
 	PROM_METRICS = "PROM_METRICS"
 	PROM_HTTP    = "PROM_HTTP"
@ -34,8 +33,8 @@ const (
 
 	FILE_OUTPUT_DIR = "FILE_OUTPUT_DIR"
 
-	ETHDB_ANCIENT = "ETHDB_ANCIENT"
-	ETHDB_PATH    = "ETHDB_PATH"
+	ANCIENT_DB_PATH = "ANCIENT_DB_PATH"
+	LVL_DB_PATH     = "LVL_DB_PATH"
 
 	ETH_CLIENT_NAME   = "ETH_CLIENT_NAME"
 	ETH_GENESIS_BLOCK = "ETH_GENESIS_BLOCK"
@ -59,10 +58,9 @@ const (
 	SNAPSHOT_WORKERS_TOML       = "snapshot.workers"
 	SNAPSHOT_RECOVERY_FILE_TOML = "snapshot.recoveryFile"
 	SNAPSHOT_MODE_TOML          = "snapshot.mode"
-	SNAPSHOT_ACCOUNTS_TOML      = "snapshot.accounts"
 
-	LOG_LEVEL_TOML = "log.level"
-	LOG_FILE_TOML  = "log.file"
+	LOGRUS_LEVEL_TOML = "log.level"
+	LOGRUS_FILE_TOML  = "log.file"
 
 	PROM_METRICS_TOML = "prom.metrics"
 	PROM_HTTP_TOML    = "prom.http"
@ -72,8 +70,8 @@ const (
 
 	FILE_OUTPUT_DIR_TOML = "file.outputDir"
 
-	ETHDB_ANCIENT_TOML = "ethdb.ancient"
-	ETHDB_PATH_TOML    = "ethdb.path"
+	ANCIENT_DB_PATH_TOML = "leveldb.ancient"
+	LVL_DB_PATH_TOML     = "leveldb.path"
 
 	ETH_CLIENT_NAME_TOML   = "ethereum.clientName"
 	ETH_GENESIS_BLOCK_TOML = "ethereum.genesisBlock"
@ -97,10 +95,9 @@ const (
 	SNAPSHOT_WORKERS_CLI       = "workers"
 	SNAPSHOT_RECOVERY_FILE_CLI = "recovery-file"
 	SNAPSHOT_MODE_CLI          = "snapshot-mode"
-	SNAPSHOT_ACCOUNTS_CLI      = "snapshot-accounts"
 
-	LOG_LEVEL_CLI = "log-level"
-	LOG_FILE_CLI  = "log-file"
+	LOGRUS_LEVEL_CLI = "log-level"
+	LOGRUS_FILE_CLI  = "log-file"
 
 	PROM_METRICS_CLI = "prom-metrics"
 	PROM_HTTP_CLI    = "prom-http"
@ -110,8 +107,8 @@ const (
 
 	FILE_OUTPUT_DIR_CLI = "output-dir"
 
-	ETHDB_ANCIENT_CLI = "ancient-path"
-	ETHDB_PATH_CLI    = "ethdb-path"
+	ANCIENT_DB_PATH_CLI = "ancient-path"
+	LVL_DB_PATH_CLI     = "leveldb-path"
 
 	ETH_CLIENT_NAME_CLI   = "ethereum-client-name"
 	ETH_GENESIS_BLOCK_CLI = "ethereum-genesis-block"
pkg/snapshot/file/publisher.go (new file, 301 lines)
@ -0,0 +1,301 @@
// Copyright © 2020 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package publisher

import (
	"encoding/csv"
	"fmt"
	"os"
	"path/filepath"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ipfs/go-cid"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	dshelp "github.com/ipfs/go-ipfs-ds-help"
	"github.com/multiformats/go-multihash"
	"github.com/sirupsen/logrus"

	"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
	nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
	"github.com/ethereum/go-ethereum/statediff/indexer/shared"
	"github.com/vulcanize/ipld-eth-state-snapshot/pkg/prom"
	snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
)

var _ snapt.Publisher = (*publisher)(nil)

var (
	// tables written once per block
	perBlockTables = []*snapt.Table{
		&snapt.TableIPLDBlock,
		&snapt.TableNodeInfo,
		&snapt.TableHeader,
	}
	// tables written during state iteration
	perNodeTables = []*snapt.Table{
		&snapt.TableIPLDBlock,
		&snapt.TableStateNode,
		&snapt.TableStorageNode,
	}
)

const logInterval = 1 * time.Minute

type publisher struct {
	dir     string // dir containing output files
	writers fileWriters

	nodeInfo nodeinfo.Info

	startTime          time.Time
	currBatchSize      uint
	stateNodeCounter   uint64
	storageNodeCounter uint64
	codeNodeCounter    uint64
	txCounter          uint32
}

type fileWriter struct {
	*csv.Writer
}

// fileWriters wraps the file writers for each output table
type fileWriters map[string]fileWriter

type fileTx struct{ fileWriters }

func (tx fileWriters) Commit() error {
	for _, w := range tx {
		w.Flush()
		if err := w.Error(); err != nil {
			return err
		}
	}
	return nil
}

func (fileWriters) Rollback() error { return nil } // TODO: delete the file?

func newFileWriter(path string) (ret fileWriter, err error) {
	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		return
	}
	ret = fileWriter{csv.NewWriter(file)}
	return
}

func (tx fileWriters) write(tbl *snapt.Table, args ...interface{}) error {
	row := tbl.ToCsvRow(args...)
	return tx[tbl.Name].Write(row)
}

func makeFileWriters(dir string, tables []*snapt.Table) (fileWriters, error) {
	if err := os.MkdirAll(dir, 0755); err != nil {
		return nil, err
	}
	writers := fileWriters{}
	for _, tbl := range tables {
		w, err := newFileWriter(TableFile(dir, tbl.Name))
		if err != nil {
			return nil, err
		}
		writers[tbl.Name] = w
	}
	return writers, nil
}

// NewPublisher creates a publisher which writes to per-table CSV files which can be imported
// with the Postgres COPY command.
// The output directory will be created if it does not exist.
func NewPublisher(path string, node nodeinfo.Info) (*publisher, error) {
	if err := os.MkdirAll(path, 0777); err != nil {
		return nil, fmt.Errorf("unable to make MkdirAll for path: %s err: %s", path, err)
	}
	writers, err := makeFileWriters(path, perBlockTables)
	if err != nil {
		return nil, err
	}
	pub := &publisher{
		writers:   writers,
		dir:       path,
		nodeInfo:  node,
		startTime: time.Now(),
	}
	go pub.logNodeCounters()
	return pub, nil
}

func TableFile(dir, name string) string { return filepath.Join(dir, name+".csv") }

func (p *publisher) txDir(index uint32) string {
	return filepath.Join(p.dir, fmt.Sprintf("%010d", index))
}

func (p *publisher) BeginTx() (snapt.Tx, error) {
	index := atomic.AddUint32(&p.txCounter, 1) - 1
	dir := p.txDir(index)
	writers, err := makeFileWriters(dir, perNodeTables)
	if err != nil {
		return nil, err
	}

	return fileTx{writers}, nil
}

// publishRaw derives a cid from raw bytes and provided codec and multihash type, and writes it to the db tx
// returns the CID and blockstore prefixed multihash key
func (tx fileWriters) publishRaw(codec uint64, raw []byte) (cid, prefixedKey string, err error) {
	c, err := ipld.RawdataToCid(codec, raw, multihash.KECCAK_256)
	if err != nil {
		return
	}
	cid = c.String()
	prefixedKey, err = tx.publishIPLD(c, raw)
	return
}

func (tx fileWriters) publishIPLD(c cid.Cid, raw []byte) (string, error) {
	dbKey := dshelp.MultihashToDsKey(c.Hash())
	prefixedKey := blockstore.BlockPrefix.String() + dbKey.String()
	return prefixedKey, tx.write(&snapt.TableIPLDBlock, prefixedKey, raw)
}

// PublishHeader writes the header to the ipfs backing pg datastore and adds secondary
// indexes in the header_cids table
func (p *publisher) PublishHeader(header *types.Header) error {
	headerNode, err := ipld.NewEthHeader(header)
	if err != nil {
		return err
	}
	if _, err = p.writers.publishIPLD(headerNode.Cid(), headerNode.RawData()); err != nil {
		return err
	}

	mhKey := shared.MultihashKeyFromCID(headerNode.Cid())
	err = p.writers.write(&snapt.TableNodeInfo, p.nodeInfo.GenesisBlock, p.nodeInfo.NetworkID, p.nodeInfo.ID,
		p.nodeInfo.ClientName, p.nodeInfo.ChainID)
	if err != nil {
		return err
	}
	err = p.writers.write(&snapt.TableHeader, header.Number.String(), header.Hash().Hex(), header.ParentHash.Hex(),
headerNode.Cid().String(), 0, p.nodeInfo.ID, 0, header.Root.Hex(), header.TxHash.Hex(),
|
||||||
|
header.ReceiptHash.Hex(), header.UncleHash.Hex(), header.Bloom.Bytes(), header.Time, mhKey,
|
||||||
|
0, header.Coinbase.String())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return p.writers.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
// PublishStateNode writes the state node to the ipfs backing datastore and adds secondary indexes
|
||||||
|
// in the state_cids table
|
||||||
|
func (p *publisher) PublishStateNode(node *snapt.Node, headerID string, snapTx snapt.Tx) error {
|
||||||
|
var stateKey string
|
||||||
|
if !snapt.IsNullHash(node.Key) {
|
||||||
|
stateKey = node.Key.Hex()
|
||||||
|
}
|
||||||
|
|
||||||
|
tx := snapTx.(fileTx)
|
||||||
|
stateCIDStr, mhKey, err := tx.publishRaw(ipld.MEthStateTrie, node.Value)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = tx.write(&snapt.TableStateNode, headerID, stateKey, stateCIDStr, node.Path,
|
||||||
|
node.NodeType, false, mhKey)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// increment state node counter.
|
||||||
|
atomic.AddUint64(&p.stateNodeCounter, 1)
|
||||||
|
prom.IncStateNodeCount()
|
||||||
|
|
||||||
|
// increment current batch size counter
|
||||||
|
p.currBatchSize += 2
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// PublishStorageNode writes the storage node to the ipfs backing pg datastore and adds secondary
|
||||||
|
// indexes in the storage_cids table
|
||||||
|
func (p *publisher) PublishStorageNode(node *snapt.Node, headerID string, statePath []byte, snapTx snapt.Tx) error {
|
||||||
|
var storageKey string
|
||||||
|
if !snapt.IsNullHash(node.Key) {
|
||||||
|
storageKey = node.Key.Hex()
|
||||||
|
}
|
||||||
|
|
||||||
|
tx := snapTx.(fileTx)
|
||||||
|
storageCIDStr, mhKey, err := tx.publishRaw(ipld.MEthStorageTrie, node.Value)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = tx.write(&snapt.TableStorageNode, headerID, statePath, storageKey, storageCIDStr, node.Path,
|
||||||
|
node.NodeType, false, mhKey)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// increment storage node counter.
|
||||||
|
atomic.AddUint64(&p.storageNodeCounter, 1)
|
||||||
|
prom.IncStorageNodeCount()
|
||||||
|
|
||||||
|
// increment current batch size counter
|
||||||
|
p.currBatchSize += 2
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PublishCode writes code to the ipfs backing pg datastore
|
||||||
|
func (p *publisher) PublishCode(codeHash common.Hash, codeBytes []byte, snapTx snapt.Tx) error {
|
||||||
|
// no codec for code, doesn't matter though since blockstore key is multihash-derived
|
||||||
|
mhKey, err := shared.MultihashKeyFromKeccak256(codeHash)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error deriving multihash key from codehash: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tx := snapTx.(fileTx)
|
||||||
|
if err = tx.write(&snapt.TableIPLDBlock, mhKey, codeBytes); err != nil {
|
||||||
|
return fmt.Errorf("error publishing code IPLD: %v", err)
|
||||||
|
}
|
||||||
|
// increment code node counter.
|
||||||
|
atomic.AddUint64(&p.codeNodeCounter, 1)
|
||||||
|
prom.IncCodeNodeCount()
|
||||||
|
|
||||||
|
p.currBatchSize++
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *publisher) PrepareTxForBatch(tx snapt.Tx, maxBatchSize uint) (snapt.Tx, error) {
|
||||||
|
return tx, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// logNodeCounters periodically logs the number of node processed.
|
||||||
|
func (p *publisher) logNodeCounters() {
|
||||||
|
t := time.NewTicker(logInterval)
|
||||||
|
for range t.C {
|
||||||
|
p.printNodeCounters("progress")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *publisher) printNodeCounters(msg string) {
|
||||||
|
logrus.WithFields(logrus.Fields{
|
||||||
|
"runtime": time.Now().Sub(p.startTime).String(),
|
||||||
|
"state nodes": atomic.LoadUint64(&p.stateNodeCounter),
|
||||||
|
"storage nodes": atomic.LoadUint64(&p.storageNodeCounter),
|
||||||
|
"code nodes": atomic.LoadUint64(&p.codeNodeCounter),
|
||||||
|
}).Info(msg)
|
||||||
|
}
|
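For context, a minimal sketch of driving the CSV publisher above end to end. The output path and surrounding error handling are illustrative only; `nodeInfo` and `header` would come from the caller, as in the test below.

	pub, err := NewPublisher("./snapshot-output", nodeInfo) // illustrative path
	if err != nil {
		logrus.Fatal(err)
	}
	// writes the header IPLD plus node/header rows, then flushes the per-block CSVs
	if err = pub.PublishHeader(header); err != nil {
		logrus.Fatal(err)
	}
	tx, err := pub.BeginTx() // opens a numbered subdirectory of per-node CSVs
	if err != nil {
		logrus.Fatal(err)
	}
	// ... PublishStateNode / PublishStorageNode / PublishCode per trie node ...
	if err = tx.Commit(); err != nil { // flushes the per-node CSV writers
		logrus.Fatal(err)
	}
	// each <table>.csv can then be imported with:
	//   COPY <table> FROM '<dir>/<table>.csv' CSV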
133	pkg/snapshot/file/publisher_test.go	Normal file
@@ -0,0 +1,133 @@
package publisher

import (
	"context"
	"encoding/csv"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"testing"

	"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
	"github.com/jackc/pgx/v4"

	fixt "github.com/vulcanize/ipld-eth-state-snapshot/fixture"
	snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
	"github.com/vulcanize/ipld-eth-state-snapshot/test"
)

var (
	pgConfig = test.DefaultPgConfig
	nodeInfo = test.DefaultNodeInfo
	// tables ordered according to fkey dependencies
	allTables = []*snapt.Table{
		&snapt.TableIPLDBlock,
		&snapt.TableNodeInfo,
		&snapt.TableHeader,
		&snapt.TableStateNode,
		&snapt.TableStorageNode,
	}
)

func writeFiles(t *testing.T, dir string) *publisher {
	pub, err := NewPublisher(dir, nodeInfo)
	test.NoError(t, err)
	test.NoError(t, pub.PublishHeader(&fixt.Block1_Header))
	tx, err := pub.BeginTx()
	test.NoError(t, err)

	headerID := fixt.Block1_Header.Hash().String()
	test.NoError(t, pub.PublishStateNode(&fixt.Block1_StateNode0, headerID, tx))

	test.NoError(t, tx.Commit())
	return pub
}

// verify that we can parse the csvs
// TODO check actual data
func verifyFileData(t *testing.T, path string, tbl *snapt.Table) {
	file, err := os.Open(path)
	test.NoError(t, err)
	r := csv.NewReader(file)
	r.FieldsPerRecord = len(tbl.Columns)

	for {
		_, err := r.Read()
		if err == io.EOF {
			break
		}
		test.NoError(t, err)
	}
}

func TestWriting(t *testing.T) {
	dir := t.TempDir()
	// tempdir like /tmp/TempFoo/001/, TempFoo defaults to 0700
	test.NoError(t, os.Chmod(filepath.Dir(dir), 0755))

	pub := writeFiles(t, dir)

	for _, tbl := range perBlockTables {
		verifyFileData(t, TableFile(pub.dir, tbl.Name), tbl)
	}
	for i := uint32(0); i < pub.txCounter; i++ {
		for _, tbl := range perNodeTables {
			verifyFileData(t, TableFile(pub.txDir(i), tbl.Name), tbl)
		}
	}
}

// Note: DB user requires role membership "pg_read_server_files"
func TestPgCopy(t *testing.T) {
	test.NeedsDB(t)

	dir := t.TempDir()
	test.NoError(t, os.Chmod(filepath.Dir(dir), 0755))
	pub := writeFiles(t, dir)

	ctx := context.Background()
	conn, err := pgx.Connect(ctx, pgConfig.DbConnectionString())
	test.NoError(t, err)

	// clear existing test data
	pgDeleteTable := `DELETE FROM %s`
	for _, tbl := range allTables {
		_, err = conn.Exec(ctx, fmt.Sprintf(pgDeleteTable, tbl.Name))
		test.NoError(t, err)
	}

	// copy from files
	pgCopyStatement := `COPY %s FROM '%s' CSV`
	for _, tbl := range perBlockTables {
		stm := fmt.Sprintf(pgCopyStatement, tbl.Name, TableFile(pub.dir, tbl.Name))
		_, err = conn.Exec(ctx, stm)
		test.NoError(t, err)
	}
	for i := uint32(0); i < pub.txCounter; i++ {
		for _, tbl := range perNodeTables {
			stm := fmt.Sprintf(pgCopyStatement, tbl.Name, TableFile(pub.txDir(i), tbl.Name))
			_, err = conn.Exec(ctx, stm)
			test.NoError(t, err)
		}
	}

	// check header was successfully committed
	pgQueryHeader := `SELECT cid, block_hash
		FROM eth.header_cids
		WHERE block_number = $1`
	type res struct {
		CID       string
		BlockHash string
	}
	var header res
	err = conn.QueryRow(ctx, pgQueryHeader, fixt.Block1_Header.Number.Uint64()).Scan(
		&header.CID, &header.BlockHash)
	test.NoError(t, err)

	headerNode, err := ipld.NewEthHeader(&fixt.Block1_Header)
	test.NoError(t, err)
	test.ExpectEqual(t, headerNode.Cid().String(), header.CID)
	test.ExpectEqual(t, fixt.Block1_Header.Hash().String(), header.BlockHash)
}
26	pkg/snapshot/mock/util.go	Normal file
@@ -0,0 +1,26 @@
package mock

import (
	"fmt"

	"github.com/golang/mock/gomock"
)

type anyOfMatcher struct {
	values []interface{}
}

func (m anyOfMatcher) Matches(x interface{}) bool {
	for _, v := range m.values {
		if gomock.Eq(v).Matches(x) {
			return true
		}
	}
	return false
}

func (m anyOfMatcher) String() string {
	return fmt.Sprintf("is equal to any of %+v", m.values)
}

// AnyOf returns a gomock matcher that succeeds if the argument is equal to any of the given values.
func AnyOf(xs ...interface{}) anyOfMatcher {
	return anyOfMatcher{xs}
}
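A short usage sketch for this matcher; the mock publisher and the transaction values here are hypothetical stand-ins:

	// accepts a call whose last argument equals either tx1 or tx2
	pub.EXPECT().PublishStateNode(gomock.Any(), gomock.Any(), AnyOf(tx1, tx2))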
244	pkg/snapshot/pg/publisher.go	Normal file
@@ -0,0 +1,244 @@
// Copyright © 2020 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package pg

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ipfs/go-cid"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	dshelp "github.com/ipfs/go-ipfs-ds-help"
	"github.com/multiformats/go-multihash"
	log "github.com/sirupsen/logrus"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
	"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
	"github.com/ethereum/go-ethereum/statediff/indexer/shared"
	"github.com/vulcanize/ipld-eth-state-snapshot/pkg/prom"
	snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
)

var _ snapt.Publisher = (*publisher)(nil)

const logInterval = 1 * time.Minute

// publisher is a wrapper around the DB.
type publisher struct {
	db                 *postgres.DB
	currBatchSize      uint
	stateNodeCounter   uint64
	storageNodeCounter uint64
	codeNodeCounter    uint64
	startTime          time.Time
}

// NewPublisher creates a publisher.
func NewPublisher(db *postgres.DB) *publisher {
	return &publisher{
		db:        db,
		startTime: time.Now(),
	}
}

type pubTx struct {
	sql.Tx
	callback func()
}

func (tx pubTx) Rollback() error { return tx.Tx.Rollback(context.Background()) }
func (tx pubTx) Commit() error {
	if tx.callback != nil {
		defer tx.callback()
	}
	return tx.Tx.Commit(context.Background())
}
func (tx pubTx) Exec(sql string, args ...interface{}) (sql.Result, error) {
	return tx.Tx.Exec(context.Background(), sql, args...)
}

func (p *publisher) BeginTx() (snapt.Tx, error) {
	tx, err := p.db.Begin(context.Background())
	if err != nil {
		return nil, err
	}
	go p.logNodeCounters()
	return pubTx{tx, func() {
		p.printNodeCounters("final stats")
	}}, nil
}

// publishRaw derives a cid from the raw bytes and the provided codec and multihash type, and writes it to the db tx.
// Returns the CID and the blockstore-prefixed multihash key.
func (tx pubTx) publishRaw(codec uint64, raw []byte) (cid, prefixedKey string, err error) {
	c, err := ipld.RawdataToCid(codec, raw, multihash.KECCAK_256)
	if err != nil {
		return
	}
	cid = c.String()
	prefixedKey, err = tx.publishIPLD(c, raw)
	return
}

func (tx pubTx) publishIPLD(c cid.Cid, raw []byte) (string, error) {
	dbKey := dshelp.MultihashToDsKey(c.Hash())
	prefixedKey := blockstore.BlockPrefix.String() + dbKey.String()
	_, err := tx.Exec(snapt.TableIPLDBlock.ToInsertStatement(), prefixedKey, raw)
	return prefixedKey, err
}

// PublishHeader writes the header to the ipfs backing pg datastore and adds secondary indexes in the header_cids table
func (p *publisher) PublishHeader(header *types.Header) (err error) {
	headerNode, err := ipld.NewEthHeader(header)
	if err != nil {
		return err
	}

	snapTx, err := p.db.Begin(context.Background())
	if err != nil {
		return err
	}
	tx := pubTx{snapTx, nil}
	defer func() { err = snapt.CommitOrRollback(tx, err) }()

	if _, err = tx.publishIPLD(headerNode.Cid(), headerNode.RawData()); err != nil {
		return err
	}

	mhKey := shared.MultihashKeyFromCID(headerNode.Cid())
	_, err = tx.Exec(snapt.TableHeader.ToInsertStatement(), header.Number.Uint64(), header.Hash().Hex(),
		header.ParentHash.Hex(), headerNode.Cid().String(), "0", p.db.NodeID(), "0",
		header.Root.Hex(), header.TxHash.Hex(), header.ReceiptHash.Hex(), header.UncleHash.Hex(),
		header.Bloom.Bytes(), header.Time, mhKey, 0, header.Coinbase.String())
	return err
}

// PublishStateNode writes the state node to the ipfs backing datastore and adds secondary indexes in the state_cids table
func (p *publisher) PublishStateNode(node *snapt.Node, headerID string, snapTx snapt.Tx) error {
	var stateKey string
	if !snapt.IsNullHash(node.Key) {
		stateKey = node.Key.Hex()
	}

	tx := snapTx.(pubTx)
	stateCIDStr, mhKey, err := tx.publishRaw(ipld.MEthStateTrie, node.Value)
	if err != nil {
		return err
	}

	_, err = tx.Exec(snapt.TableStateNode.ToInsertStatement(),
		headerID, stateKey, stateCIDStr, node.Path, node.NodeType, false, mhKey)
	if err != nil {
		return err
	}
	// increment state node counter.
	atomic.AddUint64(&p.stateNodeCounter, 1)
	prom.IncStateNodeCount()

	// increment current batch size counter
	p.currBatchSize += 2
	return err
}

// PublishStorageNode writes the storage node to the ipfs backing pg datastore and adds secondary indexes in the storage_cids table
func (p *publisher) PublishStorageNode(node *snapt.Node, headerID string, statePath []byte, snapTx snapt.Tx) error {
	var storageKey string
	if !snapt.IsNullHash(node.Key) {
		storageKey = node.Key.Hex()
	}

	tx := snapTx.(pubTx)
	storageCIDStr, mhKey, err := tx.publishRaw(ipld.MEthStorageTrie, node.Value)
	if err != nil {
		return err
	}

	_, err = tx.Exec(snapt.TableStorageNode.ToInsertStatement(),
		headerID, statePath, storageKey, storageCIDStr, node.Path, node.NodeType, false, mhKey)
	if err != nil {
		return err
	}
	// increment storage node counter.
	atomic.AddUint64(&p.storageNodeCounter, 1)
	prom.IncStorageNodeCount()

	// increment current batch size counter
	p.currBatchSize += 2
	return err
}

// PublishCode writes code to the ipfs backing pg datastore
func (p *publisher) PublishCode(codeHash common.Hash, codeBytes []byte, snapTx snapt.Tx) error {
	// no codec for code, doesn't matter though since blockstore key is multihash-derived
	mhKey, err := shared.MultihashKeyFromKeccak256(codeHash)
	if err != nil {
		return fmt.Errorf("error deriving multihash key from codehash: %v", err)
	}

	tx := snapTx.(pubTx)
	if _, err = tx.Exec(snapt.TableIPLDBlock.ToInsertStatement(), mhKey, codeBytes); err != nil {
		return fmt.Errorf("error publishing code IPLD: %v", err)
	}

	// increment code node counter.
	atomic.AddUint64(&p.codeNodeCounter, 1)
	prom.IncCodeNodeCount()

	p.currBatchSize++
	return nil
}

func (p *publisher) PrepareTxForBatch(tx snapt.Tx, maxBatchSize uint) (snapt.Tx, error) {
	var err error
	// maximum batch size reached, commit the current transaction and begin a new transaction.
	if maxBatchSize <= p.currBatchSize {
		if err = tx.Commit(); err != nil {
			return nil, err
		}

		snapTx, err := p.db.Begin(context.Background())
		if err != nil {
			return nil, err
		}
		tx = pubTx{Tx: snapTx}

		p.currBatchSize = 0
	}

	return tx, nil
}

// logNodeCounters periodically logs the number of nodes processed.
func (p *publisher) logNodeCounters() {
	t := time.NewTicker(logInterval)
	for range t.C {
		p.printNodeCounters("progress")
	}
}

func (p *publisher) printNodeCounters(msg string) {
	log.WithFields(log.Fields{
		"runtime":       time.Since(p.startTime).String(),
		"state nodes":   atomic.LoadUint64(&p.stateNodeCounter),
		"storage nodes": atomic.LoadUint64(&p.storageNodeCounter),
		"code nodes":    atomic.LoadUint64(&p.codeNodeCounter),
	}).Info(msg)
}
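The intended call pattern for PrepareTxForBatch, sketched below. This mirrors how the snapshot service later in this diff drives the publisher; the node loop here is schematic.

	tx, err := pub.BeginTx()
	if err != nil {
		return err
	}
	for _, node := range nodes { // schematic iteration over resolved trie nodes
		// commits and reopens the tx whenever currBatchSize reaches maxBatchSize
		if tx, err = pub.PrepareTxForBatch(tx, maxBatchSize); err != nil {
			return err
		}
		if err = pub.PublishStateNode(node, headerID, tx); err != nil {
			return err
		}
	}
	return tx.Commit()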
78	pkg/snapshot/pg/publisher_test.go	Normal file
@@ -0,0 +1,78 @@
package pg

import (
	"context"
	"fmt"
	"testing"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
	"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
	"github.com/jackc/pgx/v4"

	fixt "github.com/vulcanize/ipld-eth-state-snapshot/fixture"
	snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
	"github.com/vulcanize/ipld-eth-state-snapshot/test"
)

var (
	pgConfig = test.DefaultPgConfig
	nodeInfo = test.DefaultNodeInfo
	// tables ordered according to fkey dependencies
	allTables = []*snapt.Table{
		&snapt.TableIPLDBlock,
		&snapt.TableNodeInfo,
		&snapt.TableHeader,
		&snapt.TableStateNode,
		&snapt.TableStorageNode,
	}
)

func writeData(t *testing.T) *publisher {
	driver, err := postgres.NewPGXDriver(context.Background(), pgConfig, nodeInfo)
	test.NoError(t, err)
	pub := NewPublisher(postgres.NewPostgresDB(driver))
	test.NoError(t, pub.PublishHeader(&fixt.Block1_Header))
	tx, err := pub.BeginTx()
	test.NoError(t, err)

	headerID := fixt.Block1_Header.Hash().String()
	test.NoError(t, pub.PublishStateNode(&fixt.Block1_StateNode0, headerID, tx))

	test.NoError(t, tx.Commit())
	return pub
}

// Note: DB user requires role membership "pg_read_server_files"
func TestBasic(t *testing.T) {
	test.NeedsDB(t)

	ctx := context.Background()
	conn, err := pgx.Connect(ctx, pgConfig.DbConnectionString())
	test.NoError(t, err)

	// clear existing test data
	pgDeleteTable := `DELETE FROM %s`
	for _, tbl := range allTables {
		_, err = conn.Exec(ctx, fmt.Sprintf(pgDeleteTable, tbl.Name))
		test.NoError(t, err)
	}

	_ = writeData(t)

	// check header was successfully committed
	pgQueryHeader := `SELECT cid, block_hash
		FROM eth.header_cids
		WHERE block_number = $1`
	type res struct {
		CID       string
		BlockHash string
	}
	var header res
	err = conn.QueryRow(ctx, pgQueryHeader, fixt.Block1_Header.Number.Uint64()).Scan(
		&header.CID, &header.BlockHash)
	test.NoError(t, err)

	headerNode, err := ipld.NewEthHeader(&fixt.Block1_Header)
	test.NoError(t, err)
	test.ExpectEqual(t, headerNode.Cid().String(), header.CID)
	test.ExpectEqual(t, fixt.Block1_Header.Hash().String(), header.BlockHash)
}
@@ -16,26 +16,23 @@
 package snapshot
 
 import (
-	"context"
+	"bytes"
+	"errors"
 	"fmt"
-	"math/big"
-	"os"
-	"os/signal"
 	"sync"
-	"syscall"
 
-	"github.com/cerc-io/ipld-eth-state-snapshot/pkg/prom"
-	statediff "github.com/cerc-io/plugeth-statediff"
-	"github.com/cerc-io/plugeth-statediff/adapt"
-	"github.com/cerc-io/plugeth-statediff/indexer"
-	"github.com/cerc-io/plugeth-statediff/types"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/trie"
 	log "github.com/sirupsen/logrus"
 
+	iter "github.com/vulcanize/go-eth-state-node-iterator"
+	. "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
 )
 
 var (
@@ -51,35 +48,34 @@ var (
 type Service struct {
 	ethDB         ethdb.Database
 	stateDB       state.Database
-	indexer       indexer.Indexer
+	ipfsPublisher Publisher
 	maxBatchSize  uint
+	tracker       iteratorTracker
 	recoveryFile  string
 }
 
-func NewEthDB(con *EthDBConfig) (ethdb.Database, error) {
-	return rawdb.Open(rawdb.OpenOptions{
-		Directory:         con.DBPath,
-		AncientsDirectory: con.AncientDBPath,
-		Namespace:         "ipld-eth-state-snapshot",
-		Cache:             1024,
-		Handles:           256,
-		ReadOnly:          true,
-	})
+func NewLevelDB(con *EthConfig) (ethdb.Database, error) {
+	edb, err := rawdb.NewLevelDBDatabaseWithFreezer(
+		con.LevelDBPath, 1024, 256, con.AncientDBPath, "ipld-eth-state-snapshot", true,
+	)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create NewLevelDBDatabaseWithFreezer: %s", err)
+	}
+	return edb, nil
 }
 
 // NewSnapshotService creates Service.
-func NewSnapshotService(edb ethdb.Database, indexer indexer.Indexer, recoveryFile string) (*Service, error) {
+func NewSnapshotService(edb ethdb.Database, pub Publisher, recoveryFile string) (*Service, error) {
 	return &Service{
 		ethDB:         edb,
 		stateDB:       state.NewDatabase(edb),
-		indexer:       indexer,
+		ipfsPublisher: pub,
 		maxBatchSize:  defaultBatchSize,
 		recoveryFile:  recoveryFile,
 	}, nil
 }
 
 type SnapshotParams struct {
-	WatchedAddresses []common.Address
 	Height  uint64
 	Workers uint
 }
@@ -87,86 +83,266 @@ type SnapshotParams struct {
 func (s *Service) CreateSnapshot(params SnapshotParams) error {
 	// extract header from lvldb and publish to PG-IPFS
 	// hold onto the headerID so that we can link the state nodes to this header
+	log.Infof("Creating snapshot at height %d", params.Height)
 	hash := rawdb.ReadCanonicalHash(s.ethDB, params.Height)
 	header := rawdb.ReadHeader(s.ethDB, hash, params.Height)
 	if header == nil {
 		return fmt.Errorf("unable to read canonical header at height %d", params.Height)
 	}
-	log.WithField("height", params.Height).WithField("hash", hash).Info("Creating snapshot")
 
-	// Context for snapshot work
-	ctx, cancelCtx := context.WithCancel(context.Background())
-	defer cancelCtx()
-	// Cancel context on receiving a signal. On cancellation, all tracked iterators complete
-	// processing of their current node before stopping.
-	captureSignal(cancelCtx)
+	log.Infof("head hash: %s head height: %d", hash.Hex(), params.Height)
 
-	var err error
-	tx := s.indexer.BeginTx(header.Number, ctx)
-	defer tx.RollbackOnFailure(err)
-
-	var headerid string
-	headerid, err = s.indexer.PushHeader(tx, header, big.NewInt(0), big.NewInt(0))
+	err := s.ipfsPublisher.PublishHeader(header)
 	if err != nil {
 		return err
 	}
 
-	tr := prom.NewTracker(s.recoveryFile, params.Workers)
+	tree, err := s.stateDB.OpenTrie(header.Root)
+	if err != nil {
+		return err
+	}
+
+	headerID := header.Hash().String()
+	s.tracker = newTracker(s.recoveryFile, int(params.Workers))
+	s.tracker.captureSignal()
+
+	var iters []trie.NodeIterator
+	// attempt to restore from recovery file if it exists
+	iters, err = s.tracker.restore(tree)
+	if err != nil {
+		log.Errorf("restore error: %s", err.Error())
+		return err
+	}
+
+	if iters != nil {
+		log.Debugf("restored iterators; count: %d", len(iters))
+		if params.Workers < uint(len(iters)) {
+			return fmt.Errorf(
+				"number of recovered workers (%d) is greater than number configured (%d)",
+				len(iters), params.Workers,
+			)
+		}
+	} else { // nothing to restore
+		log.Debugf("no iterators to restore")
+		if params.Workers > 1 {
+			iters = iter.SubtrieIterators(tree, params.Workers)
+		} else {
+			iters = []trie.NodeIterator{tree.NodeIterator(nil)}
+		}
+		for i, it := range iters {
+			iters[i] = s.tracker.tracked(it)
+		}
+	}
 
 	defer func() {
-		err := tr.CloseAndSave()
+		err := s.tracker.haltAndDump()
 		if err != nil {
 			log.Errorf("failed to write recovery file: %v", err)
 		}
 	}()
 
-	var nodeMtx, ipldMtx sync.Mutex
-	nodeSink := func(node types.StateLeafNode) error {
-		nodeMtx.Lock()
-		defer nodeMtx.Unlock()
-		prom.IncStateNodeCount()
-		prom.AddStorageNodeCount(len(node.StorageDiff))
-		return s.indexer.PushStateNode(tx, node, headerid)
-	}
-	ipldSink := func(c types.IPLD) error {
-		ipldMtx.Lock()
-		defer ipldMtx.Unlock()
-		return s.indexer.PushIPLD(tx, c)
-	}
-
-	sdparams := statediff.Params{
-		WatchedAddresses: params.WatchedAddresses,
-	}
-	sdparams.ComputeWatchedAddressesLeafPaths()
-	builder := statediff.NewBuilder(adapt.GethStateView(s.stateDB))
-	builder.SetSubtrieWorkers(params.Workers)
-	if err = builder.WriteStateSnapshot(ctx, header.Root, sdparams, nodeSink, ipldSink, tr); err != nil {
-		return err
-	}
-
-	if err = tx.Submit(); err != nil {
-		return fmt.Errorf("batch transaction submission failed: %w", err)
-	}
-	return err
+	if len(iters) > 0 {
+		return s.createSnapshotAsync(iters, headerID)
+	} else {
+		return s.createSnapshot(iters[0], headerID)
+	}
 }
 
-// CreateLatestSnapshot snapshot at head (ignores height param)
-func (s *Service) CreateLatestSnapshot(workers uint, watchedAddresses []common.Address) error {
+// Create snapshot up to head (ignores height param)
+func (s *Service) CreateLatestSnapshot(workers uint) error {
 	log.Info("Creating snapshot at head")
 	hash := rawdb.ReadHeadHeaderHash(s.ethDB)
 	height := rawdb.ReadHeaderNumber(s.ethDB, hash)
 	if height == nil {
-		return fmt.Errorf("unable to read header height for header hash %s", hash)
+		return fmt.Errorf("unable to read header height for header hash %s", hash.String())
 	}
-	return s.CreateSnapshot(SnapshotParams{Height: *height, Workers: workers, WatchedAddresses: watchedAddresses})
+	return s.CreateSnapshot(SnapshotParams{Height: *height, Workers: workers})
 }
 
-func captureSignal(cb func()) {
-	sigChan := make(chan os.Signal, 1)
-
-	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
-	go func() {
-		sig := <-sigChan
-		log.Errorf("Signal received (%v), stopping", sig)
-		cb()
-	}()
-}
+type nodeResult struct {
+	node     Node
+	elements []interface{}
+}
+
+func resolveNode(it trie.NodeIterator, trieDB *trie.Database) (*nodeResult, error) {
+	// "leaf" nodes are actually "value" nodes, whose parents are the actual leaves
+	if it.Leaf() {
+		return nil, nil
+	}
+	if IsNullHash(it.Hash()) {
+		return nil, nil
+	}
+
+	path := make([]byte, len(it.Path()))
+	copy(path, it.Path())
+	n, err := trieDB.Node(it.Hash())
+	if err != nil {
+		return nil, err
+	}
+	var elements []interface{}
+	if err := rlp.DecodeBytes(n, &elements); err != nil {
+		return nil, err
+	}
+	ty, err := CheckKeyType(elements)
+	if err != nil {
+		return nil, err
+	}
+	return &nodeResult{
+		node: Node{
+			NodeType: ty,
+			Path:     path,
+			Value:    n,
+		},
+		elements: elements,
+	}, nil
+}
+
+func (s *Service) createSnapshot(it trie.NodeIterator, headerID string) error {
+	tx, err := s.ipfsPublisher.BeginTx()
+	if err != nil {
+		return err
+	}
+	defer func() { err = CommitOrRollback(tx, err) }()
+
+	for it.Next(true) {
+		res, err := resolveNode(it, s.stateDB.TrieDB())
+		if err != nil {
+			return err
+		}
+		if res == nil {
+			continue
+		}
+
+		tx, err = s.ipfsPublisher.PrepareTxForBatch(tx, s.maxBatchSize)
+		if err != nil {
+			return err
+		}
+
+		switch res.node.NodeType {
+		case Leaf:
+			// if the node is a leaf, decode the account and publish the associated storage trie
+			// nodes if there are any
+			var account types.StateAccount
+			if err := rlp.DecodeBytes(res.elements[1].([]byte), &account); err != nil {
+				return fmt.Errorf(
+					"error decoding account for leaf node at path %x\nerror: %v", res.node.Path, err)
+			}
+			partialPath := trie.CompactToHex(res.elements[0].([]byte))
+			valueNodePath := append(res.node.Path, partialPath...)
+			encodedPath := trie.HexToCompact(valueNodePath)
+			leafKey := encodedPath[1:]
+			res.node.Key = common.BytesToHash(leafKey)
+			err := s.ipfsPublisher.PublishStateNode(&res.node, headerID, tx)
+			if err != nil {
+				return err
+			}
+
+			// publish any non-nil code referenced by codehash
+			if !bytes.Equal(account.CodeHash, emptyCodeHash) {
+				codeHash := common.BytesToHash(account.CodeHash)
+				codeBytes := rawdb.ReadCode(s.ethDB, codeHash)
+				if len(codeBytes) == 0 {
+					log.Error("Code is missing", "account", common.BytesToHash(it.LeafKey()))
+					return errors.New("missing code")
+				}
+
+				if err = s.ipfsPublisher.PublishCode(codeHash, codeBytes, tx); err != nil {
+					return err
+				}
+			}
+
+			if tx, err = s.storageSnapshot(account.Root, headerID, res.node.Path, tx); err != nil {
+				return fmt.Errorf("failed building storage snapshot for account %+v\r\nerror: %w", account, err)
+			}
+		case Extension, Branch:
+			res.node.Key = common.BytesToHash([]byte{})
+			if err := s.ipfsPublisher.PublishStateNode(&res.node, headerID, tx); err != nil {
+				return err
+			}
+		default:
+			return errors.New("unexpected node type")
+		}
+	}
+	return it.Error()
+}
+
+// Full-trie concurrent snapshot
+func (s *Service) createSnapshotAsync(iters []trie.NodeIterator, headerID string) error {
+	errors := make(chan error)
+	var wg sync.WaitGroup
+	for _, it := range iters {
+		wg.Add(1)
+		go func(it trie.NodeIterator) {
+			defer wg.Done()
+			if err := s.createSnapshot(it, headerID); err != nil {
+				errors <- err
+			}
+		}(it)
+	}
+
+	done := make(chan struct{})
+	go func() {
+		wg.Wait()
+		done <- struct{}{}
+	}()
+
+	var err error
+	select {
+	case err = <-errors:
+	case <-done:
+		close(errors)
+	}
+	return err
+}
+
+func (s *Service) storageSnapshot(sr common.Hash, headerID string, statePath []byte, tx Tx) (Tx, error) {
+	if bytes.Equal(sr.Bytes(), emptyContractRoot.Bytes()) {
+		return tx, nil
+	}
+
+	sTrie, err := s.stateDB.OpenTrie(sr)
+	if err != nil {
+		return nil, err
+	}
+
+	it := sTrie.NodeIterator(make([]byte, 0))
+	for it.Next(true) {
+		res, err := resolveNode(it, s.stateDB.TrieDB())
+		if err != nil {
+			return nil, err
+		}
+		if res == nil {
+			continue
+		}
+
+		tx, err = s.ipfsPublisher.PrepareTxForBatch(tx, s.maxBatchSize)
+		if err != nil {
+			return nil, err
+		}
+
+		var nodeData []byte
+		nodeData, err = s.stateDB.TrieDB().Node(it.Hash())
+		if err != nil {
+			return nil, err
+		}
+		res.node.Value = nodeData
+
+		switch res.node.NodeType {
+		case Leaf:
+			partialPath := trie.CompactToHex(res.elements[0].([]byte))
+			valueNodePath := append(res.node.Path, partialPath...)
+			encodedPath := trie.HexToCompact(valueNodePath)
+			leafKey := encodedPath[1:]
+			res.node.Key = common.BytesToHash(leafKey)
+		case Extension, Branch:
+			res.node.Key = common.BytesToHash([]byte{})
+		default:
+			return nil, errors.New("unexpected node type")
+		}
+		if err = s.ipfsPublisher.PublishStorageNode(&res.node, headerID, statePath, tx); err != nil {
+			return nil, err
+		}
+	}
+
+	return tx, it.Error()
 }
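The worker fan-out in createSnapshotAsync rests on partitioning the state trie into disjoint subtrie iterators. A standalone sketch of that primitive, assuming a trie opened with stateDB.OpenTrie as in CreateSnapshot; the per-node processing is schematic:

	// partition the trie into 4 disjoint iterators and walk them concurrently
	iters := iter.SubtrieIterators(tree, 4)
	var wg sync.WaitGroup
	for _, it := range iters {
		wg.Add(1)
		go func(it trie.NodeIterator) {
			defer wg.Done()
			for it.Next(true) {
				// process it.Hash(), it.Path(), it.Leaf(), ...
			}
		}(it)
	}
	wg.Wait()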
@ -1,269 +1,141 @@
|
|||||||
package snapshot_test
|
package snapshot
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"errors"
|
||||||
"math/rand"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/cerc-io/eth-testing/chains"
|
"github.com/golang/mock/gomock"
|
||||||
"github.com/cerc-io/plugeth-statediff/indexer/models"
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
|
|
||||||
"github.com/cerc-io/ipld-eth-state-snapshot/internal/mocks"
|
fixt "github.com/vulcanize/ipld-eth-state-snapshot/fixture"
|
||||||
. "github.com/cerc-io/ipld-eth-state-snapshot/pkg/snapshot"
|
mock "github.com/vulcanize/ipld-eth-state-snapshot/mocks/snapshot"
|
||||||
fixture "github.com/cerc-io/ipld-eth-state-snapshot/test"
|
snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
|
||||||
|
"github.com/vulcanize/ipld-eth-state-snapshot/test"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
func testConfig(leveldbpath, ancientdbpath string) *Config {
|
||||||
rng = rand.New(rand.NewSource(time.Now().UnixNano()))
|
|
||||||
|
|
||||||
// Note: block 1 doesn't have storage nodes. TODO: add fixtures with storage nodes
|
|
||||||
// chainAblock1StateKeys = sliceToSet(fixture.ChainA_Block1_StateNodeLeafKeys)
|
|
||||||
chainAblock1IpldCids = sliceToSet(fixture.ChainA_Block1_IpldCids)
|
|
||||||
|
|
||||||
subtrieWorkerCases = []uint{1, 4, 8, 16, 32}
|
|
||||||
)
|
|
||||||
|
|
||||||
type selectiveData struct {
|
|
||||||
StateNodes map[string]*models.StateNodeModel
|
|
||||||
StorageNodes map[string]map[string]*models.StorageNodeModel
|
|
||||||
}
|
|
||||||
|
|
||||||
func testConfig(ethdbpath, ancientdbpath string) *Config {
|
|
||||||
return &Config{
|
return &Config{
|
||||||
Eth: &EthDBConfig{
|
Eth: &EthConfig{
|
||||||
DBPath: ethdbpath,
|
LevelDBPath: leveldbpath,
|
||||||
AncientDBPath: ancientdbpath,
|
AncientDBPath: ancientdbpath,
|
||||||
NodeInfo: DefaultNodeInfo,
|
NodeInfo: test.DefaultNodeInfo,
|
||||||
|
},
|
||||||
|
DB: &DBConfig{
|
||||||
|
URI: test.DefaultPgConfig.DbConnectionString(),
|
||||||
|
ConnConfig: test.DefaultPgConfig,
|
||||||
},
|
},
|
||||||
DB: &DefaultPgConfig,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSnapshot(t *testing.T) {
|
func makeMocks(t *testing.T) (*mock.MockPublisher, *mock.MockTx) {
|
||||||
runCase := func(t *testing.T, workers uint) {
|
ctl := gomock.NewController(t)
|
||||||
params := SnapshotParams{Height: 1, Workers: workers}
|
pub := mock.NewMockPublisher(ctl)
|
||||||
data := doSnapshot(t, fixture.ChainA, params)
|
tx := mock.NewMockTx(ctl)
|
||||||
verify_chainAblock1(t, data)
|
return pub, tx
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range subtrieWorkerCases {
|
func TestCreateSnapshot(t *testing.T) {
|
||||||
t.Run(fmt.Sprintf("with %d subtries", tc), func(t *testing.T) { runCase(t, tc) })
|
runCase := func(t *testing.T, workers int) {
|
||||||
}
|
pub, tx := makeMocks(t)
|
||||||
}
|
pub.EXPECT().PublishHeader(gomock.Eq(&fixt.Block1_Header))
|
||||||
|
pub.EXPECT().BeginTx().Return(tx, nil).
|
||||||
|
Times(workers)
|
||||||
|
pub.EXPECT().PrepareTxForBatch(gomock.Any(), gomock.Any()).Return(tx, nil).
|
||||||
|
AnyTimes()
|
||||||
|
pub.EXPECT().PublishStateNode(gomock.Any(), gomock.Any(), gomock.Any()).
|
||||||
|
// Use MinTimes as duplicate nodes are expected at boundaries
|
||||||
|
MinTimes(len(fixt.Block1_StateNodePaths))
|
||||||
|
|
||||||
func TestAccountSelectiveSnapshot(t *testing.T) {
|
// TODO: fixtures for storage node
|
||||||
height := uint64(32)
|
// pub.EXPECT().PublishStorageNode(gomock.Eq(fixt.StorageNode), gomock.Eq(int64(0)), gomock.Any())
|
||||||
watchedAddresses, expected := watchedAccountData_chainBblock32()
|
|
||||||
|
|
||||||
runCase := func(t *testing.T, workers uint) {
|
tx.EXPECT().Commit().
|
||||||
params := SnapshotParams{
|
Times(workers)
|
||||||
Height: height,
|
|
||||||
Workers: workers,
|
|
||||||
WatchedAddresses: watchedAddresses,
|
|
||||||
}
|
|
||||||
data := doSnapshot(t, fixture.ChainB, params)
|
|
||||||
expected.verify(t, data)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range subtrieWorkerCases {
|
config := testConfig(fixt.ChaindataPath, fixt.AncientdataPath)
|
||||||
t.Run(fmt.Sprintf("with %d subtries", tc), func(t *testing.T) { runCase(t, tc) })
|
edb, err := NewLevelDB(config.Eth)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
func TestSnapshotRecovery(t *testing.T) {
|
|
||||||
runCase := func(t *testing.T, workers uint, interruptAt uint) {
|
|
||||||
params := SnapshotParams{Height: 1, Workers: workers}
|
|
||||||
data := doSnapshotWithRecovery(t, fixture.ChainA, params, interruptAt)
|
|
||||||
verify_chainAblock1(t, data)
|
|
||||||
}
|
|
||||||
|
|
||||||
interrupts := make([]uint, 4)
|
|
||||||
for i := 0; i < len(interrupts); i++ {
|
|
||||||
N := len(fixture.ChainA_Block1_StateNodeLeafKeys)
|
|
||||||
interrupts[i] = uint(rand.Intn(N/2) + N/4)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range subtrieWorkerCases {
|
|
||||||
for i, interrupt := range interrupts {
|
|
||||||
t.Run(
|
|
||||||
fmt.Sprintf("with %d subtries %d", tc, i),
|
|
||||||
func(t *testing.T) { runCase(t, tc, interrupt) },
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccountSelectiveSnapshotRecovery(t *testing.T) {
|
|
||||||
height := uint64(32)
|
|
||||||
watchedAddresses, expected := watchedAccountData_chainBblock32()
|
|
||||||
|
|
||||||
runCase := func(t *testing.T, workers uint, interruptAt uint) {
|
|
||||||
params := SnapshotParams{
|
|
||||||
Height: height,
|
|
||||||
Workers: workers,
|
|
||||||
WatchedAddresses: watchedAddresses,
|
|
||||||
}
|
|
||||||
data := doSnapshotWithRecovery(t, fixture.ChainB, params, interruptAt)
|
|
||||||
expected.verify(t, data)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range subtrieWorkerCases {
|
|
||||||
t.Run(
|
|
||||||
fmt.Sprintf("with %d subtries", tc),
|
|
||||||
func(t *testing.T) { runCase(t, tc, 1) },
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func verify_chainAblock1(t *testing.T, data mocks.IndexerData) {
|
|
||||||
// Extract indexed keys and sort them for comparison
|
|
||||||
var indexedStateKeys []string
|
|
||||||
for _, stateNode := range data.StateNodes {
|
|
||||||
stateKey := common.BytesToHash(stateNode.AccountWrapper.LeafKey).String()
|
|
||||||
indexedStateKeys = append(indexedStateKeys, stateKey)
|
|
||||||
}
|
|
||||||
require.ElementsMatch(t, fixture.ChainA_Block1_StateNodeLeafKeys, indexedStateKeys)
|
|
||||||
|
|
||||||
ipldCids := make(map[string]struct{})
|
|
||||||
for _, ipld := range data.IPLDs {
|
|
||||||
ipldCids[ipld.CID] = struct{}{}
|
|
||||||
}
|
|
||||||
require.Equal(t, chainAblock1IpldCids, ipldCids)
|
|
||||||
}
|
|
||||||
|
|
||||||
func watchedAccountData_chainBblock32() ([]common.Address, selectiveData) {
|
|
||||||
watchedAddresses := []common.Address{
|
|
||||||
// hash 0xcabc5edb305583e33f66322ceee43088aa99277da772feb5053512d03a0a702b
|
|
||||||
common.HexToAddress("0x825a6eec09e44Cb0fa19b84353ad0f7858d7F61a"),
|
|
||||||
// hash 0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d
|
|
||||||
common.HexToAddress("0x0616F59D291a898e796a1FAD044C5926ed2103eC"),
|
|
||||||
}
|
|
||||||
var expected selectiveData
|
|
||||||
expected.StateNodes = make(map[string]*models.StateNodeModel)
|
|
||||||
for _, index := range []int{0, 4} {
|
|
||||||
node := &fixture.ChainB_Block32_StateNodes[index]
|
|
||||||
expected.StateNodes[node.StateKey] = node
|
|
||||||
}
|
|
||||||
|
|
||||||
// Map account leaf keys to corresponding storage
|
|
||||||
expectedStorageNodeIndexes := []struct {
|
|
||||||
address common.Address
|
|
||||||
indexes []int
|
|
||||||
}{
|
|
||||||
{watchedAddresses[0], []int{9, 11}},
|
|
||||||
{watchedAddresses[1], []int{0, 1, 2, 4, 6}},
|
|
||||||
}
|
|
||||||
expected.StorageNodes = make(map[string]map[string]*models.StorageNodeModel)
|
|
||||||
for _, account := range expectedStorageNodeIndexes {
|
|
||||||
leafKey := crypto.Keccak256Hash(account.address[:]).String()
|
|
||||||
storageNodes := make(map[string]*models.StorageNodeModel)
|
|
||||||
for _, index := range account.indexes {
|
|
||||||
node := &fixture.ChainB_Block32_StorageNodes[index]
|
|
||||||
storageNodes[node.StorageKey] = node
|
|
||||||
}
|
|
||||||
expected.StorageNodes[leafKey] = storageNodes
|
|
||||||
}
|
|
||||||
return watchedAddresses, expected
|
|
||||||
}
|
|
||||||
|
|
||||||
func (expected selectiveData) verify(t *testing.T, data mocks.IndexerData) {
|
|
||||||
// check that all indexed nodes are expected and correct
|
|
||||||
indexedStateKeys := make(map[string]struct{})
|
|
||||||
for _, stateNode := range data.StateNodes {
|
|
||||||
stateKey := common.BytesToHash(stateNode.AccountWrapper.LeafKey).String()
|
|
||||||
indexedStateKeys[stateKey] = struct{}{}
|
|
||||||
require.Contains(t, expected.StateNodes, stateKey, "unexpected state node")
|
|
||||||
|
|
||||||
model := expected.StateNodes[stateKey]
|
|
||||||
require.Equal(t, model.CID, stateNode.AccountWrapper.CID)
|
|
||||||
require.Equal(t, model.Balance, stateNode.AccountWrapper.Account.Balance.String())
|
|
||||||
require.Equal(t, model.StorageRoot, stateNode.AccountWrapper.Account.Root.String())
|
|
||||||
|
|
||||||
expectedStorage := expected.StorageNodes[stateKey]
|
|
||||||
indexedStorageKeys := make(map[string]struct{})
|
|
||||||
for _, storageNode := range stateNode.StorageDiff {
|
|
||||||
storageKey := common.BytesToHash(storageNode.LeafKey).String()
|
|
||||||
indexedStorageKeys[storageKey] = struct{}{}
|
|
||||||
require.Contains(t, expectedStorage, storageKey, "unexpected storage node")
|
|
||||||
|
|
||||||
require.Equal(t, expectedStorage[storageKey].CID, storageNode.CID)
|
|
||||||
require.Equal(t, expectedStorage[storageKey].Value, storageNode.Value)
|
|
||||||
}
|
|
||||||
// check for completeness
|
|
||||||
for storageNode := range expectedStorage {
|
|
||||||
require.Contains(t, indexedStorageKeys, storageNode, "missing storage node")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// check for completeness
|
|
||||||
for stateNode := range expected.StateNodes {
|
|
||||||
require.Contains(t, indexedStateKeys, stateNode, "missing state node")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func doSnapshot(t *testing.T, chain *chains.Paths, params SnapshotParams) mocks.IndexerData {
|
|
||||||
chainDataPath, ancientDataPath := chain.ChainData, chain.Ancient
|
|
||||||
config := testConfig(chainDataPath, ancientDataPath)
|
|
||||||
edb, err := NewEthDB(config.Eth)
|
|
||||||
Before:

	require.NoError(t, err)
	defer edb.Close()

	idx := mocks.NewIndexer(t)
	recovery := filepath.Join(t.TempDir(), "recover.csv")
	service, err := NewSnapshotService(edb, idx, recovery)
	require.NoError(t, err)

	err = service.CreateSnapshot(params)
	require.NoError(t, err)

	return idx.IndexerData
}

func doSnapshotWithRecovery(
	t *testing.T,
	chain *chains.Paths,
	params SnapshotParams,
	failAfter uint,
) mocks.IndexerData {
	chainDataPath, ancientDataPath := chain.ChainData, chain.Ancient
	config := testConfig(chainDataPath, ancientDataPath)
	edb, err := NewEthDB(config.Eth)
	require.NoError(t, err)
	defer edb.Close()

	indexer := &mocks.InterruptingIndexer{
		Indexer:        mocks.NewIndexer(t),
		InterruptAfter: failAfter,
	}
	t.Logf("Will interrupt after %d state nodes", failAfter)

	recoveryFile := filepath.Join(t.TempDir(), "recover.csv")
	service, err := NewSnapshotService(edb, indexer, recoveryFile)
	require.NoError(t, err)
	err = service.CreateSnapshot(params)
	require.Error(t, err)

	require.FileExists(t, recoveryFile)
	// We should only have processed nodes up to the break, plus an extra node per worker
	require.LessOrEqual(t, len(indexer.StateNodes), int(indexer.InterruptAfter+params.Workers))

	// use the nested mock indexer, to continue where it left off
	recoveryIndexer := indexer.Indexer
	service, err = NewSnapshotService(edb, recoveryIndexer, recoveryFile)
	require.NoError(t, err)
	err = service.CreateSnapshot(params)
	require.NoError(t, err)
	return recoveryIndexer.IndexerData
}

func sliceToSet[T comparable](slice []T) map[T]struct{} {
	set := make(map[T]struct{})
	for _, v := range slice {
		set[v] = struct{}{}
	}
	return set
}

After:

		defer edb.Close()

		recovery := filepath.Join(t.TempDir(), "recover.csv")
		service, err := NewSnapshotService(edb, pub, recovery)
		if err != nil {
			t.Fatal(err)
		}

		params := SnapshotParams{Height: 1, Workers: uint(workers)}
		err = service.CreateSnapshot(params)
		if err != nil {
			t.Fatal(err)
		}
	}

	testCases := []int{1, 4, 16, 32}
	for _, tc := range testCases {
		t.Run("case", func(t *testing.T) { runCase(t, tc) })
	}
}

func failingPublishStateNode(_ *snapt.Node, _ string, _ snapt.Tx) error {
	return errors.New("failingPublishStateNode")
}

func TestRecovery(t *testing.T) {
	runCase := func(t *testing.T, workers int) {
		pub, tx := makeMocks(t)
		pub.EXPECT().PublishHeader(gomock.Any()).AnyTimes()
		pub.EXPECT().BeginTx().Return(tx, nil).AnyTimes()
		pub.EXPECT().PrepareTxForBatch(gomock.Any(), gomock.Any()).Return(tx, nil).AnyTimes()
		pub.EXPECT().PublishStateNode(gomock.Any(), gomock.Any(), gomock.Any()).
			Times(workers).
			DoAndReturn(failingPublishStateNode)
		tx.EXPECT().Commit().AnyTimes()

		config := testConfig(fixt.ChaindataPath, fixt.AncientdataPath)
		edb, err := NewLevelDB(config.Eth)
		if err != nil {
			t.Fatal(err)
		}
		defer edb.Close()

		recovery := filepath.Join(t.TempDir(), "recover.csv")
		service, err := NewSnapshotService(edb, pub, recovery)
		if err != nil {
			t.Fatal(err)
		}
		params := SnapshotParams{Height: 1, Workers: uint(workers)}
		err = service.CreateSnapshot(params)
		if err == nil {
			t.Fatal("expected an error")
		}
		if _, err = os.Stat(recovery); err != nil {
			t.Fatal("cannot stat recovery file:", err)
		}

		pub.EXPECT().PublishStateNode(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
		err = service.CreateSnapshot(params)
		if err != nil {
			t.Fatal(err)
		}

		_, err = os.Stat(recovery)
		if err == nil {
			t.Fatal("recovery file still present")
		} else {
			if !os.IsNotExist(err) {
				t.Fatal(err)
			}
		}
	}

	testCases := []int{1, 4, 32}
	for _, tc := range testCases {
		t.Run("case", func(t *testing.T) { runCase(t, tc) })
	}
}
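The `mocks.InterruptingIndexer` used by `doSnapshotWithRecovery` is not shown in this comparison. A minimal sketch of how such a wrapper could behave, assuming only the `Indexer`, `InterruptAfter`, and `StateNodes` names that appear in the test; the `StateNode` type and `PushStateNode` method are hypothetical stand-ins:

```go
package mocks

import (
	"errors"
	"sync"
)

// StateNode is a hypothetical stand-in for the service's state node type.
type StateNode struct {
	Path  []byte
	Value []byte
}

// Indexer is a minimal recording fake; the real mocks.Indexer is not shown here.
type Indexer struct {
	mu         sync.Mutex
	StateNodes []StateNode
}

func (i *Indexer) PushStateNode(n StateNode) error {
	i.mu.Lock()
	defer i.mu.Unlock()
	i.StateNodes = append(i.StateNodes, n)
	return nil
}

// InterruptingIndexer wraps a nested Indexer and starts failing once
// InterruptAfter nodes have been indexed, simulating a crash mid-snapshot.
type InterruptingIndexer struct {
	*Indexer
	InterruptAfter uint
}

func (i *InterruptingIndexer) PushStateNode(n StateNode) error {
	i.mu.Lock()
	count := uint(len(i.StateNodes))
	i.mu.Unlock()
	if count >= i.InterruptAfter {
		return errors.New("interrupted by test")
	}
	return i.Indexer.PushStateNode(n)
}
```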
pkg/snapshot/tracker.go (new file, 163 lines)
@ -0,0 +1,163 @@

package snapshot

import (
	"encoding/csv"
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/trie"
	log "github.com/sirupsen/logrus"

	iter "github.com/vulcanize/go-eth-state-node-iterator"
)

type trackedIter struct {
	trie.NodeIterator
	tracker *iteratorTracker
}

func (it *trackedIter) Next(descend bool) bool {
	ret := it.NodeIterator.Next(descend)
	if !ret {
		if it.tracker.running {
			it.tracker.stopChan <- it
		} else {
			log.Errorf("iterator stopped after tracker halted: path=%x", it.Path())
		}
	}
	return ret
}

type iteratorTracker struct {
	recoveryFile string

	startChan chan *trackedIter
	stopChan  chan *trackedIter
	started   map[*trackedIter]struct{}
	stopped   []*trackedIter
	running   bool
}

func newTracker(file string, buf int) iteratorTracker {
	return iteratorTracker{
		recoveryFile: file,
		startChan:    make(chan *trackedIter, buf),
		stopChan:     make(chan *trackedIter, buf),
		started:      map[*trackedIter]struct{}{},
		running:      true,
	}
}

func (tr *iteratorTracker) captureSignal() {
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		sig := <-sigChan
		log.Errorf("Signal received (%v), stopping", sig)
		tr.haltAndDump()
		os.Exit(1)
	}()
}

// Wraps an iterator in a trackedIter. This should not be called once halts are possible.
func (tr *iteratorTracker) tracked(it trie.NodeIterator) (ret *trackedIter) {
	ret = &trackedIter{it, tr}
	tr.startChan <- ret
	return
}

// dumps iterator path and bounds to a text file so it can be restored later
func (tr *iteratorTracker) dump() error {
	log.Debug("Dumping recovery state to: ", tr.recoveryFile)
	var rows [][]string
	for it := range tr.started {
		var endPath []byte
		if impl, ok := it.NodeIterator.(*iter.PrefixBoundIterator); ok {
			endPath = impl.EndPath
		}
		rows = append(rows, []string{
			fmt.Sprintf("%x", it.Path()),
			fmt.Sprintf("%x", endPath),
		})
	}
	file, err := os.Create(tr.recoveryFile)
	if err != nil {
		return err
	}
	defer file.Close()
	out := csv.NewWriter(file)
	return out.WriteAll(rows)
}

// attempts to read iterator state from file
// if file doesn't exist, returns an empty slice with no error
func (tr *iteratorTracker) restore(tree state.Trie) ([]trie.NodeIterator, error) {
	file, err := os.Open(tr.recoveryFile)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, err
	}
	log.Debug("Restoring recovery state from: ", tr.recoveryFile)
	defer file.Close()
	in := csv.NewReader(file)
	in.FieldsPerRecord = 2
	rows, err := in.ReadAll()
	if err != nil {
		return nil, err
	}
	var ret []trie.NodeIterator
	for _, row := range rows {
		// pick up where each interval left off
		var paths [2][]byte
		for i, val := range row {
			if len(val) != 0 {
				if _, err = fmt.Sscanf(val, "%x", &paths[i]); err != nil {
					return nil, err
				}
			}
		}
		// Force the lower bound path to an even length
		if len(paths[0])&0b1 == 1 {
			decrementPath(paths[0]) // decrement first to avoid skipped nodes
			paths[0] = append(paths[0], 0)
		}
		it := iter.NewPrefixBoundIterator(tree.NodeIterator(iter.HexToKeyBytes(paths[0])), paths[1])
		ret = append(ret, tr.tracked(it))
	}
	return ret, nil
}

func (tr *iteratorTracker) haltAndDump() error {
	tr.running = false

	// drain any pending events
	close(tr.startChan)
	for start := range tr.startChan {
		tr.started[start] = struct{}{}
	}
	close(tr.stopChan)
	for stop := range tr.stopChan {
		tr.stopped = append(tr.stopped, stop)
	}

	for _, stop := range tr.stopped {
		delete(tr.started, stop)
	}

	if len(tr.started) == 0 {
		// if the tracker state is empty, erase any existing recovery file
		err := os.Remove(tr.recoveryFile)
		if os.IsNotExist(err) {
			err = nil
		}
		return err
	}
	return tr.dump()
}
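The recovery file written by `dump` and read by `restore` is just a CSV of hex-encoded iterator bounds: one row per in-flight iterator, holding the current path and the (possibly empty) end path. A self-contained sketch of that round trip, using the same `encoding/csv` and `fmt.Sscanf("%x", ...)` calls as the tracker; the file name and sample paths are made up for the demo:

```go
package main

import (
	"encoding/csv"
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	file := filepath.Join(os.TempDir(), "recover-demo.csv")

	// dump: write each iterator's current and end path as hex
	rows := [][]string{
		{fmt.Sprintf("%x", []byte{0x0, 0x5}), fmt.Sprintf("%x", []byte{0x0, 0x6})},
		{fmt.Sprintf("%x", []byte{0x0, 0xc}), fmt.Sprintf("%x", []byte(nil))},
	}
	f, err := os.Create(file)
	if err != nil {
		panic(err)
	}
	w := csv.NewWriter(f)
	if err := w.WriteAll(rows); err != nil {
		panic(err)
	}
	f.Close()

	// restore: parse the hex fields back into path byte slices,
	// skipping empty fields just as the tracker does
	f, err = os.Open(file)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	in := csv.NewReader(f)
	in.FieldsPerRecord = 2
	read, err := in.ReadAll()
	if err != nil {
		panic(err)
	}
	for _, row := range read {
		var paths [2][]byte
		for i, val := range row {
			if len(val) != 0 {
				if _, err := fmt.Sscanf(val, "%x", &paths[i]); err != nil {
					panic(err)
				}
			}
		}
		fmt.Printf("start=%x end=%x\n", paths[0], paths[1])
	}
}
```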
@ -1,76 +1,53 @@

Before:

package snapshot

import (
	"bytes"
)

// Estimate the number of iterations necessary to step from start to end.
func estimateSteps(start []byte, end []byte, depth int) uint64 {
	// We see paths in several forms (nil, 0600, 06, etc.). We need to adjust them to a comparable form.
	// For nil, start and end indicate the extremes of 0x0 and 0x10. For differences in depth, we often see a
	// start/end range on a bounded iterator specified like 0500:0600, while the value returned by it.Path() may
	// be shorter, like 06. Since our goal is to estimate how many steps it would take to move from start to end,
	// we want to perform the comparison at a stable depth, since to move from 05 to 06 is only 1 step, but
	// to move from 0500 to 06 is 16.
	normalizePathRange := func(start []byte, end []byte, depth int) ([]byte, []byte) {
		if 0 == len(start) {
			start = []byte{0x0}
		}
		if 0 == len(end) {
			end = []byte{0x10}
		}
		normalizedStart := make([]byte, depth)
		normalizedEnd := make([]byte, depth)
		for i := 0; i < depth; i++ {
			if i < len(start) {
				normalizedStart[i] = start[i]
			}
			if i < len(end) {
				normalizedEnd[i] = end[i]
			}
		}
		return normalizedStart, normalizedEnd
	}

	// We have no need to handle negative exponents, so uints are fine.
	pow := func(x uint64, y uint) uint64 {
		ret := uint64(1)
		for i := uint(0); i < y; i++ {
			ret *= x
		}
		return ret
	}

	// Fix the paths.
	start, end = normalizePathRange(start, end, depth)

	// No negative distances; if the start is already >= end, the distance is 0.
	if bytes.Compare(start, end) >= 0 {
		return 0
	}

	// Subtract each component, right to left, carrying over if necessary.
	difference := make([]byte, len(start))
	var carry byte = 0
	for i := len(start) - 1; i >= 0; i-- {
		result := end[i] - start[i] - carry
		if result > 0xf && i > 0 {
			result &= 0xf
			carry = 1
		} else {
			carry = 0
		}
		difference[i] = result
	}

	// Calculate the result.
	var ret uint64 = 0
	for i := 0; i < len(difference); i++ {
		ret += uint64(difference[i]) * pow(16, uint(len(difference)-i-1))
	}

	return ret
}

After:

package snapshot

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"

	"github.com/vulcanize/ipld-eth-state-snapshot/pkg/prom"
	file "github.com/vulcanize/ipld-eth-state-snapshot/pkg/snapshot/file"
	pg "github.com/vulcanize/ipld-eth-state-snapshot/pkg/snapshot/pg"
	snapt "github.com/vulcanize/ipld-eth-state-snapshot/pkg/types"
)

func NewPublisher(mode SnapshotMode, config *Config) (snapt.Publisher, error) {
	switch mode {
	case PgSnapshot:
		driver, err := postgres.NewPGXDriver(context.Background(), config.DB.ConnConfig, config.Eth.NodeInfo)
		if err != nil {
			return nil, err
		}
		prom.RegisterDBCollector(config.DB.ConnConfig.DatabaseName, driver)
		return pg.NewPublisher(postgres.NewPostgresDB(driver)), nil
	case FileSnapshot:
		return file.NewPublisher(config.File.OutputDir, config.Eth.NodeInfo)
	}
	return nil, fmt.Errorf("invalid snapshot mode: %s", mode)
}

// Subtracts 1 from the last byte in a path slice, carrying if needed.
// Does nothing, returning false, for all-zero inputs.
func decrementPath(path []byte) bool {
	// check for all zeros
	allzero := true
	for i := 0; i < len(path); i++ {
		allzero = allzero && path[i] == 0
	}
	if allzero {
		return false
	}
	for i := len(path) - 1; i >= 0; i-- {
		val := path[i]
		path[i]--
		if val == 0 {
			path[i] = 0xf
		} else {
			return true
		}
	}
	return true
}
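To see the borrow behavior of `decrementPath` concretely, here is a standalone demo with a local copy of the function; the sample paths are arbitrary:

```go
package main

import "fmt"

// decrementPath as defined above: subtracts 1 from the last nibble,
// borrowing (0 -> 0xf) as needed; returns false for all-zero input.
func decrementPath(path []byte) bool {
	allzero := true
	for i := 0; i < len(path); i++ {
		allzero = allzero && path[i] == 0
	}
	if allzero {
		return false
	}
	for i := len(path) - 1; i >= 0; i-- {
		val := path[i]
		path[i]--
		if val == 0 {
			path[i] = 0xf
		} else {
			return true
		}
	}
	return true
}

func main() {
	a := []byte{0x0, 0x6, 0x0}
	fmt.Println(decrementPath(a), a) // true [0 5 15]

	b := []byte{0x0, 0x0}
	fmt.Println(decrementPath(b), b) // false [0 0]
}
```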
|
63
pkg/types/node_type.go
Normal file
63
pkg/types/node_type.go
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
// Copyright © 2020 Vulcanize, Inc
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
)
|
||||||
|
|
||||||
|
// node for holding trie node information
|
||||||
|
type Node struct {
|
||||||
|
NodeType nodeType
|
||||||
|
Path []byte
|
||||||
|
Key common.Hash
|
||||||
|
Value []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// nodeType for explicitly setting type of node
|
||||||
|
type nodeType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
Branch nodeType = iota
|
||||||
|
Extension
|
||||||
|
Leaf
|
||||||
|
Removed
|
||||||
|
Unknown
|
||||||
|
)
|
||||||
|
|
||||||
|
// CheckKeyType checks what type of key we have
|
||||||
|
func CheckKeyType(elements []interface{}) (nodeType, error) {
|
||||||
|
if len(elements) > 2 {
|
||||||
|
return Branch, nil
|
||||||
|
}
|
||||||
|
if len(elements) < 2 {
|
||||||
|
return Unknown, fmt.Errorf("node cannot be less than two elements in length")
|
||||||
|
}
|
||||||
|
switch elements[0].([]byte)[0] / 16 {
|
||||||
|
case '\x00':
|
||||||
|
return Extension, nil
|
||||||
|
case '\x01':
|
||||||
|
return Extension, nil
|
||||||
|
case '\x02':
|
||||||
|
return Leaf, nil
|
||||||
|
case '\x03':
|
||||||
|
return Leaf, nil
|
||||||
|
default:
|
||||||
|
return Unknown, fmt.Errorf("unknown hex prefix")
|
||||||
|
}
|
||||||
|
}
|
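`CheckKeyType` keys off the high nibble of the first element: hex-prefix flags 0 and 1 mark extensions, 2 and 3 mark leaves. A small demo of that switch; the sample prefix bytes are illustrative:

```go
package main

import "fmt"

// classify mirrors the high-nibble switch in CheckKeyType above,
// without the package's nodeType constants.
func classify(prefix byte) string {
	switch prefix / 16 {
	case 0, 1:
		return "Extension"
	case 2, 3:
		return "Leaf"
	default:
		return "Unknown"
	}
}

func main() {
	// 0x20 flags an even-length leaf path; 0x1a an odd-length extension.
	fmt.Println(classify(0x20)) // Leaf
	fmt.Println(classify(0x1a)) // Extension
	fmt.Println(classify(0x45)) // Unknown
}
```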
pkg/types/publisher.go (new file, 20 lines)
@ -0,0 +1,20 @@

package types

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

type Publisher interface {
	PublishHeader(header *types.Header) error
	PublishStateNode(node *Node, headerID string, tx Tx) error
	PublishStorageNode(node *Node, headerID string, statePath []byte, tx Tx) error
	PublishCode(codeHash common.Hash, codeBytes []byte, tx Tx) error
	BeginTx() (Tx, error)
	PrepareTxForBatch(tx Tx, batchSize uint) (Tx, error)
}

type Tx interface {
	Rollback() error
	Commit() error
}
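A toy `Tx` satisfying the interface above can be handy for exercising code that only needs the `Commit`/`Rollback` contract; the `memTx` type here is a sketch, not part of the diff:

```go
package main

import "fmt"

// Tx matches the interface defined above.
type Tx interface {
	Rollback() error
	Commit() error
}

// memTx is a toy Tx that just records its final state.
type memTx struct{ state string }

func (t *memTx) Commit() error   { t.state = "committed"; return nil }
func (t *memTx) Rollback() error { t.state = "rolled back"; return nil }

func main() {
	var tx Tx = &memTx{}
	if err := tx.Commit(); err != nil {
		panic(err)
	}
	fmt.Println(tx.(*memTx).state) // committed
}
```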
pkg/types/schema.go (new file, 73 lines)
@ -0,0 +1,73 @@

package types

var TableIPLDBlock = Table{
	`public.blocks`,
	[]column{
		{"key", text},
		{"data", bytea},
	},
	`ON CONFLICT (key) DO NOTHING`,
}

var TableNodeInfo = Table{
	Name: `public.nodes`,
	Columns: []column{
		{"genesis_block", varchar},
		{"network_id", varchar},
		{"node_id", varchar},
		{"client_name", varchar},
		{"chain_id", integer},
	},
}

var TableHeader = Table{
	"eth.header_cids",
	[]column{
		{"block_number", bigint},
		{"block_hash", varchar},
		{"parent_hash", varchar},
		{"cid", text},
		{"td", numeric},
		{"node_id", varchar},
		{"reward", numeric},
		{"state_root", varchar},
		{"tx_root", varchar},
		{"receipt_root", varchar},
		{"uncle_root", varchar},
		{"bloom", bytea},
		{"timestamp", numeric},
		{"mh_key", text},
		{"times_validated", integer},
		{"coinbase", varchar},
	},
	"ON CONFLICT (block_hash) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = (EXCLUDED.parent_hash, EXCLUDED.cid, EXCLUDED.td, EXCLUDED.node_id, EXCLUDED.reward, EXCLUDED.state_root, EXCLUDED.tx_root, EXCLUDED.receipt_root, EXCLUDED.uncle_root, EXCLUDED.bloom, EXCLUDED.timestamp, EXCLUDED.mh_key, eth.header_cids.times_validated + 1, EXCLUDED.coinbase)",
}

var TableStateNode = Table{
	"eth.state_cids",
	[]column{
		{"header_id", varchar},
		{"state_leaf_key", varchar},
		{"cid", text},
		{"state_path", bytea},
		{"node_type", integer},
		{"diff", boolean},
		{"mh_key", text},
	},
	`ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = (EXCLUDED.state_leaf_key, EXCLUDED.cid, EXCLUDED.node_type, EXCLUDED.diff, EXCLUDED.mh_key)`,
}

var TableStorageNode = Table{
	"eth.storage_cids",
	[]column{
		{"header_id", varchar},
		{"state_path", bytea},
		{"storage_leaf_key", varchar},
		{"cid", text},
		{"storage_path", bytea},
		{"node_type", integer},
		{"diff", boolean},
		{"mh_key", text},
	},
	"ON CONFLICT (header_id, state_path, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = (EXCLUDED.storage_leaf_key, EXCLUDED.cid, EXCLUDED.node_type, EXCLUDED.diff, EXCLUDED.mh_key)",
}
pkg/types/table.go (new file, 79 lines)
@ -0,0 +1,79 @@

package types

import (
	"fmt"
	"strings"
)

type colType int

const (
	integer colType = iota
	boolean
	bigint
	numeric
	bytea
	varchar
	text
)

type column struct {
	name string
	typ  colType
}

type Table struct {
	Name           string
	Columns        []column
	conflictClause string
}

func (tbl *Table) ToCsvRow(args ...interface{}) []string {
	var row []string
	for i, col := range tbl.Columns {
		row = append(row, col.typ.formatter()(args[i]))
	}
	return row
}

func (tbl *Table) ToInsertStatement() string {
	var colnames, placeholders []string
	for i, col := range tbl.Columns {
		colnames = append(colnames, col.name)
		placeholders = append(placeholders, fmt.Sprintf("$%d", i+1))
	}
	return fmt.Sprintf(
		"INSERT INTO %s (%s) VALUES (%s) %s",
		tbl.Name, strings.Join(colnames, ", "), strings.Join(placeholders, ", "), tbl.conflictClause,
	)
}

type colfmt = func(interface{}) string

func sprintf(f string) colfmt {
	return func(x interface{}) string { return fmt.Sprintf(f, x) }
}

func (typ colType) formatter() colfmt {
	switch typ {
	case integer:
		return sprintf("%d")
	case boolean:
		return func(x interface{}) string {
			if x.(bool) {
				return "t"
			}
			return "f"
		}
	case bigint:
		return sprintf("%s")
	case numeric:
		return sprintf("%d")
	case bytea:
		return sprintf(`\x%x`)
	case varchar:
		return sprintf("%s")
	case text:
		return sprintf("%s")
	}
	panic("unreachable")
}
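To see what `ToInsertStatement` produces, here is a trimmed, self-contained copy run against the `public.blocks` table from `schema.go`:

```go
package main

import (
	"fmt"
	"strings"
)

// Minimal copies of the types above, enough to show the generated SQL.
type colType int

const (
	bytea colType = iota
	text
)

type column struct {
	name string
	typ  colType
}

type Table struct {
	Name           string
	Columns        []column
	conflictClause string
}

func (tbl *Table) ToInsertStatement() string {
	var colnames, placeholders []string
	for i, col := range tbl.Columns {
		colnames = append(colnames, col.name)
		placeholders = append(placeholders, fmt.Sprintf("$%d", i+1))
	}
	return fmt.Sprintf(
		"INSERT INTO %s (%s) VALUES (%s) %s",
		tbl.Name, strings.Join(colnames, ", "), strings.Join(placeholders, ", "), tbl.conflictClause,
	)
}

func main() {
	blocks := Table{
		"public.blocks",
		[]column{{"key", text}, {"data", bytea}},
		"ON CONFLICT (key) DO NOTHING",
	}
	fmt.Println(blocks.ToInsertStatement())
	// INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING
}
```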
pkg/types/util.go (new file, 33 lines)
@ -0,0 +1,33 @@

package types

import (
	"bytes"

	"github.com/sirupsen/logrus"

	"github.com/ethereum/go-ethereum/common"
)

var nullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")

func IsNullHash(hash common.Hash) bool {
	return bytes.Equal(hash.Bytes(), nullHash.Bytes())
}

func CommitOrRollback(tx Tx, err error) error {
	var rberr error
	defer func() {
		if rberr != nil {
			logrus.Errorf("rollback failed: %s", rberr)
		}
	}()
	if rec := recover(); rec != nil {
		rberr = tx.Rollback()
		panic(rec)
	} else if err != nil {
		rberr = tx.Rollback()
	} else {
		err = tx.Commit()
	}
	return err
}
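A sketch of how a helper like `CommitOrRollback` can be driven from a deferred call, with a stub `Tx`; the `fakeTx` and `doWork` names are made up, and only the error-driven commit/rollback paths are exercised:

```go
package main

import (
	"errors"
	"fmt"
)

type Tx interface {
	Rollback() error
	Commit() error
}

type fakeTx struct{ result string }

func (t *fakeTx) Commit() error   { t.result = "committed"; return nil }
func (t *fakeTx) Rollback() error { t.result = "rolled back"; return nil }

// commitOrRollback is a local copy of the error-handling logic above.
func commitOrRollback(tx Tx, err error) error {
	var rberr error
	defer func() {
		if rberr != nil {
			fmt.Println("rollback failed:", rberr)
		}
	}()
	if err != nil {
		rberr = tx.Rollback()
	} else {
		err = tx.Commit()
	}
	return err
}

func doWork(fail bool) (err error) {
	tx := &fakeTx{}
	defer func() {
		// the deferred call decides the transaction's fate based on err
		err = commitOrRollback(tx, err)
		fmt.Println(tx.result)
	}()
	if fail {
		return errors.New("boom")
	}
	return nil
}

func main() {
	doWork(false) // committed
	doWork(true)  // rolled back
}
```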
@ -1,73 +0,0 @@

## Data Validation

* For a given table in the `ipld-eth-db` schema, we know the number of columns to be expected in each row in the data dump:

| Table              | Expected columns |
|--------------------|:----------------:|
| `public.nodes`     |        5         |
| `ipld.blocks`      |        3         |
| `eth.header_cids`  |        16        |
| `eth.state_cids`   |        8         |
| `eth.storage_cids` |        9         |

### Find Bad Data

* Run the following command to find any rows having an unexpected number of columns:

```bash
./scripts/find-bad-rows.sh -i <input-file> -c <expected-columns> -o [output-file] -d [include-data]
```

* `input-file` `-i`: Input data file path
* `expected-columns` `-c`: Expected number of columns in each row of the input file
* `output-file` `-o`: Output destination file path (default: `STDOUT`)
* `include-data` `-d`: Whether to include the data row in the output (`true | false`) (default: `false`)
* The output is of the format: row number, number of columns, the data row

Eg:

```bash
./scripts/find-bad-rows.sh -i eth.state_cids.csv -c 8 -o res.txt -d true
```

Output:

```
1 9 1500000,xxxxxxxx,0x83952d392f9b0059eea94b10d1a095eefb1943ea91595a16c6698757127d4e1c,,baglacgzasvqcntdahkxhufdnkm7a22s2eetj6mx6nzkarwxtkvy4x3bubdgq,\x0f,0,f,/blocks/,DMQJKYBGZRQDVLT2CRWVGPQNNJNCCJU7GL7G4VAI3LZVK4OL5Q2ARTI
```

Eg:

```bash
./scripts/find-bad-rows.sh -i public.nodes.csv -c 5 -o res.txt -d true
./scripts/find-bad-rows.sh -i ipld.blocks.csv -c 3 -o res.txt -d true
./scripts/find-bad-rows.sh -i eth.header_cids.csv -c 16 -o res.txt -d true
./scripts/find-bad-rows.sh -i eth.state_cids.csv -c 8 -o res.txt -d true
./scripts/find-bad-rows.sh -i eth.storage_cids.csv -c 9 -o res.txt -d true
```

## Data Cleanup

* In case of a column count mismatch, data from `file` mode dumps can't be imported readily into `ipld-eth-db`.

### Filter Bad Data

* Run the following command to filter out rows having an unexpected number of columns:

```bash
./scripts/filter-bad-rows.sh -i <input-file> -c <expected-columns> -o <output-file>
```

* `input-file` `-i`: Input data file path
* `expected-columns` `-c`: Expected number of columns in each row of the input file
* `output-file` `-o`: Output destination file path

Eg:

```bash
./scripts/filter-bad-rows.sh -i public.nodes.csv -c 5 -o cleaned-public.nodes.csv
./scripts/filter-bad-rows.sh -i ipld.blocks.csv -c 3 -o cleaned-ipld.blocks.csv
./scripts/filter-bad-rows.sh -i eth.header_cids.csv -c 16 -o cleaned-eth.header_cids.csv
./scripts/filter-bad-rows.sh -i eth.state_cids.csv -c 8 -o cleaned-eth.state_cids.csv
./scripts/filter-bad-rows.sh -i eth.storage_cids.csv -c 9 -o cleaned-eth.storage_cids.csv
```
@ -1,87 +0,0 @@

#!/bin/bash
# Compare the full snapshot output from two versions of the service
#
# Usage: compare-versions.sh [-d <output-dir>] <binary-A> <binary-B>

# Configure the input data using environment vars.
(
  set -u
  : $SNAPSHOT_BLOCK_HEIGHT
  : $ETHDB_PATH
  : $ETHDB_ANCIENT
  : $ETH_GENESIS_BLOCK
)

while getopts d: opt; do
  case $opt in
    d) output_dir="$OPTARG"
  esac
done
shift $((OPTIND - 1))

binary_A=$1
binary_B=$2
shift 2

if [[ -z $output_dir ]]; then
  output_dir=$(mktemp -d)
fi

export SNAPSHOT_MODE=postgres
export SNAPSHOT_WORKERS=32
export SNAPSHOT_RECOVERY_FILE='compare-snapshots-recovery.txt'

export DATABASE_NAME="cerc_testing"
export DATABASE_HOSTNAME="localhost"
export DATABASE_PORT=8077
export DATABASE_USER="vdbm"
export DATABASE_PASSWORD="password"

export ETH_CLIENT_NAME=test-client
export ETH_NODE_ID=test-node
export ETH_NETWORK_ID=test-network
export ETH_CHAIN_ID=4242

dump_table() {
  statement="copy (select * from $1) to stdout with csv"
  docker exec -e PGPASSWORD=password test-ipld-eth-db-1 \
    psql -q cerc_testing -U vdbm -c "$statement" | sort -u > "$2/$1.csv"
}

clear_table() {
  docker exec -e PGPASSWORD=password test-ipld-eth-db-1 \
    psql -q cerc_testing -U vdbm -c "truncate $1"
}

tables=(
  eth.log_cids
  eth.receipt_cids
  eth.state_cids
  eth.storage_cids
  eth.transaction_cids
  eth.uncle_cids
  ipld.blocks
  public.nodes
)

for table in "${tables[@]}"; do
  clear_table $table
done

$binary_A stateSnapshot

mkdir -p $output_dir/A
for table in "${tables[@]}"; do
  dump_table $table $output_dir/A
  clear_table $table
done

$binary_B stateSnapshot

mkdir -p $output_dir/B
for table in "${tables[@]}"; do
  dump_table $table $output_dir/B
  clear_table $table
done

diff -rs $output_dir/A $output_dir/B
@ -1,29 +0,0 @@

#!/bin/bash

# flags
# -i <input-file>: Input data file path
# -c <expected-columns>: Expected number of columns in each row of the input file
# -o [output-file]: Output destination file path

# eg: ./scripts/filter-bad-rows.sh -i eth.state_cids.csv -c 8 -o cleaned-eth.state_cids.csv

while getopts i:c:o: OPTION
do
  case "${OPTION}" in
    i) inputFile=${OPTARG};;
    c) expectedColumns=${OPTARG};;
    o) outputFile=${OPTARG};;
  esac
done

timestamp=$(date +%s)

# select only rows having expected number of columns
if [ -z "${outputFile}" ]; then
  echo "Invalid destination file arg (-o) ${outputFile}"
else
  awk -F"," "NF==${expectedColumns}" ${inputFile} > ${outputFile}
fi

difference=$(($(date +%s)-timestamp))
echo Time taken: $(date -d@${difference} -u +%H:%M:%S)
@ -1,43 +0,0 @@

#!/bin/bash

# flags
# -i <input-file>: Input data file path
# -c <expected-columns>: Expected number of columns in each row of the input file
# -o [output-file]: Output destination file path (default: STDOUT)
# -d [include-data]: Whether to include the data row in output (true | false) (default: false)

# eg: ./scripts/find-bad-rows.sh -i eth.state_cids.csv -c 8 -o res.txt -d true
# output: 1 9 1500000,xxxxxxxx,0x83952d392f9b0059eea94b10d1a095eefb1943ea91595a16c6698757127d4e1c,,
# baglacgzasvqcntdahkxhufdnkm7a22s2eetj6mx6nzkarwxtkvy4x3bubdgq,\x0f,0,f,/blocks/,
# DMQJKYBGZRQDVLT2CRWVGPQNNJNCCJU7GL7G4VAI3LZVK4OL5Q2ARTI

while getopts i:c:o:d: OPTION
do
  case "${OPTION}" in
    i) inputFile=${OPTARG};;
    c) expectedColumns=${OPTARG};;
    o) outputFile=${OPTARG};;
    d) data=${OPTARG};;
  esac
done

timestamp=$(date +%s)

# if data requested, dump row number, number of columns and the row
if [ "${data}" = true ] ; then
  if [ -z "${outputFile}" ]; then
    awk -F"," "NF!=${expectedColumns} {print NR, NF, \$0}" < ${inputFile}
  else
    awk -F"," "NF!=${expectedColumns} {print NR, NF, \$0}" < ${inputFile} > ${outputFile}
  fi
# else, dump only row number, number of columns
else
  if [ -z "${outputFile}" ]; then
    awk -F"," "NF!=${expectedColumns} {print NR, NF}" < ${inputFile}
  else
    awk -F"," "NF!=${expectedColumns} {print NR, NF}" < ${inputFile} > ${outputFile}
  fi
fi

difference=$(($(date +%s)-timestamp))
echo Time taken: $(date -d@${difference} -u +%H:%M:%S)
@ -1,63 +0,0 @@

#!/bin/bash
# Exit if the variable tests fail
set -e
set -o pipefail

if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
  env
  set -x
fi

# Check the required variables are set
test "$VDB_COMMAND"

# docker must be run in privileged mode for mounts to work
echo "Setting up /app/geth-rw overlayed /app/geth-ro"
mkdir -p /tmp/overlay
mount -t tmpfs tmpfs /tmp/overlay
mkdir -p /tmp/overlay/upper
mkdir -p /tmp/overlay/work
mkdir -p /app/geth-rw

mount -t overlay overlay -o lowerdir=/app/geth-ro,upperdir=/tmp/overlay/upper,workdir=/tmp/overlay/work /app/geth-rw

mkdir /var/run/statediff
cd /var/run/statediff

SETUID=""
if [[ -n "$TARGET_UID" ]] && [[ -n "$TARGET_GID" ]]; then
  SETUID="su-exec $TARGET_UID:$TARGET_GID"
  chown -R $TARGET_UID:$TARGET_GID /var/run/statediff
fi

START_TIME=`date -u +"%Y-%m-%dT%H:%M:%SZ"`
echo "Running the snapshot service" && \
if [[ -n "$LOG_FILE" ]]; then
  $SETUID /app/ipld-eth-state-snapshot "$VDB_COMMAND" $* |& $SETUID tee ${LOG_FILE}.console
  rc=$?
else
  $SETUID /app/ipld-eth-state-snapshot "$VDB_COMMAND" $*
  rc=$?
fi
STOP_TIME=`date -u +"%Y-%m-%dT%H:%M:%SZ"`

if [ $rc -eq 0 ] && [ "$VDB_COMMAND" == "stateSnapshot" ] && [ -n "$SNAPSHOT_BLOCK_HEIGHT" ]; then
  cat >metadata.json <<EOF
{
  "type": "snapshot",
  "range": { "start": $SNAPSHOT_BLOCK_HEIGHT, "stop": $SNAPSHOT_BLOCK_HEIGHT },
  "nodeId": "$ETH_NODE_ID",
  "genesisBlock": "$ETH_GENESIS_BLOCK",
  "networkId": "$ETH_NETWORK_ID",
  "chainId": "$ETH_CHAIN_ID",
  "time": { "start": "$START_TIME", "stop": "$STOP_TIME" }
}
EOF
  if [[ -n "$TARGET_UID" ]] && [[ -n "$TARGET_GID" ]]; then
    echo 'metadata.json' | cpio -p --owner $TARGET_UID:$TARGET_GID $FILE_OUTPUT_DIR
  else
    cp metadata.json $FILE_OUTPUT_DIR
  fi
fi

exit $rc
@ -1,23 +0,0 @@

[database]
name = "cerc_testing"
hostname = "127.0.0.1"
port = 8077
user = "vdbm"
password = "password"

[log]
level = "debug"

[snapshot]
workers = 4
recoveryFile = "snapshot_recovery_file"
# Note: these are overridden in the workflow step
# mode = "postgres"
# blockHeight = 0

[ethereum]
clientName = "test-client"
nodeID = "test-node"
networkID = "test-network"
chainID = 1
genesisBlock = ""
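The loader for this config is not part of the comparison. A sketch of reading just the `[database]` table with `github.com/BurntSushi/toml`; both the dependency choice and the file path are assumptions for illustration:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml" // assumed TOML library, not confirmed by the diff
)

// Only the [database] table of the test config is modeled here.
type config struct {
	Database struct {
		Name     string `toml:"name"`
		Hostname string `toml:"hostname"`
		Port     int    `toml:"port"`
		User     string `toml:"user"`
		Password string `toml:"password"`
	} `toml:"database"`
}

func main() {
	var cfg config
	// "config.toml" is a placeholder path for the file above
	if _, err := toml.DecodeFile("config.toml", &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("postgres://%s@%s:%d/%s\n",
		cfg.Database.User, cfg.Database.Hostname, cfg.Database.Port, cfg.Database.Name)
}
```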
@ -1,30 +0,0 @@

services:
  migrations:
    restart: on-failure
    depends_on:
      - ipld-eth-db
    image: git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:v5.3.0-alpha
    environment:
      DATABASE_USER: "vdbm"
      DATABASE_NAME: "cerc_testing"
      DATABASE_PASSWORD: "password"
      DATABASE_HOSTNAME: "ipld-eth-db"
      DATABASE_PORT: 5432

  ipld-eth-db:
    image: timescale/timescaledb:latest-pg14
    restart: always
    command: ["postgres", "-c", "log_statement=all"]
    environment:
      POSTGRES_USER: "vdbm"
      POSTGRES_DB: "cerc_testing"
      POSTGRES_PASSWORD: "password"
    ports:
      - 0.0.0.0:8077:5432
    volumes:
      - /tmp:/tmp
    healthcheck:
      test: ["CMD", "pg_isready", "-U", "vdbm"]
      interval: 2s
      timeout: 1s
      retries: 3
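A quick connectivity check against the `ipld-eth-db` service as mapped by this compose file; host port 8077 and the credentials come from the file above, while the `github.com/lib/pq` driver is an assumed choice:

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq" // assumed Postgres driver for this check
)

func main() {
	// Host port 8077 maps to Postgres 5432 inside the ipld-eth-db container.
	dsn := "postgres://vdbm:password@localhost:8077/cerc_testing?sslmode=disable"
	db, err := sql.Open("postgres", dsn)
	if err != nil {
		panic(err)
	}
	defer db.Close()
	if err := db.Ping(); err != nil {
		panic(err)
	}
	fmt.Println("ipld-eth-db is ready")
}
```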
@ -1,438 +0,0 @@
|
|||||||
package test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sort"
|
|
||||||
|
|
||||||
"github.com/cerc-io/eth-testing/chains/premerge2"
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
for _, path := range premerge2.Block1_StateNodeLeafKeys {
|
|
||||||
hex := common.BytesToHash(path).String()
|
|
||||||
ChainA_Block1_StateNodeLeafKeys = append(ChainA_Block1_StateNodeLeafKeys, hex)
|
|
||||||
}
|
|
||||||
// sort it
|
|
||||||
sort.Slice(ChainA_Block1_StateNodeLeafKeys, func(i, j int) bool {
|
|
||||||
return ChainA_Block1_StateNodeLeafKeys[i] < ChainA_Block1_StateNodeLeafKeys[j]
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
ChainA = premerge2.ChainData
|
|
||||||
|
|
||||||
ChainA_Block1_StateNodeLeafKeys []string
|
|
||||||
// ChainA_Block1_StateNodeLeafKeys = small2.Block1_StateNodeLeafKeys
|
|
||||||
|
|
||||||
ChainA_Block1_IpldCids = []string{
|
|
||||||
"baglacgzamidvfvv6vdpeagumkeadfy4sek3fwba5wnuegt6mcsrcl2y3qxfq",
|
|
||||||
"baglacgzakk2zjdmtcwpduxyzd5accfkyebufm3j3eldwon6e3gosyps4nmia",
|
|
||||||
"baglacgzaxt5p24gzgsgqqpd5fyheuufvaex4gfojqntngvewfdhe54poe7jq",
|
|
||||||
"baglacgzapngkev2hcarm7bmcwdrvagxu27mgu5tp25y76kzkvjmrggrora4a",
|
|
||||||
"baglacgza5fhbdiu6o3ibtl7jahjwagqs27knhtmehxvoyt6qg7wuodaek2qq",
|
|
||||||
"baglacgzakho5pd5qpbxs7mo3ujd7ejcjyhstznb3xx3fluukdjyybxn4aexa",
|
|
||||||
"baglacgza2dbonmaqxik2vhbnfzd4dhcpyjm47rlbuz35cha3jy7jyxvrsoxa",
|
|
||||||
"baglacgza5gn7vz4ksy4go5joxn3zn2hgzf7sudxlq7fthztqhj2ikql3spva",
|
|
||||||
"baglacgzas6yxvcp5fqb65gglmrm4bd2rwju5uxhoizsq5bchb5rl7a5uh37a",
|
|
||||||
"baglacgzamzsn226lwcfyh6cdetnyzoxsz2zcdze6m2lrg2o5ejl6sr5dwe6q",
|
|
||||||
"baglacgzasogvybtxh67x26ob42m56mlgnxwdelfb24oobk3po3te6yysmmca",
|
|
||||||
"baglacgzab7rmzczswht4isr63gea5uoww4pmqsxrvgzn74wheqwopl36mela",
|
|
||||||
"baglacgza2ovtxz2bp6yccm56iacbpp4kgthyz4k6evyp5lq4rzmp2c23mnhq",
|
|
||||||
"baglacgzajf3sy2bvf2vu2d4hqvj3rvq5lblzp4qptxfb4ulcyayhrrdszghq",
|
|
||||||
"baglacgza4wczwxeuvdhklly5renpmti4x34ilhhmgdlcro5jjpyhowgvdwpa",
|
|
||||||
"baglacgzazikph4bqhr7vgs2xiqpebvoyazj27mftysmy6mzoigkutxdxt7ma",
|
|
||||||
"baglacgzasvwqbzd4k6hoheken36oszbb6b6dvfc46acsyhfqssajcqd4xzcq",
|
|
||||||
"baglacgzaui2r4k54xxqxadyjt25kzovmlelw4obn3fpda6gecswheklvrhia",
|
|
||||||
"baglacgzacq4j5rfibfkuxvwa5ui6zpeq7h6edgmquy3oguz6zxxbdkfw6upa",
|
|
||||||
"baglacgzalihtntqwaqxyc5z3olm3odzztqlq6d27rx5mdt4gu2bdxgwwp7xa",
|
|
||||||
"baglacgzat5btacphq4ie5kecajgxjfgvooqza4zb47w24ibv5yvz2dy7zyea",
|
|
||||||
"baglacgzaet376qv35issfdnd44lpe3xxtmzycg56mibqh3ehd6pxbxj6bpda",
|
|
||||||
"baglacgzafkeckix5qfiuuorchl6xdg2o6vis2qknjirq63vryuqcyl24kwxa",
|
|
||||||
"baglacgzayesgx5kytkdemwcwmhxd435ka4aqqpwm6qugtirlnpyoyjexg2ka",
|
|
||||||
"baglacgzamknqvkqe37lskybr6dimt5ngmihfsmnoe5mi4yvtu7dq7tylh5ua",
|
|
||||||
"baglacgzaniotnde2dyyjhdnud5batwqnq3njuh2gotx6hivafivq4qtt22oq",
|
|
||||||
"baglacgzaov7f7oz4onncim5hhnlbjlz7ozpom26kfh66vjow3w2s2cok6ska",
|
|
||||||
"baglacgzai2u7cil4gzmzbas3pulb7qr4vzlirt5wwiyh57slomwhepqdpfma",
|
|
||||||
"baglacgza6twdmxbxie5v7ht5hdb4mqezel5cuwjxk7xwc5vxfepn4wxcwllq",
|
|
||||||
"baglacgzanax447kk5lah6ed5gqzg2eefwyygfn3l3w6n7eio3w5ohhluo7ca",
|
|
||||||
"baglacgzawxgpzpbsbi43icxcrchpoxxcaugcsvh6eusiswwjrtkdlugveana",
|
|
||||||
"baglacgzajshfqz2lgrejfi37nhstsxmjeh7c2jfok4znn4fezhmr2mlwpzhq",
|
|
||||||
"baglacgza3ask2jt3sjqfdiuxxx3fjipnxzp2u3in6z5d3qflo5fxh7ihmf6a",
|
|
||||||
"baglacgzavtfwj5dsgw4vpplzv3zsw6fwiykcpz2lpclspzq55u42vij2g2pq",
|
|
||||||
"baglacgzaelxcuf3wfrqavkk2uunaqjwp3wiuisjreuarwnbiqtdbrq5kwkuq",
|
|
||||||
"baglacgzajieha4wgbglqnmt4wbooug3ffnvayz2lqkqpop36elnocsvprkeq",
|
|
||||||
"baglacgza424ea7tewjqbcwi5fwcticsbiinwh7ffdf2jeqrmjzrpv7xpo75q",
|
|
||||||
"baglacgzajg3cp7yoxohz7luw4hzvg5cnzcduabrogcqy7ilhwhp64nmsn72a",
|
|
||||||
"baglacgza6ogjls57pq4k35agbzpeydujoq65lpoimp4iv2d6cegrdjk4frwa",
|
|
||||||
"baglacgzaqr6cfr453mxviwkqsjfz3riq3nw3lrh7lmev2nuwoop34mjmgjta",
|
|
||||||
"baglacgza5wvocvjvd6bdjteyzt3y7sdimlfxra6c4ndihqlk3oewgwclny3q",
|
|
||||||
"baglacgzamxpcef5svw5bshjcmx5dtw3jvdnsqxyqdoystvutgpk3dbxaddsa",
|
|
||||||
"baglacgzaihrnrw2zuaucifxzmpyg5kz2evaagrybgq2nm4sif3jhr7mljnka",
|
|
||||||
"baglacgzaydqlktfraw5nig2lsjmigudumpo7vzy4mgn2fza5nvl5ukri577a",
|
|
||||||
"baglacgzab2orhwmiw5gxfsqb3bwckhf3tf5jztbbdn2i5eyk2kvd3zfi7hlq",
|
|
||||||
"baglacgzamfflp7uex2uddjuoly44nywthhnugk4u3tjjvr2542km7rtecsla",
|
|
||||||
"baglacgzasfy3a6qvsisuwzgjm3w7vukbubffxx7ei3eqh7f3v2ftrqrfhiwa",
|
|
||||||
"baglacgzayrdorxqktwlfykcpqo3uhyfds3rlsjy6rcapz42x2lsc64otdonq",
|
|
||||||
"baglacgzajwya3t5k5mqyvipqqlahodjmmsljwe4df42igrc7pdgqzbc725sa",
|
|
||||||
"baglacgzalc6y4rmk42q6ix5cxhpinwyhlbnjobwb4knsqr3xe6qv7m6qkibq",
|
|
||||||
"baglacgzaidbvljbgsc2tpdyjwzcsqpszjotijnbls37ropeazffsoi2wamkq",
|
|
||||||
"baglacgzacuyuir4l6vee5vuf5elh7tvnwzymf44c4qpzu2ipo2tbbyp4e3oq",
|
|
||||||
"baglacgza6coc33lehemkv73byblozayqgaclz6xko4kla5pcptbgwhkyoibq",
|
|
||||||
"baglacgza7uco7rtze752545y336slgt7pczgdpmkb6j65x3yydfsprerba5a",
|
|
||||||
"baglacgza4eanzp6ludjfoqr4h67bzlsxjartrqqeq5t4pv2q3b4x2padxbiq",
|
|
||||||
"baglacgzaoocvbederlpqaufwkwso5pl7qkfnrpd76zj6zbwgj5f4qcygis3a",
|
|
||||||
"baglacgzavx7pxqr4m7pfzcn6tcc7o5pq4g5tp6qvsykkhe6rugqat4a2kuxq",
|
|
||||||
"baglacgzaljiw3say55ek5m3x64e66wcifr5na7vbutyuu3m74gimlh47g44q",
|
|
||||||
"baglacgzaqrzyy5uetfwsqgfvv624scsdw7dx7z42pf47p2m3xuhqwuei27ha",
|
|
||||||
"baglacgzayxrz3npxgaz2byd4onx5phnjyfwxfovjbztg6ddrhwew7pvynq7q",
|
|
||||||
"baglacgzac2cndcn3vq5mnjfoz7kdnboebmshmdmvnb6aatzkwnegyfug3cqq",
|
|
||||||
"baglacgza66vjwzsh6wgfv72zygbwgh2vufhfuagmf36q6r3ycnwxx7yaxqnq",
|
|
||||||
"baglacgzac5uhfzgshqvvqme5iw5rx4n3g5lij4eapzaejzpgm6njrec45qaa",
|
|
||||||
"baglacgza6ta2auxqjgh7o2oj6x3ogcrx4cgfxlupdccrq4j3p5zjnahnq7mq",
|
|
||||||
"baglacgzaaokqnkj6sgq57ikquob6w6uhvo6v7ni6uy677pqzr24f3nyll5eq",
|
|
||||||
"baglacgzavwymwhn2owqnbm43vvqtxgd3ab5caqalvs4sz2tzc4cs74b43q5q",
|
|
||||||
"baglacgzahlzt3rfhisvv5xkcyxc73sm6ijh54n42zfsq76ysi3jisro646fa",
|
|
||||||
"baglacgzaqhglxiq5ptweegtm64wuezj7spc2u2g5prw6zdgnwmjwfxdbn5nq",
|
|
||||||
"baglacgzadztftc3rxrphupebphkbwuzdtnthtyl4pfxga7wghxthe463ncya",
|
|
||||||
"baglacgzaz6agggjviebqoyw3sdos6z3jprjr5fe5vprt7dlarq5gxm2swdvq",
|
|
||||||
"baglacgzasdc5a3pa4mtp46bpsru56aojakeucvy57654mq5o2bjp5mop6l3a",
|
|
||||||
"baglacgzaqwwwnlav6alcw7r2umugzbxppixu6mqp6w6qyriffo27mummjmca",
|
|
||||||
"baglacgzabmrd6yhbgxhmghn5nguatwnzhrondlaxmagzxyzqdm24gooneucq",
|
|
||||||
"baglacgzajblmw25dyrzwsfymo74y5h67v4nrfgxs35eevemvqfui3y7rkszq",
|
|
||||||
"baglacgzaivgvcrgjwicuf4aremv2hbggrnzntrddmydzud6rkbpb3xrbpdia",
|
|
||||||
"baglacgzagpnopg2w6cmfzi3avh7c7ovd6rlwmnpu45kkb3wmlx3etchiggkq",
|
|
||||||
"baglacgzaom4zyvyb6kn2hoiyvwg2ywrwgr7o5fe5c3p42z4vuhfzuxmlaoaa",
|
|
||||||
"baglacgzawj7icprvylimlisn2p2626vxy7ukwps4t67gvrhduz5hlk4aecyq",
|
|
||||||
"baglacgzatnjb6dg7fsz4pesso63i63c3t2agwybbgd3i5u4ezthjuvddspea",
|
|
||||||
"baglacgza5oahzgmmqeqqszmqsfbwaq36gbirizq6aii3zm3jyud3pgndchlq",
|
|
||||||
"baglacgzaxyyowwmsdsveoyjw7ywj67krm3x77iqyy3gzj7fdc4xnzjyirsfa",
|
|
||||||
"baglacgzaew7pv5vcxev3udk3dh4eaezwpjgi2pxwqa3umwmtoiw25q5foqwq",
|
|
||||||
"baglacgzapexdm6koz42fosvv4qjbqhnhsuevh7oqmqwonspl63t2vpjqitha",
|
|
||||||
"baglacgzaixcais2z6gwyafi6bpptra65xswthhpd5g26yr3d6ahn3bl2uvca",
|
|
||||||
"baglacgzaimssao3zceshkgh6gltjqqqh2x5qiodirixcvjqutgvdphog7dma",
|
|
||||||
"baglacgzacgrm2zlg4dogiza57lwcti5r7ga6ucswdsp3mp2277jfa7yx77fa",
|
|
||||||
"baglacgzapsts4gledg5dyjaileaqdcffv5zcw6qooifqxgl26bxsoi2n4waq",
|
|
||||||
"baglacgzagz2qudg5ucppkpoeu5iq5nu6q7527mltt5i5kldaeffx4djhnxoq",
|
|
||||||
"baglacgzao3ht5gq4vbud5g5wbwsx5wejlbvgecqqadditqhk5yhbgw4tkbna",
|
|
||||||
"baglacgzacuetfnthnppfxkfzgfza3exvy7gselbqv2s5b6czidll5exmqwza",
|
|
||||||
"baglacgzaqbgeg6rmbd2zxpucpdd73kb5bmmo6p2p6eonafojtqkwi563ycoq",
|
|
||||||
"baglacgzape6j3mhckl4plr42twds57ctqwvwgku5ymjboy33gue7z5xqwaia",
|
|
||||||
"baglacgzazy26zckarnz3jfpcwpqo6rwr5r4wy7bonmc3rljbkr77uoiyoxca",
|
|
||||||
"baglacgzabadhauzo4lxjpslyal3fb5gfrs55hsycsd5r2mj4mkvcgypcvs4q",
|
|
||||||
"baglacgzao7aftivtmdu4sz3inijqfjajstgwhka2vafiigmr3dz5on43ndvq",
|
|
||||||
"baglacgzahtfb5mhojo7zknjhyhnf6o6d65wkz22ellgvxvz2cf32dhrno35q",
|
|
||||||
"baglacgzasx2czupgncbldxwxkqkxez6tt2oldw4iocqrhc7gk6bgp26g2slq",
|
|
||||||
"baglacgzaqeijuarx6vrtycc5267h5g3xzgskgaylrftmyjq7vjouxvkb5cvq",
|
|
||||||
"baglacgzalc42jtx44sibtcvjjhz6drbt54y6lcxy6ucmngi7cvdbajiebndq",
|
|
||||||
"baglacgzahbvb5fbnx2ddikyx4lulfcrftvw3mxpy4bpziskruce3xhz5tcpq",
|
|
||||||
"baglacgzafgf6pv43422ibuujti24hazwtn3ohwylzgo3mt6qu7dven4zlqdq",
|
|
||||||
"baglacgzamet5xv7ury7dnkqy5yltgbvalcl4ricsvdduy7hskmyxslvsa5sa",
|
|
||||||
"baglacgzakxelvpgmk3loheqewteco3z4pusavgv3cjj4xzylahmsiqkwovxq",
|
|
||||||
"baglacgzacqbsc6t7cqligdehacd4kjg2xlpdtbjhd5xtngqswaiiqpdrsj5a",
|
|
||||||
"baglacgza72em77piwedfycox3l4y7qbskqlptpcy7r725im2tpsj23si57ga",
|
|
||||||
"baglacgza636axkok5ao37hjupoeksmk73f3rpimd745avfcoxzwz53bp3xiq",
|
|
||||||
"baglacgza5n7yqni36tyi7clfxxfqciib6j4e3fru6ye3eticdb4b5i6k4m4q",
|
|
||||||
"baglacgzanbkitjrv36vsbyxc2fazsncuapltoqi5yxyntfjtp52dfmw5z64a",
|
|
||||||
"baglacgzazswo2typlq7izwoll6w4xnd3dszwktreiszh3b7w2kt2ucll5okq",
|
|
||||||
"baglacgza44bydaixin7ymaidhsaawjsemc2wkds62ahiaqrtctpvzo6xitaq",
|
|
||||||
"baglacgzay2b7jkphp4kufkhmwiriduyg5kgmqyzjojikd6hvib4bycl6fkga",
|
|
||||||
"baglacgza245jp2gg7wvxvbuvdxxynbsfzynj767o5dv6tkgsaghgsfsmvfya",
|
|
||||||
"baglacgza7hvenpvtima4lqksljjfeiou2lwhy6h7qvmdaxrvp6iglprd5ecq",
|
|
||||||
"baglacgzarrbzhd34po574cixc6tk2wd4escxarqzoqnlmplqkirhq2ms6wla",
|
|
||||||
"baglacgza6wjkyvgipgaxhclghpthoftpkarjiprp4g2smf5b2foc6nj7e7oq",
|
|
||||||
"baglacgzavtod2r5swzrok7fapkssy4mufrtid37trvz2jxzhnifxh7rdgxdq",
|
|
||||||
"baglacgzaaju4hfbrfcsgxp2dqrqdjrrfdjwjhbcubmmum3wsveqgsisv5sjq",
|
|
||||||
"baglacgzagfnw4qkfwuqlrd7v7nryxergohxb5s6lmw2xxgsl4zikwh6odu4q",
|
|
||||||
"baglacgza3ieihinvg2srwi7dupigwsahksvrlhninkyxt4ewb426uqmqtjnq",
|
|
||||||
"baglacgzaapcyag7sitbiyxcdbbj5m6l64vhx4gt4hbhvdwgjuhoezwlmw5hq",
|
|
||||||
"baglacgzam3qbvtektatlypk7kkdidh6fra67umeugmy7dz77fful7rl6ulia",
|
|
||||||
"baglacgzaeifznjadvk52cuv3qvbitazdkkavu4q3detg7xqhmsuykaemme3q",
|
|
||||||
"baglacgzaqdcmhkhjwdwatfshq4axfenrhggqceqrz47yiupwweqknnrvqfya",
|
|
||||||
"baglacgzanr74m4zutwqp4ybkpgdyborqoccfnigwlv6ze3hyou5jlrrnxchq",
|
|
||||||
"baglacgza5zaewwegrxjtaezosakyqpplolmav35eqfdyjju5okk3tmogbtkq",
|
|
||||||
"baglacgzavsgqcwu6m2hvq574yoi7vyzzqhaak5yjn4cflnbn6t4oqce6zysa",
|
|
||||||
"baglacgzafnsgu5ksxa4sv2kcmn2x62m2e7losf3ljqdlt7akoixyso4wi6kq",
|
|
||||||
"baglacgzatcbgkfcnzesrtyfe5hxe2yuqek2hvgmwvla2zjo3i4rvhnb2k7yq",
|
|
||||||
"baglacgzavzdzgv2mihwc6qop5hkv37hhx26dmnq75sfg3jf4nkq5vd4pjvja",
|
|
||||||
"baglacgza3oids2arkgomy6bblcggrwooaqyj3foxbxiawhckxhyc5phxqzgq",
|
|
||||||
"baglacgzaj2yfzqrtpjd6luyv7spcs4xyrmrifsxm663zznegzt2omto7ktgq",
|
|
||||||
"baglacgzaegino24jsful2fjnpe3haf3hhztdzzm626rdtmksxauccfzv335a",
|
|
||||||
"baglacgzazvm5p6m3ynh74glcwhuxtw7b3hv47ml5y6mtif2whmklebfd2mka",
|
|
||||||
"baglacgzak7v5o37lheriih5julg5c37gc3wpxmxudysjo6fttnju65efl4ma",
|
|
||||||
"baglacgzafkusmmr2rw7vijysdeldocemzrvwszho6nbvxakcy3buf3ytk4oq",
|
|
||||||
"baglacgzafiiwa2wygo4qm76xt3tekscp4ioub4u34vz2aqptp56frudzgjkq",
|
|
||||||
"baglacgza5vqm4jugxseggsbniznupli2bivz4drwupzzyfubqmt2cggrk7wa",
|
|
||||||
"baglacgzae27ionu7mlu3ojudqd4a2ywhyrenxw7zrshr4jhy4ld2fqpgkkia",
|
|
||||||
"baglacgzajdmyteoo6aovcp4w2wfnqlwp7hhncrgkajtqm3fzbxo3zhoko5na",
|
|
||||||
"baglacgzaan3c7frug6yo5tyyv7kzn6bzrxtwkwy35bmuvikkq3v4i6suovpa",
|
|
||||||
"baglacgza7p3a62673mtcsidsps3ep3atul26nzldgscxv66rvkmqj2gjdejq",
|
|
||||||
"baglacgza37tily665vel2tvvcavpqtj7n3qot3zxvpsog63iqkxmfldastva",
|
|
||||||
"baglacgzaeuvjvxxqf42qg44zjlnpje3ls7kpu2hx36uho45n27jjikys2jiq",
|
|
||||||
"baglacgzab5yedqfwm3pczaqnqfvsondxhdyorywu27q6strjbc4ixq3glizq",
|
|
||||||
"baglacgzanynqqlgddfsdtm27kvidm35d75yocvndtsdeijt7z64xkilxin4a",
|
|
||||||
"baglacgzai5bxsipie422mzr6u2itm3wgfyg7p425rcqn2hg4453fxnepaa2q",
|
|
||||||
"baglacgzaarg23ok2cd5nr6jc4ocetujiqb7nnrft42xvfuh2vbs35dfyqr2a",
|
|
||||||
"baglacgza4ztanbjvytkd7462vy5jbgwoqypahkw6gzi6a2h3ktsisf4wajla",
|
|
||||||
"baglacgzaqp33qaf7bfj5w6e4k63cbrc3oqemubyxgjmv7wjcroatsqflba3q",
|
|
||||||
"baglacgzamwsrbjbo7pyf4ftaizzj2lsqdqhivh7pu2evcgraenjg6sx573oa",
|
|
||||||
"baglacgzagf4zu7uebnql22h7pmuxotzjcs2y7y7o3dz3nsogfou4dqxa7pja",
|
|
||||||
"baglacgzaaqveulltjfdqenhsig3nzfwdwxso3ndbgovg2gnczkqop7vpbbvq",
|
|
||||||
"baglacgza22ifq7h6bot66tpn5xudjfcqtydvk7bcang7lxosyfum4ifhd4cq",
|
|
||||||
"baglacgzarr6a6fovyug5em3cqkzmggna2nvjohihdin5ffn4f7k3cm2qc5gq",
|
|
||||||
"baglacgzaao5djij6f4x3jp3qszkawqwusvofe2mhloopb55yoyzfqxkezgsq",
|
|
||||||
"baglacgzavcbrgucanfxqhbshrz2hv62vfkrtrhlv5qx6swbc3zavqvcn6zta",
|
|
||||||
"baglacgzark7ier7445klswjg5eqx5qxoiibq5mrmbctybd2ffu4gwffqkwyq",
|
|
||||||
"baglacgzacahqtmufgqhyzdgynhxsezldqc4merrerrf3y4jw5d64umjg24oa",
|
|
||||||
"baglacgzasfdhsvcjbujhmmosulzzu3w2xvyccu66qf76rwrkxgrqke7fy3oq",
|
|
||||||
"baglacgzast2lxo3sgtk5qtnp64mwxyjuozwyt5v3rg4ytrnleporcqmb62ua",
|
|
||||||
"baglacgzauwwnb3h5pxhm2h3tmcxrc3t52jlbpibalnpywnu34p74pbge6wuq",
|
|
||||||
"baglacgzasb5vgdsv56jygtmspwoswmezrfnp2kray7xhuszshqa2dfrs3ypa",
|
|
||||||
"baglacgzabhaasbte4bwnubvxduslb4am2dotafbel5lxvzki3wn5rs4dl24q",
|
|
||||||
"baglacgzaqm53klhsbyfek6wnzmzsah7iz2km2euk75yapvez7fyl73gfxhxa",
|
|
||||||
"baglacgzawaf7gawvue34nkiksyyrpizlmtkuu275e2xxhaxiirhsmmoeo5zq",
|
|
||||||
"baglacgzaaqtskzrmoaoexhra66tmvdxne353oxcxuzq2dca75ldjrqqhoiaq",
|
|
||||||
"baglacgzao4txzget4reg6nj6uwptwdu2n6sohzyfeivkdwdzvziouna2uvua",
|
|
||||||
"baglacgzanm2vfedt2eqsljbb3iwri7hu73bnb3rqgrurkmrsacfzejju2nda",
|
|
||||||
"baglacgzavxzbb6zhtlf42msx27zozxk4a6twphs4qsxchlrt2ny6t5we2t3q",
|
|
||||||
"baglacgza267mwypnyml7gmua2bifcmpndmtwzzw2dfjox3dfixo25uopnmda",
|
|
||||||
"baglacgzat2wiom6pryjqdoptciek3ckt3ctgdeujprivuey6ypgfsjypr65a",
|
|
||||||
"baglacgzavz4xq4u5fosiyz7ldtzluikmtco4k3mv4xsrnppjz5omgutz6abq",
|
|
||||||
"baglacgzacj4uv2ru2opsecdduklxkbxl4vkvyk3ercuunh7nsgfxit3h23mq",
|
|
||||||
"baglacgzav3o4q33y7amd7bgpfs5xc3kog57nnhbruh2s36pziymkmv32dpgq",
|
|
||||||
"baglacgza7hx5cpakzowq2h26ocionl2t2p6ifhui6pju5xug6wgifi2xkv7a",
|
|
||||||
"baglacgzaty5w2ykcxoxf2zfdcr742hzezg32vyanvv2qz6hbox7atjqknqrq",
|
|
||||||
"baglacgzaoyoxana7gxkhxwj47iiqjv76y3ktnk3kootf3pzfpxcpmzp6ptma",
|
|
||||||
"baglacgza4x65ftjd3telo3eyyzrgosshvnlu7kj7enzezkwiowxsentq2twa",
|
|
||||||
"baglacgza2u7imlxl3apzarjovwuegtp52a5h546qnvw3hzumxr6qlx7yd3aa",
|
|
||||||
"baglacgzay2imkpytg6m7kmq7oloogxzgfc6t7sm77spappsm2iajkdsqif7a",
|
|
||||||
"baglacgza2gxxoee4k2cxdf24whfylc7x2eb6eshvrunugemjp766sxhbx6qq",
|
|
||||||
"baglacgzaz6sqay6zefbflfsyrt43nsszivnrywlokmridmcox45ehavr2bxq",
|
|
||||||
"baglacgzawx34khb3fvi5s7yxduvtrjg7dj6avtc6wdpenpxp6tih6xwsbymq",
|
|
||||||
"baglacgzaxh6czvlet4gmuorror6l6m7qrr4ymkolyr4lzofbme763w2peijq",
|
|
||||||
"baglacgzaw7it5iumtdpxyfxvlizcwsthfsemmyjqmb5cq24hemei6dftsjtq",
|
|
||||||
"baglacgzapevdnthqwueqltoge7dt2cuxvijmhep7rw6cnp44pemp6sluitka",
|
|
||||||
"baglacgzaesu7doagjxn3mknma6nifhvfjoznwlgjqomq6jpxlcejioxu2upq",
|
|
||||||
"baglacgzahojkgpcys6csj4cos62mt6fwb32xsoca3l42qci34zqjmtyvd7gq",
|
|
||||||
"baglacgzauefudv2ingzufqe36jloewm3xketyjvnc4e4djtpbathwjm66a2a",
|
|
||||||
"baglacgza6z2kpaqbk2lezgrkqrznv3c7uaomvab6646z7qo6n3rsbz3qpbka",
|
|
||||||
"baglacgzaeqh6atyhyht4qqqvcyuxdg3uqfu5x2mujowput5bjcuor4vnzrla",
|
|
||||||
"baglacgzatwt5s5k74dcvrm6d32p5zx47fcxgihzyzf4hwbnxhkzcvzj26pra",
|
|
||||||
"baglacgzaszpquuoaaaq3auktxvag6h3fuwpnnrv3chfrymdwb5khdqwfxa7q",
|
|
||||||
"baglacgzaf2bu6l5bt57gstxyudjbbrj6jddfac3qmr5jnkt6tgwbj3qpfavq",
|
|
||||||
"baglacgzaeph54ay7tbgyox3437nbngzluz2k4kkqmjh6ymgbuakg2c3mf2da",
|
|
||||||
"baglacgza2wso6cd6qxxk7kwtcgcx6gg3nztqk7h3kepb7if653mn7magazfq",
|
|
||||||
"baglacgzax6ioorxkqyls3kmv2ntmfhsbptavrrtit2vy6zmgbnltjjbyogpa",
|
|
||||||
"baglacgzawf46giyla7nssrdtvzl7afycmj4y7dcvdr2vwvtfvtqscxhocdfa",
|
|
||||||
"baglacgzamyk5sdzyg2vnuzaqmbwwzqbbh2xxgfcouukhmcjcudy2jdw2dy7q",
|
|
||||||
"baglacgzaizfqoqu2aubz4iutcsjnnrrfdkdayamouoiaixkznmnmcg24pktq",
|
|
||||||
"baglacgzazcudtwhvet6q264rgjonf6nt2a3omigym5wpabkq23kdeyvxqr6a",
|
|
||||||
"baglacgzatymnlewdcj7uqohfdcrcszva7nzezhgib6risqpenllqdfch3i3q",
|
|
||||||
"baglacgzat2pxiuhdayqh4ma4ss3wxk2uyipuciqonxig3z6jitc5kdmrozha",
|
|
||||||
"baglacgzafokb5hx5vy5ltj4ee6ndad7c5fbak3j34ap2j4u2i3mbt5oeqkzq",
|
|
||||||
"baglacgzakuwsijjghgtk4522uhpxad73slbechnou4ug6fmniqebzals2bza",
|
|
||||||
"baglacgzaxl62rn4xijbrpvuzkbb5awzhuasuihynltlwwau4lij3rn64rb3a",
|
|
||||||
"baglacgzairaleq3xeadqowm7ec7kvxmbjsmqrltobjcqjso545a3zdcge72a",
|
|
||||||
"baglacgzao4vipuem6ogey2f73z3qs2cxdk6rn7jygxfzajegxuxfcxktyewq",
|
|
||||||
"baglacgzafufkadgo6qcmddvnavloopfzmozwxi3p4h3mjn5jw2xmj5ws2ipq",
|
|
||||||
"baglacgzai3dvv53agiud47vx3fs6gpqg5gvjze5xsecatnh5l34e6pgocbia",
|
|
||||||
"baglacgzawug56abirtemcm2skgyexstfmmrvivru3xjcgdyxqtj7ef3jxnjq",
|
|
||||||
"baglacgzau4tmywowb37dv47edd7pl5af222ba23pfrlukvkbersc6vrv4qwa",
|
|
||||||
"baglacgzabqzaabcpgd4pnucu3izbykoognju5kc5qwtfkualy5r6todywowq",
|
|
||||||
"baglacgza2g5mo2mblvbfjjrm6xk2ppf6jplupamowaqb4j67szvaytx3wfra",
|
|
||||||
"baglacgzaw7ftkn6xzbnwyvievvi5xuoqeodvbdwirel2cvx4a6kracedtiza",
|
|
||||||
"baglacgza6anvax7pis7sukuzo6t27drgmckh2ahdork3wmzhqquidlakjpqq",
|
|
||||||
"baglacgzaywc4cisesa54dmxrzulfzvg37ldoe3vebiqoncqtrhdxaypepf6q",
|
|
||||||
"baglacgza5ndtrasv47fgrnbpuvqyaam4mhrn2ma37yqce3lkotlzl5vqc2ta",
|
|
||||||
"baglacgzargpxdk5rrrwjkyiyx5lh7ldctn27p2ksnbz6ikot3cv3nw5vqaqq",
|
|
||||||
"baglacgza4rw4nllzvg5j3kvvrsisd3jcwgq7htdege42ris6ddkpiti65ala",
|
|
||||||
"baglacgzaoao7i2mmwuopg2gfx5m3xn34fayjdrov2yolscqtz7vi5emdqdna",
|
|
||||||
"baglacgzavwgvvyakic262434m7kigrzlmqautwbknymr4fyngjkobh3cyl7a",
|
|
||||||
"baglacgza6gta5cebz7fs3riluwgde3gmtjw2qkd4dzpvnuqbovr344aaldca",
|
|
||||||
"baglacgzao6ru6zkgi7lknzzc4xogdvi5bkoux6gaoj4rejbazar7yavge5ta",
|
|
||||||
"baglacgza2lsx6yk2i5iiy3tasnjvgqult7a4y5lhpi7lr5pxhvq52cvp6x2q",
|
|
||||||
"baglacgzatou7j5blylumwrr5hfsck3hqrasegy55ewwgldtwew3uykaszcmq",
|
|
||||||
"baglacgzaqi5dqutwokxefveag2nibmfzylw6szglsntiybeh4e2bmb6f2xxa",
|
|
||||||
"baglacgzaovkdfxjerufbq24zzqm767juiyt4hcu4ivlpvxh447w66rpfvtka",
|
|
||||||
"baglacgzawez7iipzfpgi2jirdwusmbvhdjporhu77ejvoam7duwmequa4isa",
|
|
||||||
"baglacgzazlnsvtqu4zd5tjtz5bct7d2aqiotmfsfg4eg62bki6qiti6fdl4q",
|
|
||||||
"baglacgzagfqonr7vtlbdofwm34pkoz325axn2v4pxyxbdly5enjbfnwo6eyq",
|
|
||||||
"baglacgzaljokkpwqxdoaoyrmsml6b7b7zfiqefbhwxlmexxepy2d5wuyekya",
|
|
||||||
"baglacgzabu6rq7xkdr5uoe2eunlx773yg2kk2h2lho53ef3c4adky2jhs6fq",
|
|
||||||
"baglacgzab2hdhand5g57pqt4uslpy2mz6rqnkwlvw27bczvsc2tj2m3pr3ba",
|
|
||||||
"baglacgzaugsxw7cthfl3fg2rlhemgut2hhitktn3bovkjd5hawrvi5ss7gsa",
|
|
||||||
"baglacgza6wtl5yiy32ruo22c75ysjtnxrghptmimp6fp2pq3ilpaxqyn6c2q",
|
|
||||||
"baglacgzauokbnjmp7gn4sz7e247j7ift5hrueq4zzq577m557j3bmqnwfixq",
|
|
||||||
"baglacgzac2lofvuakrf675xzz6hh2ahgbd3z77gxc3ofrjolqjqj7dqhzopa",
|
|
||||||
"baglacgzabsc4xuh7rbvblytwkhn4swzctyu43ba36xoehvuc7cpmbnkd3ska",
|
|
||||||
"baglacgzayunrwjhott4rnqk7fniizbsv55apaqalgup2fnf66qip6aartkcq",
|
|
||||||
"baglacgza3zbafsnpvwa5xw4xpjmx3ndhmuhynaoxxrzwcnfxi6o4rbwpu2hq",
|
|
||||||
"baglacgzaqm4ijihatant626rqycd33xaerqj77zivb5iwmgyaqwgysc3zf6q",
|
|
||||||
"baglacgzal6llyltmvocfvqgxq5ltwunaus5ntfhl5ze5f35kd67oj6y5lq6q",
|
|
||||||
"baglacgzauyqu2gqzcc2xtmahbe4bnlubzp2thteevnp6bfd3kxpnxozq74rq",
|
|
||||||
"baglacgzazklwtf65v4dpdcms6yqh4t3kawlz2b5m5lmwk2afq6eqc7gg2bvq",
|
|
||||||
"baglacgzaoyn5xje7zjq52lswouegf3w64k4zhyqp6iclfsyj7wgjfjwyvicq",
|
|
||||||
"baglacgzanrcxybniprkx7bhw3ggpwn2uuigb33ifkdxuavbt2niu6mzmo7pq",
|
|
||||||
"baglacgzaxxsmknpbqxei7ffyjb7fhqtvfrwxr4t6zloyavtkt3jygvsldlra",
|
|
||||||
"baglacgzaaiqagvbyp2jrclsjllilvba5ajksvpj6rsygtcx5suskigolta4q",
|
|
||||||
"baglacgzatghruydgf4lodn6vmjtvfpvf755goj3jkeusdwia5pixldcqjmtq",
|
|
||||||
"baglacgzamfrwerukgoisehrxqlnefyww7ohkihngxxjnm6pcbpydoxagcwda",
"baglacgza4ypfm4rxwsoejwhza3housicojqliaimccsupm4nrmjrxhj3n6ca",
"baglacgzagp3wukeubt7wqrdq5okknvbyh6rueyo5t2np5rg2whot573jq2qq",
"baglacgzaxjrq5medoijedijmlrkevn32vsthf6vhgtojvtlttxo2ze5brbja",
"baglacgzarwmkoc2al7nxgjxdysbzdiq4yfcbthxhbs4hkquxxnevsoxnwc7a",
"baglacgza2jleouo2qqbrfv7uc73q6aw4svm74ltjhzhsqhpmqdcsxmvjxurq",
"baglacgzajno3x77dsi7inf4voolwgevuslix7ays2u6oh3z5mq2klkwbj6hq",
"baglacgzar2p263trvudcq3qwjppcpfgzmxc4taacjtekhkfzsqtatl2wp27q",
"baglacgza5efjepjsmz2y65dfccco56i5jvrkn3wochllzfze6k3o54qkvlaq",
"baglacgzaxrwu73uyvnvmbfuepvcxeryunic3ozbn6t5uxwypoy4puej6z52a",
"baglacgza5ux3uey7vxvn5miif5lf77ywz2yar5utavxdcqbai4lma4446hqa",
"baglacgzaufpcg6e6rm62ybb2a35vwtk2ptqt4z74pj3zmii6rx3a3dwnnw7a",
"baglacgzabnitw6kehgnmpyrjdk343qnzt4cekjlmypymhnvvylkq5k2ptcdq",
"baglacgzauckhnf4srmqecrryxiflfpf6kavfhm3d4qmjzkxg27f5dj3546cq",
"baglacgzapxzpwc5xrysx6y74fs6pybyqlfly3olnv5zaazqsbuztbopuc6jq",
"baglacgzaqtea7gzv2h3jroibscowoifdm64hvqievgvxg4v6kymat7e22ncq",
"baglacgzantxg5ciyqddbw2tjz5kwrbh2lmxikruq5ifa4xcfsiwfgs2fheja",
"baglacgzajv4bm22iarh5ykhneljp2ooi35xyvkqezny5hilsq2cw62et76bq",
"baglacgzajiyfhc7uqabfypgpvip6dildryb7c4epz3tzxsoejbliwozlbphq",
"baglacgzahsh7cceh3en65fkgjesotsxs3pqbhflxzv5kdkxnz67jd7c4pczq",
"baglacgzaz7hm3bnvwozlapazmwe5hu5zxtin37ab6aam32p6hsvudxdkbila",
"baglacgzaz5yvtye7y27sz7oitmxfgt5yvqdzcn6z6x2vxar7rvluzqoh6dfa",
"baglacgzafelbojewhho2qlzz2d7txvh7ycbjntfmqkwdxkiw6raesraqfznq",
"baglacgzawat7pexa2n2lq74lyoq6axky2qzzyf3h6sa6hrucjc3z45elm6zq",
"baglacgzahwk3er5cckpklgmlw57cna2p5hkwwekjkkh4iz62pm5ybievfqta",
"baglacgzabi63cfckdctmkqdhbcdwszzatr3bfcyyuaocrgnypedvjmjog2za",
"baglacgza4fxgurqdgfxs7ja427ikr7e2rxfhzi3hmov6hg4z55l3qow7kaiq",
"baglacgzaxq3k23qmqsllx7iz2ymhliqz2jewob2nckhdd2wkxtf3rb5drpwq",
"baglacgza5nzqr7e7b3h2gmbxz24vdcmfcoadnzbie6nbtvigpyfigqerrxja",
"bagmacgzakvveqidigvmttsk2gqjl3mqscorqcsb63mnwiqbpwzvmt42ygwmq",
"baglacgzalodtjmdplb7dy2p5arsxk7nyszh6lhsyzxe4lgkdgrp6rymxzela",
"baglacgzauzvc7x64vjf6wlwaisddf4vf6hjsfmtlypnadtb5i7kbbasizmma",
"baglacgzaixlti7he2ffvgp6raqotxkdsekh5qy4duv3tmtn6kvn4n6sjuu2a",
"baglacgzathtbu757wgovtxofbnlsnsyad662vbnn6aqk3oyyx6xixtxsw3oq",
"baglacgzaz6ajmdnij27zbfrxugyesam5i6m6cezxfveoxjadnolwjelszw4a",
"baglacgzaxzceixddm72q4dlup2gwlsoxfykcejxavmskrbravtwa5xcvnktq",
"bagmacgzavl6vwffg5wwncspbcc5go5vgktznx76kgqeqfputhuarce7soubq",
"baglacgzawksvmxhdtwfx7k5silyip4c3ojz255cast2bmycgzxozpb2rys7a",
"baglacgzaywze5wn2o5cvdrdekjdjeet3tt36r3wfzwpcop54iumbvrex6zpa",
"baglacgzakbsr5nin4suyz7r3xxzcxkuel6fghs6zrbw2yi5ez2xo7nloerpa",
"baglacgzay5ujimrt4qi2ksavtfjysqjsn5m6ysxizi6hg3gqhpnuj362d7nq",
"baglacgza7q5xdqz6fzvxprpesta5w763wrduopyahwxtpdd2mo5jx47qasoq",
"baglacgzaisv2zdtclyzxlffct55zevsfb6wxmu462ft7et5qahpdqrnmcsba",
"baglacgza5yyio2rxxtbrkpk7vvv2iyp7pfp4bkismdma3mk6qkxlhsiy4f2a",
"bagmacgzaugn6dwvyjeqblgmuhrlxoerqgrzpev6uhsmi5f752q7kfsdiuqxa",
"baglacgzaq4oyzbuduaeeg3ww6bzspstpbtcb7tiyswmaaymfpvao2hqwxcva",
"baglacgzabqho5affvmsfef3cnd4xsw66l42d6ena4g2xedujct6qsd7o4a2q",
"baglacgzapohhuiobc6gsqb2pcv5vb7fil3rfyeswr74os4dnzpg2zn337bka",
"baglacgzaovc4t2yesyqvzvdsybtp5k2y4tb6xy676gwnwsr5qoztogehxj4q",
"baglacgzami2ovudshhpsyi6vbuq5fycfgmv3hyx3bjacvlsxqc4chz6vgcda",
"bagmacgzafb27j6ni6j5vwm7kfxfwfuqau7m4raff5v44ulu77z5wwp2bpnaq",
"baglacgzaqw7dbrzdyxhjsdn22orpgfzxxwdqcf7hn7ugy4hl665cckc5oxja",
"baglacgza5psrwfh6u2vklqex6jigq5hjscatynwnge4z5y6xeztn4lo6h7ga",
"baglacgzauiscf2uzdir25zlogw4qpzwriy6mtgsyzl7omehok3jpmskk3knq",
"baglacgzas4zhiutice4t5if7jai4vedxkmo3adigxbrdpixm22b7kw5exsya",
"baglacgza3tax6aemhf6t2lqknaazzsksu2c4fjllgjx2izlkv47qmhzfgtwq",
"baglacgzakncmprlqvhlj4nfejd7odbude6hmeykm6wspwqpm7bg3xoqi5dxq",
"baglacgzaa5igkis4qk25v4ko6eryts6watdot3ark5uzlxm3o7j3izolxala",
"bagmacgzaomwzsxiv5cwrrjquk4ryb6z4u4xhuu5xhpznph2oyb53ixrsvvca",
"baglacgzafjhvq54vejfj2vrvtidr6nlt3e4azkw5jg6kdnr2dot6edm6mzsa",
"baglacgzasvs7p7bsxtnrb5fz25cx5gyh43tqja74ywrhwpmt27gnni4z3qda",
"baglacgzagrolvdnsflcwzcmqnbbyon3enber2hlamdf77kvhwousoyznwika",
"baglacgzahkj5ojwxjb4hjzi3klmnkngghkrknco7ddr3gb6a23fquoeladzq",
"baglacgza2zihxbb2gl2daaft5miumsjqbps3xgmip2r52ubrpii5zkpshpvq",
"baglacgzakhvmbzxior7nsroicglbhkbvts3weihhcrqqz54dhcgosaavgiea",
"baglacgzaqlswzpybvsbc3fqkr4iekizldlug3ak6qsuthtu5qtybmtij2lia",
"baglacgzaajspycacn5bhe4dpspprjoayo72z54wmrxz5n7m2g7of3eazijqq",
"baglacgzax7i3elt7nndzjenb5xkogpgelmcmmtn6lqp5v6kvyfqe7m5k5sya",
"bagmacgzauubmsoyzddcmmu2niwj24a5fui72cdv4gd73ocalff576jcg4qwq",
"baglacgzasqqcuuppbzjikphak2gz56fnuysk4vnlq6andul7yvwolmswisiq",
"baglacgzam2xbzezi7l6vlyicgx6i3kpiqceh5veonhmpa4pjny3eibaeolwq",
"baglacgzabirgkutruwdjfcpl6bkujicvpsixkwfjh5hmuy7xoamdysl23dsq",
"bagmacgzayktazfgfoa6a7g5ijetwofgbp4aphqxbok53sqoc7pfydslq2moa",
"baglacgzalvkdmoxvvqpflgq235nahqiw4xofhxzhuio2eljusr7uhrch7nnq",
"baglacgzazsxzdrr4wtg24th2crzvzt66fhg7dy3zppagpy2nn5eesdrsaq5a",
"baglacgza2vpmjbvshqsmj3qfuh2qfcx5kg654uhqbknb3ok25ppmhnfd35sa",
"baglacgzadcjenr5pr6xnfr6t7b64rnnfdv4h634k2zm2y34roiuuwpp75vga",
"bagmacgzau7hv4cknn43r7hxusbijdicen3yvpftldneg5zc2xmstgvhft2ra",
"baglacgza4fxgo45wl7zhyqula5ahuljoi6lreftfcwskipwmhrcejv35j42a",
"baglacgzasoghibkt6mikv6sjvnvv6zci47gjmnkumjzxhlei4tvq53e4jstq",
"baglacgzaivd7643lhy6s535ukinqa24onqywzkfnfhhi5r7uvawxtiw7urza",
"baglacgzaqwe44wrh2zpa7ogoka44yx6hox6w55jnndhymz4nerazqjgxedua",
"bagmacgzaha7rcryssphnazakbiunmc42bokxd5sgzrbo5cnilp3g2zt3vnxq",
"baglacgzab7lroi2stb2cmi6awpfpwpsl3bwawwvr64ijpng5dhz5nes5owgq",
"baglacgza6l4kyy7nsrg2lahabyhvclpuncic2sqtzvmefqofpuq5lnsdhmra",
"baglacgzacsbz24qw6iy2vviclvzaegksg22ryng66bhuxpj4dl6pcg32wzxq",
"baglacgzazrli3jvfluavjdjwgkt3qktktnuh6set2t7ib7hzhanobmwxwvla",
"baglacgzankthcaoqchi4el7hhhxyhmclkikkhyxy4grgexml7wyrnnch5bxq",
"bagmacgzaf2zl6rp5iq55dx4ln6oas4tkjrrffihxrfvbggqidy42p5sewoeq",
"baglacgzav7vn47ouq6zebmg3img7nmada6ag4hx25uouzqxttyptyudr46bq",
"bagmacgzasc5m55cldco577of6ixny4h6fggfrzpfeptodx67pw6g2zl7punq",
"baglacgzaerhefaw75qz4to3wkfrm53spfzrzaaz2ss3cbvikf7djipv5ql6a",
"baglacgzahax3xfs4df4ywelodmzk2zgztppqt6hu5vgihyntrd722dxixrra",
"baglacgzaeqyhcnkoumzym36selclrief3po2p4yj62juga6r7ueszzq7fsaq",
"baglacgza6oydtjhtene6qxdyfuiwjqmjbzn7c25nzhxez6bh3nvp2irj3xta",
"bagmacgzae3xnnb2gakf4g2plivvx2pxeowvbn42ol2vazgh55w44lhv4koya",
"baglacgza3esavhjnlbi5awux74zqkm2n7wybahq6gip4e6osxm6k22x2r7ea",
"baglacgzatxyuvssxlehlznynti47jiaoyj5kqevfdmu7yj4npmjr6l6uyhfq",
"bagmacgzattugdfyxhykoayz5xbgor3vdfrkfj3v6svdxsjkwis2fw4l6rbaq",
"baglacgzaf4sjbg7ya3pq737z7im3pmp5vubrly25hfkvea6n7pfapib63kyq",
"bagmacgzagkghv6zmldxt7dcbc6uoxuzw6gtb2jczcbt63hc2v2khs3fmtb6q",
"baglacgzavy2t2fxjdf7pgnx6dzz46eczpnjdwveeiihq5ev42guggtnivpxa",
"bagmacgzajkxbxnhzvomtm3vz3rtsokavrzinenk3anvvqwog6tg6byve76nq",
"baglacgzahkjgb63xoh6ke37ztl4npobu2gkyh3ae3jjii4daodh7utnujiqa",
"baglacgzacthcbn5p3sqfzmpzrndyhbcmneuptrfwr7s5disl54oz5nxm5s2q",
"baglacgzam24ldzjqb3puomhwshglrtjcyrcpkpva2wybbkltfws6tor5tp7a",
"baglacgzaqkecamlmyav757mjtk5ecnaglh6qnxy6bidzmkd6yksbcarz63ja",
"bagmacgzaquqfnzlnbsk5idejdyvjpchlahovlbrt3degno72rl4dc6htsymq",
"baglacgzaecczvtf4q7l2mhitw2tn4y26ysaolmicnoc542wkyvvrs47o7a3a",
"baglacgzavs7qjikqvxuxkpz5liqdyqrzaonkllqw6kd4lf2cxjltxxlgz2gq",
"baglacgzawwi2ftqcgz7numopfulozj6cp7ke3pyims3e5kbftljwnfxlfica",
"bagmacgzavhhx6zz2bphhn7kagmvp5bqbkqurbnen5jcosojtups6smg2lumq",
"bagmacgzao5vkivv2triaryb3qk4edkopf7a6qv4m7lgvzeavqbhk4mk7c75q",
"bagmacgzaolr6fbgupow3wcs4ufbb4elz2pvjbtaqpbnsnn2pxcub6d46qqma",
"bagmacgza3x3z3mfdnugicnf2cq54wva42r4vvgrlv2fmuc5cjogysy6cu56q",
"bagmacgzagatdibfm73qqhufragifh7zsid6oim6gtnyjqmlhgkc7uwehzzga",
"bagmacgzamsaplavqsdtlvhzyovqewgkyk26azgp6tfdbzz5ux3423eajsita",
"bagmacgzarsrnwni34m76ucixyqhwmzjzdoj4xyqzcepbbxzzg5kim7edr7dq",
"bagmacgza7dy7xmpxwsbntbqeqd7oxob76vfiw3wb5llbzr6s6joxyalft6oa",
"bagmacgzaxfz6yd2i64il66pwg2eeqv2vzpuh7hkmnazgxob4e2xwecacvaha",
"bagmacgzaxrdsjyn4vafqvzadwgre564iakz2owgrueiyjr7nh7evfwksnizq",
"bagmacgzaxqrzefztg4772fnaxzrwhela4py4iybnsucowa2ybg3jolflfdba",
"bagmacgza6ccvgsnpnp4ev7elzixnumoi56gfcon6deu65m62jotlncubrsya",
"bagmacgzayjy6dcno5mo3lvm5p7uh27lde656pt5drfqzafsfsgles7pdztpa",
"bagmacgza2ved5k3y3gr3yqiixnhlzwelsmbxmyknsvg4ci4jiltww5alcxma",
"bagmacgzamq3lujnpelx5hm2l6heowtohkwhuliyq6r34yty4hrurctkscnla",
"bagmacgza45idxjlztz32umn34eyqymjmuf5syw6mr6ry6jtgoxupcvgckfvq",
"bagmacgzafi3v5u4p4fgckxsrbf4u3zz64gfszz7pyihxhqio7ztn77yjwcqq",
"bagmacgzatjwpysdg24pamvqso3g4tjchz72pdxsqweyuubc2jrdeusscvmra",
"bagmacgzasj4lqrtjnu3scovz2iff5nblapntc46ojefc545s6ozwablz7rrq",
"bagmacgzas7lcbavos6lvsurhbzlpekgh35dgarm7nye26e7wwrooolwfbpnq",
"bagmacgzasmhzm736xpvahwm6jogaqeuieqsteffkfxfsq4gm6eb4q35a5d5a",
"bagmacgzaw4bsyt4rnl5koaclh3bkzwk6ez72sj6j5ghsks5a2r675l3tyytq",
"bagmacgzacmg7rh342shchhjofzwlwxrej2psqkf43jurovkweqpniytdzvha",
"bagmacgzacy2ji662bc7ppplvkyxlvjxqiwyo4j2ie4xtck6l2zwtbf2w3i7a",
"bagmacgza5ecbawirj6ojccw6zijnxoq75543fywirgps24qtzurn7zbravqq",
"bagmacgza2vdmjsrcpith2klzmzbqgjbcg5dcj3iqtm6zjbemlxagxlhk5z3a",
"bagmacgzae7ci4iimzrxac2dl4lkkdgotl4hb5dpwesunhil4cy56rbq2zvta",
"bagmacgzai7cz3jllwk7tjde52kror5ktrkjlsbfwmhh6kssctc4fq2f34scq",
"bagmacgzabu4xfmjm7dg6rf2fjjn62f57ilrchh3v4gbf62erabtzu5wm2gxq",
"bagmacgzanjgius6avm37j2fq46oahss3cw4g5ntlfjzf5sbtguzppyai6pta",
"bafkrwibagt3z4drtwcxgx34uquzaeg5m5miwvxzgczdyoa56y2yxgkprzq",
"baglacgza5n2ivltmbqypzfjptsvbzvlvhpbcbzlr7xj6xb7zaallj3q3bu4a",
"baglacgzal5gkbdbs4srzs7iostmji3r5gypmlubclwonqxdn5dkxfoyktheq",
"baglacgzaeggi6pqszfefbd2or7verp6bbz6b7ctkszxi6yalsypnivkrc47a",
"baglacgzawxfq5gj2pt53idroosz6eahmfmrwxuz5fpciiwmiuts7l4a6k2eq",
"baglacgzaj46wxqbpstd5eicctecpdxhffmbuenzqmd3bt5jdjykdr7aeo3aa",
"baglacgza7lwpiwksommncl7ofw4nqxcu7qse2aqhxizwuapds5mtxaa24ypq",
"baglacgza7wkyigp25224rkrivwellawayv3y3r4mobbqc6xxmgscxgiq3gea",
"baglacgzazrwcvecxj5bq6pyshnxvp35apsxcdtfzacvfbvsrnaa2vag4wnza",
"baglacgzabchzwz3pjqtrnx35rjav3gmxeh6sbw3l7mjpwrb6gbiz5r4ltcgq",
"baglacgzaokokv2ioov6fjlkgkufj4yrplnxdw47r4rqhighqnb354ea4jaaq",
"baglacgza5gcozkl7fbpnys3d7uqzmawqsuvic5lrti4hznllfferepgxojja",
"baglacgza34suygwx22xxdd2fynck4x6fjrrhoaxloeni45znn5ewpk3g7lea",
"baglacgzasrizkrumchv6zypcuhr5fmtz66ej5cnup5sjbapxpj27ttj3u5xq",
"baglacgzad3w24kle2itl3jm2kxq6cysoj4xoflsrhrw55msc6meagt6laetq",
"baglacgzazixckhuckariike5abthcbdjgmgz5rcysbuaucijz5d7a3avqvpa",
"baglacgzapdoq2uowvqcis3dlzxug57bwzas2dyhefu3f4frrqdz3yknzdxtq",
"baglacgzabbcknaso72duwyoeqd4i2gyghf4avilk565nkzduap6h5jwcosza",
}
)
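
The base32 strings above are IPLD block keys: CIDv1 identifiers wrapping a Keccak-256 multihash of a trie node's RLP encoding. The differing `baglacgza...`/`bagmacgza...` prefixes appear to reflect the two trie codecs (state vs. storage), and the lone `bafkrwi...` entry a raw block. A minimal sketch of how such a key can be recomputed, using the same helpers the deleted fixture file below uses (the node bytes here are a placeholder, not real fixture data):

    package main

    import (
        "fmt"

        "github.com/cerc-io/plugeth-statediff/indexer/ipld"
        "github.com/ethereum/go-ethereum/crypto"
    )

    func main() {
        nodeRLP := []byte{0x80} // placeholder node encoding
        // Wrap the Keccak-256 hash of the RLP in a CID for the state-trie codec.
        cid := ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(nodeRLP))
        fmt.Println(cid.String()) // prints a base32 "baglacgza..."-style key
    }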
@ -1,525 +0,0 @@
package test

import (
    "math/big"

    "github.com/cerc-io/eth-testing/chains/premerge1"
    "github.com/cerc-io/plugeth-statediff/indexer/ipld"
    "github.com/cerc-io/plugeth-statediff/indexer/models"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/rlp"
)

var ChainB = premerge1.ChainData

var ChainB_block1_Header = types.Header{
    ParentHash:  common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177"),
    UncleHash:   common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
    Coinbase:    common.HexToAddress("0x0000000000000000000000000000000000000000"),
    Root:        common.HexToHash("0x53580584816f617295ea26c0e17641e0120cab2f0a8ffb53a866fd53aa8e8c2d"),
    TxHash:      common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
    ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
    Bloom:       types.Bloom{},
    Difficulty:  big.NewInt(2),
    Number:      big.NewInt(1),
    GasLimit:    4704588,
    GasUsed:     0,
    Time:        1492010458,
    Extra:       []byte{215, 131, 1, 6, 0, 132, 103, 101, 116, 104, 135, 103, 111, 49, 46, 55, 46, 51, 133, 108, 105, 110, 117, 120, 0, 0, 0, 0, 0, 0, 0, 0, 159, 30, 250, 30, 250, 114, 175, 19, 140, 145, 89, 102, 198, 57, 84, 74, 2, 85, 230, 40, 142, 24, 140, 34, 206, 145, 104, 193, 13, 190, 70, 218, 61, 136, 180, 170, 6, 89, 48, 17, 159, 184, 134, 33, 11, 240, 26, 8, 79, 222, 93, 59, 196, 141, 138, 163, 139, 202, 146, 228, 252, 197, 33, 81, 0},
    MixDigest:   common.Hash{},
    Nonce:       types.BlockNonce{},
    BaseFee:     nil,
}

var chainB_block1_stateNodeRLP = []byte{248, 113, 160, 147, 141, 92, 6, 119, 63, 191, 125, 121, 193, 230, 153, 223, 49, 102, 109, 236, 50, 44, 161, 215, 28, 224, 171, 111, 118, 230, 79, 99, 18, 99, 4, 160, 117, 126, 95, 187, 60, 115, 90, 36, 51, 167, 59, 86, 20, 175, 63, 118, 94, 230, 107, 202, 41, 253, 234, 165, 214, 221, 181, 45, 9, 202, 244, 148, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 160, 247, 170, 155, 102, 71, 245, 140, 90, 255, 89, 193, 131, 99, 31, 85, 161, 78, 90, 0, 204, 46, 253, 15, 71, 120, 19, 109, 123, 255, 0, 188, 27, 128}
var chainB_block1_stateNodeCID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(chainB_block1_stateNodeRLP))
var block_stateNodeLeafKey = "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb"

var ChainB_block1_StateNodeIPLD = models.IPLDModel{
    BlockNumber: ChainB_block1_Header.Number.String(),
    Key:         chainB_block1_stateNodeCID.String(),
    Data:        chainB_block1_stateNodeRLP,
}

var ChainB_block1_EmptyRootNodeRLP, _ = rlp.EncodeToBytes([]byte{})
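
`ChainB_block1_EmptyRootNodeRLP` is the RLP encoding of the empty byte string, i.e. the single byte `0x80`; its Keccak-256 hash is the canonical empty-trie root, the same `0x56e81f17...b421` value used as `TxHash` and `ReceiptHash` in the empty-block headers in this file. A quick standalone check (a sketch, not part of the fixture file):

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/crypto"
        "github.com/ethereum/go-ethereum/rlp"
    )

    func main() {
        enc, err := rlp.EncodeToBytes([]byte{})
        if err != nil {
            panic(err)
        }
        fmt.Printf("rlp:  %x\n", enc)                 // 80
        fmt.Printf("root: %x\n", crypto.Keccak256(enc))
        // root: 56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421
    }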
var ChainB_block1_StateNode0 = models.StateNodeModel{
    BlockNumber: ChainB_block1_Header.Number.String(),
    HeaderID:    ChainB_block1_Header.Hash().Hex(),
    CID:         chainB_block1_stateNodeCID.String(),
    Diff:        false,
    Balance:     "1000",
    Nonce:       1,
    CodeHash:    crypto.Keccak256Hash([]byte{}).Hex(),
    StorageRoot: crypto.Keccak256Hash(ChainB_block1_EmptyRootNodeRLP).Hex(),
    Removed:     false,
    StateKey:    block_stateNodeLeafKey,
}

var chainB_block1_storageNodeRLP = []byte{3, 111, 15, 5, 141, 92, 6, 120, 63, 191, 125, 121, 193, 230, 153, 7, 49, 102, 109, 236, 50, 44, 161, 215, 28, 224, 171, 111, 118, 230, 79, 99, 18, 99, 4, 160, 117, 126, 95, 187, 60, 115, 90, 36, 51, 167, 59, 86, 20, 175, 63, 118, 94, 2, 107, 202, 41, 253, 234, 165, 214, 221, 181, 45, 9, 202, 244, 148, 128, 128, 32, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 160, 247, 170, 155, 102, 245, 71, 140, 90, 255, 89, 131, 99, 99, 31, 85, 161, 78, 90, 0, 204, 46, 253, 15, 71, 120, 19, 109, 123, 255, 0, 188, 27, 128}
var chainB_block1_storageNodeCID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_block1_storageNodeRLP))

var ChainB_block1_StorageNodeIPLD = models.IPLDModel{
    BlockNumber: ChainB_block1_Header.Number.String(),
    Key:         chainB_block1_storageNodeCID.String(),
    Data:        chainB_block1_storageNodeRLP,
}

var ChainB_block1_StorageNode0 = models.StorageNodeModel{
    BlockNumber: ChainB_block1_Header.Number.String(),
    HeaderID:    ChainB_block1_Header.Hash().Hex(),
    StateKey:    block_stateNodeLeafKey,
    StorageKey:  "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
    Removed:     false,
    CID:         chainB_block1_storageNodeCID.String(),
    Diff:        false,
    Value:       []byte{1},
}

// Header for the last block, at height 32
var ChainB_Block32_Header = types.Header{
    ParentHash:  common.HexToHash("0x6983c921c053d1f637449191379f61ba844013c71e5ebfacaff77f8a8bd97042"),
    UncleHash:   common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
    Coinbase:    common.HexToAddress("0x0000000000000000000000000000000000000000"),
    Root:        common.HexToHash("0xeaa5866eb37e33fc3cfe1376b2ad7f465e7213c14e6834e1cfcef9552b2e5d5d"),
    TxHash:      common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
    ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
    Bloom:       types.Bloom{},
    Difficulty:  big.NewInt(2),
    Number:      big.NewInt(32),
    GasLimit:    8253773,
    GasUsed:     0,
    Time:        1658408469,
    Extra:       []byte{216, 131, 1, 10, 19, 132, 103, 101, 116, 104, 136, 103, 111, 49, 46, 49, 56, 46, 50, 133, 108, 105, 110, 117, 120, 0, 0, 0, 0, 0, 0, 0, 113, 250, 240, 25, 148, 32, 193, 94, 196, 10, 99, 63, 251, 130, 170, 0, 176, 201, 149, 55, 230, 58, 218, 112, 84, 153, 122, 83, 134, 52, 176, 99, 53, 54, 63, 12, 226, 81, 38, 176, 57, 117, 92, 205, 237, 81, 203, 232, 220, 228, 166, 254, 206, 136, 7, 253, 2, 61, 47, 217, 235, 24, 140, 92, 1},
    MixDigest:   common.Hash{},
    Nonce:       types.BlockNonce{},
    BaseFee:     nil,
}
// State nodes for all paths at height 32
// Total 7
var ChainB_Block32_stateNode0RLP = []byte{248, 145, 128, 128, 128, 160, 151, 6, 152, 177, 246, 151, 39, 79, 71, 219, 192, 153, 253, 0, 46, 66, 56, 238, 116, 176, 237, 244, 79, 132, 49, 29, 30, 82, 108, 53, 191, 204, 128, 128, 160, 46, 224, 200, 157, 30, 24, 225, 92, 222, 131, 123, 169, 124, 86, 228, 124, 79, 136, 236, 83, 185, 22, 67, 136, 5, 73, 46, 110, 136, 138, 101, 63, 128, 128, 160, 104, 220, 31, 84, 240, 26, 100, 148, 110, 49, 52, 120, 81, 119, 30, 251, 196, 107, 11, 134, 124, 238, 93, 61, 109, 109, 181, 208, 10, 189, 17, 92, 128, 128, 160, 171, 149, 11, 254, 75, 39, 224, 164, 133, 151, 153, 47, 109, 134, 15, 169, 139, 206, 132, 93, 220, 210, 0, 225, 235, 118, 121, 247, 173, 12, 135, 133, 128, 128, 128, 128}
var ChainB_Block32_stateNode0CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode0RLP))
var ChainB_Block32_stateNode1RLP = []byte{248, 81, 128, 128, 128, 160, 209, 34, 171, 171, 30, 147, 168, 199, 137, 152, 249, 118, 14, 166, 1, 169, 116, 224, 82, 196, 237, 83, 255, 188, 228, 197, 7, 178, 144, 137, 77, 55, 128, 128, 128, 128, 128, 160, 135, 96, 108, 173, 177, 63, 201, 196, 26, 204, 72, 118, 17, 30, 76, 117, 155, 63, 68, 187, 4, 249, 78, 69, 161, 82, 178, 234, 164, 48, 158, 173, 128, 128, 128, 128, 128, 128, 128}
var ChainB_Block32_stateNode1CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode1RLP))
var ChainB_Block32_stateNode2RLP = []byte{248, 105, 160, 32, 21, 58, 188, 102, 126, 135, 59, 96, 54, 200, 164, 107, 221, 132, 126, 42, 222, 63, 137, 185, 51, 28, 120, 239, 37, 83, 254, 161, 148, 197, 13, 184, 70, 248, 68, 1, 128, 160, 168, 127, 48, 6, 204, 116, 51, 247, 216, 182, 191, 182, 185, 124, 223, 202, 239, 15, 67, 91, 253, 165, 42, 2, 54, 10, 211, 250, 242, 149, 205, 139, 160, 224, 22, 140, 8, 116, 27, 79, 113, 64, 185, 215, 180, 38, 38, 236, 164, 5, 87, 211, 15, 88, 153, 138, 185, 94, 186, 125, 137, 164, 198, 141, 192}
var ChainB_Block32_stateNode2CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode2RLP))
var ChainB_Block32_stateNode3RLP = []byte{248, 105, 160, 32, 252, 41, 63, 199, 2, 228, 43, 156, 2, 63, 9, 72, 38, 84, 93, 180, 47, 192, 253, 242, 186, 3, 27, 181, 34, 213, 239, 145, 122, 110, 219, 184, 70, 248, 68, 1, 128, 160, 25, 80, 158, 144, 166, 222, 32, 247, 189, 42, 34, 60, 40, 240, 56, 105, 251, 184, 132, 209, 219, 59, 60, 16, 221, 204, 228, 74, 76, 113, 37, 226, 160, 224, 22, 140, 8, 116, 27, 79, 113, 64, 185, 215, 180, 38, 38, 236, 164, 5, 87, 211, 15, 88, 153, 138, 185, 94, 186, 125, 137, 164, 198, 141, 192}
var ChainB_Block32_stateNode3CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode3RLP))
var ChainB_Block32_stateNode4RLP = []byte{248, 118, 160, 55, 171, 60, 13, 215, 117, 244, 72, 175, 127, 180, 18, 67, 65, 94, 214, 251, 151, 93, 21, 48, 162, 216, 40, 246, 155, 234, 115, 70, 35, 26, 215, 184, 83, 248, 81, 10, 141, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112}
var ChainB_Block32_stateNode4CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode4RLP))
var ChainB_Block32_stateNode5RLP = []byte{248, 105, 160, 51, 151, 227, 61, 237, 218, 71, 99, 174, 161, 67, 252, 97, 81, 235, 205, 154, 147, 246, 45, 183, 166, 165, 86, 212, 108, 88, 93, 130, 173, 42, 252, 184, 70, 248, 68, 1, 128, 160, 54, 174, 96, 33, 243, 186, 113, 120, 188, 222, 254, 210, 63, 40, 4, 130, 154, 156, 66, 247, 130, 93, 88, 113, 144, 78, 47, 252, 174, 140, 130, 45, 160, 29, 80, 58, 104, 206, 141, 36, 93, 124, 217, 67, 93, 183, 43, 71, 98, 114, 126, 124, 105, 229, 48, 218, 194, 109, 83, 20, 76, 13, 102, 156, 130}
var ChainB_Block32_stateNode5CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode5RLP))
var ChainB_Block32_stateNode6RLP = []byte{248, 105, 160, 58, 188, 94, 219, 48, 85, 131, 227, 63, 102, 50, 44, 238, 228, 48, 136, 170, 153, 39, 125, 167, 114, 254, 181, 5, 53, 18, 208, 58, 10, 112, 43, 184, 70, 248, 68, 1, 128, 160, 54, 174, 96, 33, 243, 186, 113, 120, 188, 222, 254, 210, 63, 40, 4, 130, 154, 156, 66, 247, 130, 93, 88, 113, 144, 78, 47, 252, 174, 140, 130, 45, 160, 29, 80, 58, 104, 206, 141, 36, 93, 124, 217, 67, 93, 183, 43, 71, 98, 114, 126, 124, 105, 229, 48, 218, 194, 109, 83, 20, 76, 13, 102, 156, 130}
var ChainB_Block32_stateNode6CID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ChainB_Block32_stateNode6RLP))

var ChainB_Block32_StateIPLDs = []models.IPLDModel{
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         ChainB_Block32_stateNode0CID.String(),
        Data:        ChainB_Block32_stateNode0RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         ChainB_Block32_stateNode1CID.String(),
        Data:        ChainB_Block32_stateNode1RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         ChainB_Block32_stateNode2CID.String(),
        Data:        ChainB_Block32_stateNode2RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         ChainB_Block32_stateNode3CID.String(),
        Data:        ChainB_Block32_stateNode3RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         ChainB_Block32_stateNode4CID.String(),
        Data:        ChainB_Block32_stateNode4RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         ChainB_Block32_stateNode5CID.String(),
        Data:        ChainB_Block32_stateNode5RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         ChainB_Block32_stateNode6CID.String(),
        Data:        ChainB_Block32_stateNode6RLP,
    },
}
var ChainB_Block32_StateNodes = []models.StateNodeModel{
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        CID:         ChainB_Block32_stateNode2CID.String(),
        Diff:        false,
        Balance:     "0",
        Nonce:       1,
        CodeHash:    common.HexToHash("0xe0168c08741b4f7140b9d7b42626eca40557d30f58998ab95eba7d89a4c68dc0").Hex(),
        StorageRoot: common.HexToHash("0xa87f3006cc7433f7d8b6bfb6b97cdfcaef0f435bfda52a02360ad3faf295cd8b").Hex(),
        Removed:     false,
        StateKey:    "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        CID:         ChainB_Block32_stateNode3CID.String(),
        Diff:        false,
        Balance:     "1000",
        Nonce:       1,
        CodeHash:    crypto.Keccak256Hash([]byte{}).Hex(),
        StorageRoot: crypto.Keccak256Hash(ChainB_block1_EmptyRootNodeRLP).Hex(),
        Removed:     false,
        StateKey:    "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb",
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        CID:         ChainB_Block32_stateNode4CID.String(),
        Diff:        false,
        Balance:     "1000",
        Nonce:       1,
        CodeHash:    crypto.Keccak256Hash([]byte{}).Hex(),
        StorageRoot: crypto.Keccak256Hash(ChainB_block1_EmptyRootNodeRLP).Hex(),
        Removed:     false,
        StateKey:    "0x67ab3c0dd775f448af7fb41243415ed6fb975d1530a2d828f69bea7346231ad7",
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        CID:         ChainB_Block32_stateNode5CID.String(),
        Diff:        false,
        Balance:     "1000",
        Nonce:       1,
        CodeHash:    crypto.Keccak256Hash([]byte{}).Hex(),
        StorageRoot: crypto.Keccak256Hash(ChainB_block1_EmptyRootNodeRLP).Hex(),
        Removed:     false,
        StateKey:    "0x9397e33dedda4763aea143fc6151ebcd9a93f62db7a6a556d46c585d82ad2afc",
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        CID:         ChainB_Block32_stateNode6CID.String(),
        Diff:        false,
        Balance:     "0",
        Nonce:       1,
        CodeHash:    common.HexToHash("0x1d503a68ce8d245d7cd9435db72b4762727e7c69e530dac26d53144c0d669c82").Hex(),
        StorageRoot: common.HexToHash("0x36ae6021f3ba7178bcdefed23f2804829a9c42f7825d5871904e2ffcae8c822d").Hex(),
        Removed:     false,
        StateKey:    "0xcabc5edb305583e33f66322ceee43088aa99277da772feb5053512d03a0a702b",
    },
}
// Storage nodes for all paths at height 32
// Total 18
var chainB_Block32_storageNode0RLP = []byte{248, 145, 128, 128, 128, 128, 160, 46, 77, 227, 140, 57, 224, 108, 238, 40, 82, 145, 79, 210, 174, 54, 248, 0, 145, 137, 64, 229, 230, 148, 145, 250, 132, 89, 198, 8, 249, 245, 133, 128, 160, 146, 250, 117, 217, 106, 75, 51, 124, 196, 244, 29, 16, 47, 173, 5, 90, 86, 19, 15, 48, 179, 174, 60, 171, 112, 154, 92, 70, 232, 164, 141, 165, 128, 160, 107, 250, 27, 137, 190, 180, 7, 172, 62, 97, 13, 157, 215, 114, 55, 219, 14, 244, 163, 155, 192, 255, 34, 143, 154, 149, 33, 227, 166, 135, 164, 93, 128, 128, 128, 160, 173, 131, 221, 2, 30, 147, 11, 230, 58, 166, 18, 25, 90, 56, 198, 126, 196, 130, 131, 1, 213, 112, 129, 155, 96, 143, 121, 231, 218, 97, 216, 200, 128, 128, 128, 128}
var chainB_Block32_storageNode0CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode0RLP))
var chainB_Block32_storageNode1RLP = []byte{248, 81, 160, 167, 145, 134, 15, 219, 140, 96, 62, 101, 242, 176, 129, 164, 160, 200, 221, 13, 1, 246, 167, 156, 45, 205, 192, 88, 236, 235, 80, 105, 178, 123, 2, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 160, 18, 136, 22, 150, 26, 170, 67, 152, 182, 246, 95, 49, 193, 199, 219, 163, 97, 25, 243, 70, 126, 235, 163, 59, 44, 16, 37, 37, 247, 50, 229, 70, 128, 128}
var chainB_Block32_storageNode1CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode1RLP))
var chainB_Block32_storageNode2RLP = []byte{236, 160, 32, 87, 135, 250, 18, 168, 35, 224, 242, 183, 99, 28, 196, 27, 59, 168, 130, 139, 51, 33, 202, 129, 17, 17, 250, 117, 205, 58, 163, 187, 90, 206, 138, 137, 54, 53, 201, 173, 197, 222, 160, 0, 0}
var chainB_Block32_storageNode2CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode2RLP))
var chainB_Block32_storageNode3RLP = []byte{226, 160, 32, 44, 236, 111, 71, 132, 84, 126, 80, 66, 161, 99, 128, 134, 227, 24, 137, 41, 243, 79, 60, 0, 5, 248, 222, 195, 102, 201, 110, 129, 149, 172, 100}
var chainB_Block32_storageNode3CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode3RLP))
var chainB_Block32_storageNode4RLP = []byte{236, 160, 58, 160, 42, 17, 221, 77, 37, 151, 49, 139, 113, 212, 147, 177, 69, 221, 246, 174, 8, 23, 169, 211, 148, 127, 69, 213, 41, 166, 167, 95, 43, 239, 138, 137, 54, 53, 201, 173, 197, 222, 159, 255, 156}
var chainB_Block32_storageNode4CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode4RLP))
var chainB_Block32_storageNode5RLP = []byte{248, 67, 160, 58, 53, 172, 251, 193, 95, 248, 26, 57, 174, 125, 52, 79, 215, 9, 242, 142, 134, 0, 180, 170, 140, 101, 198, 182, 75, 254, 127, 227, 107, 209, 155, 161, 160, 71, 76, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6}
var chainB_Block32_storageNode5CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode5RLP))
var chainB_Block32_storageNode6RLP = []byte{248, 67, 160, 58, 53, 172, 251, 193, 95, 248, 26, 57, 174, 125, 52, 79, 215, 9, 242, 142, 134, 0, 180, 170, 140, 101, 198, 182, 75, 254, 127, 227, 107, 209, 155, 161, 160, 71, 76, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6}
var chainB_Block32_storageNode6CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode6RLP))
var chainB_Block32_storageNode7RLP = []byte{248, 67, 160, 50, 87, 90, 14, 158, 89, 60, 0, 249, 89, 248, 201, 47, 18, 219, 40, 105, 195, 57, 90, 59, 5, 2, 208, 94, 37, 22, 68, 111, 113, 248, 91, 161, 160, 71, 111, 108, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8}
var chainB_Block32_storageNode7CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode7RLP))
var chainB_Block32_storageNode8RLP = []byte{248, 67, 160, 50, 87, 90, 14, 158, 89, 60, 0, 249, 89, 248, 201, 47, 18, 219, 40, 105, 195, 57, 90, 59, 5, 2, 208, 94, 37, 22, 68, 111, 113, 248, 91, 161, 160, 71, 111, 108, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8}
var chainB_Block32_storageNode8CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode8RLP))
var chainB_Block32_storageNode9RLP = []byte{248, 145, 128, 128, 128, 128, 160, 145, 86, 15, 219, 52, 36, 164, 68, 160, 227, 156, 111, 1, 245, 112, 184, 187, 242, 26, 138, 8, 98, 129, 35, 57, 212, 165, 21, 204, 151, 229, 43, 128, 160, 250, 205, 84, 126, 141, 108, 126, 228, 162, 8, 238, 234, 141, 159, 232, 175, 70, 112, 207, 55, 165, 209, 107, 153, 54, 183, 60, 172, 194, 251, 66, 61, 128, 160, 107, 250, 27, 137, 190, 180, 7, 172, 62, 97, 13, 157, 215, 114, 55, 219, 14, 244, 163, 155, 192, 255, 34, 143, 154, 149, 33, 227, 166, 135, 164, 93, 128, 128, 128, 160, 173, 131, 221, 2, 30, 147, 11, 230, 58, 166, 18, 25, 90, 56, 198, 126, 196, 130, 131, 1, 213, 112, 129, 155, 96, 143, 121, 231, 218, 97, 216, 200, 128, 128, 128, 128}
var chainB_Block32_storageNode9CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode9RLP))
var chainB_Block32_storageNode10RLP = []byte{236, 160, 48, 87, 135, 250, 18, 168, 35, 224, 242, 183, 99, 28, 196, 27, 59, 168, 130, 139, 51, 33, 202, 129, 17, 17, 250, 117, 205, 58, 163, 187, 90, 206, 138, 137, 54, 53, 201, 173, 197, 222, 160, 0, 0}
var chainB_Block32_storageNode10CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode10RLP))
var chainB_Block32_storageNode11RLP = []byte{236, 160, 58, 160, 42, 17, 221, 77, 37, 151, 49, 139, 113, 212, 147, 177, 69, 221, 246, 174, 8, 23, 169, 211, 148, 127, 69, 213, 41, 166, 167, 95, 43, 239, 138, 137, 54, 53, 201, 173, 197, 222, 160, 0, 0}
var chainB_Block32_storageNode11CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode11RLP))
var chainB_Block32_storageNode12RLP = []byte{248, 81, 128, 128, 160, 79, 197, 241, 58, 178, 249, 186, 12, 45, 168, 139, 1, 81, 171, 14, 124, 244, 216, 93, 8, 204, 164, 92, 205, 146, 60, 106, 183, 99, 35, 235, 40, 128, 128, 128, 128, 128, 128, 128, 128, 160, 82, 154, 228, 80, 107, 126, 132, 72, 3, 170, 88, 197, 100, 216, 50, 21, 226, 183, 86, 42, 208, 239, 184, 183, 152, 93, 188, 113, 224, 234, 218, 43, 128, 128, 128, 128, 128}
var chainB_Block32_storageNode12CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode12RLP))
var chainB_Block32_storageNode13RLP = []byte{248, 81, 128, 128, 160, 79, 197, 241, 58, 178, 249, 186, 12, 45, 168, 139, 1, 81, 171, 14, 124, 244, 216, 93, 8, 204, 164, 92, 205, 146, 60, 106, 183, 99, 35, 235, 40, 128, 128, 128, 128, 128, 128, 128, 128, 160, 82, 154, 228, 80, 107, 126, 132, 72, 3, 170, 88, 197, 100, 216, 50, 21, 226, 183, 86, 42, 208, 239, 184, 183, 152, 93, 188, 113, 224, 234, 218, 43, 128, 128, 128, 128, 128}
var chainB_Block32_storageNode13CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode13RLP))
var chainB_Block32_storageNode14RLP = []byte{226, 160, 57, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1}
var chainB_Block32_storageNode14CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode14RLP))
var chainB_Block32_storageNode15RLP = []byte{226, 160, 57, 13, 236, 217, 84, 139, 98, 168, 214, 3, 69, 169, 136, 56, 111, 200, 75, 166, 188, 149, 72, 64, 8, 246, 54, 47, 147, 22, 14, 243, 229, 99, 1}
var chainB_Block32_storageNode15CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode15RLP))
var chainB_Block32_storageNode16RLP = []byte{226, 160, 49, 14, 45, 82, 118, 18, 7, 59, 38, 238, 205, 253, 113, 126, 106, 50, 12, 244, 75, 74, 250, 194, 176, 115, 45, 159, 203, 226, 183, 250, 12, 246, 4}
var chainB_Block32_storageNode16CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode16RLP))
var chainB_Block32_storageNode17RLP = []byte{226, 160, 49, 14, 45, 82, 118, 18, 7, 59, 38, 238, 205, 253, 113, 126, 106, 50, 12, 244, 75, 74, 250, 194, 176, 115, 45, 159, 203, 226, 183, 250, 12, 246, 4}
var chainB_Block32_storageNode17CID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(chainB_Block32_storageNode17RLP))
var ChainB_Block32_StorageIPLDs = []models.IPLDModel{
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode0CID.String(),
        Data:        chainB_Block32_storageNode0RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode1CID.String(),
        Data:        chainB_Block32_storageNode1RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode2CID.String(),
        Data:        chainB_Block32_storageNode2RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode3CID.String(),
        Data:        chainB_Block32_storageNode3RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode4CID.String(),
        Data:        chainB_Block32_storageNode4RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode5CID.String(),
        Data:        chainB_Block32_storageNode5RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode6CID.String(),
        Data:        chainB_Block32_storageNode6RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode7CID.String(),
        Data:        chainB_Block32_storageNode7RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode8CID.String(),
        Data:        chainB_Block32_storageNode8RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode9CID.String(),
        Data:        chainB_Block32_storageNode9RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode10CID.String(),
        Data:        chainB_Block32_storageNode10RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode11CID.String(),
        Data:        chainB_Block32_storageNode11RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode12CID.String(),
        Data:        chainB_Block32_storageNode12RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode13CID.String(),
        Data:        chainB_Block32_storageNode13RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode14CID.String(),
        Data:        chainB_Block32_storageNode14RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode15CID.String(),
        Data:        chainB_Block32_storageNode15RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode16CID.String(),
        Data:        chainB_Block32_storageNode16RLP,
    },
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        Key:         chainB_Block32_storageNode17CID.String(),
        Data:        chainB_Block32_storageNode17RLP,
    },
}
var ChainB_Block32_StorageNodes = []models.StorageNodeModel{
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        Diff:        false,
        Removed:     false,
        StorageKey:  "0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace",
        CID:         chainB_Block32_storageNode2CID.String(),
        Value:       []byte{137, 54, 53, 201, 173, 197, 222, 160, 0, 0},
        StateKey:    "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
    }, // 0
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        Diff:        false,
        Removed:     false,
        StorageKey:  "0x4e2cec6f4784547e5042a1638086e3188929f34f3c0005f8dec366c96e8195ac",
        CID:         chainB_Block32_storageNode3CID.String(),
        Value:       []byte{100},
        StateKey:    "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
    }, // 1
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        Diff:        false,
        Removed:     false,
        StorageKey:  "0x6aa02a11dd4d2597318b71d493b145ddf6ae0817a9d3947f45d529a6a75f2bef",
        CID:         chainB_Block32_storageNode4CID.String(),
        Value:       []byte{137, 54, 53, 201, 173, 197, 222, 159, 255, 156},
        StateKey:    "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
    }, // 2
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        Diff:        false,
        Removed:     false,
        StorageKey:  "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b",
        CID:         chainB_Block32_storageNode5CID.String(),
        Value:       []byte{},
        StateKey:    "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb",
    }, // 3
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        Diff:        false,
        Removed:     false,
        StorageKey:  "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b",
        CID:         chainB_Block32_storageNode6CID.String(),
        Value:       []byte{160, 71, 76, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6},
        StateKey:    "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
    }, // 4
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        Diff:        false,
        Removed:     false,
        StorageKey:  "0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b",
        CID:         chainB_Block32_storageNode7CID.String(),
        Value:       []byte{},
        StateKey:    "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb",
    }, // 5
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        Diff:        false,
        Removed:     false,
        StorageKey:  "0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b",
        CID:         chainB_Block32_storageNode8CID.String(),
        Value:       []byte{160, 71, 111, 108, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8},
        StateKey:    "0x33153abc667e873b6036c8a46bdd847e2ade3f89b9331c78ef2553fea194c50d",
    }, // 6
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        Diff:        false,
        Removed:     false,
        StorageKey:  "0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace",
        CID:         chainB_Block32_storageNode10CID.String(),
        Value:       []byte{},
        StateKey:    "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb",
    }, // 7
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        Diff:        false,
        Removed:     false,
        StorageKey:  "0x6aa02a11dd4d2597318b71d493b145ddf6ae0817a9d3947f45d529a6a75f2bef",
        CID:         chainB_Block32_storageNode11CID.String(),
        Value:       []byte{},
        StateKey:    "0x39fc293fc702e42b9c023f094826545db42fc0fdf2ba031bb522d5ef917a6edb",
    }, // 8
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        Diff:        false,
        Removed:     false,
        StorageKey:  "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563",
        CID:         chainB_Block32_storageNode14CID.String(),
        Value:       []byte{'\x01'},
        StateKey:    "0xcabc5edb305583e33f66322ceee43088aa99277da772feb5053512d03a0a702b",
    }, // 9
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        Diff:        false,
        Removed:     false,
        StorageKey:  "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563",
        CID:         chainB_Block32_storageNode15CID.String(),
        Value:       []byte{},
        StateKey:    "0x9397e33dedda4763aea143fc6151ebcd9a93f62db7a6a556d46c585d82ad2afc",
    }, // 10
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        Diff:        false,
        Removed:     false,
        StorageKey:  "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6",
        CID:         chainB_Block32_storageNode16CID.String(),
        Value:       []byte{'\x04'},
        StateKey:    "0xcabc5edb305583e33f66322ceee43088aa99277da772feb5053512d03a0a702b",
    }, // 11
    {
        BlockNumber: ChainB_Block32_Header.Number.String(),
        HeaderID:    ChainB_Block32_Header.Hash().Hex(),
        Diff:        false,
        Removed:     false,
        StorageKey:  "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6",
        CID:         chainB_Block32_storageNode17CID.String(),
        Value:       []byte{},
        StateKey:    "0x9397e33dedda4763aea143fc6151ebcd9a93f62db7a6a556d46c585d82ad2afc",
    }, // 12
}
// Contracts used in chainB

/*
pragma solidity ^0.8.0;

contract Test {
    uint256 private count;
    uint256 private count2;

    event Increment(uint256 count);

    constructor() {
        count2 = 4;
    }

    function incrementCount() public returns (uint256) {
        count = count + 1;
        emit Increment(count);

        return count;
    }

    function destroy() public {
        selfdestruct(payable(msg.sender));
    }

    function deleteCount2() public {
        count2 = 0;
    }
}
*/
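
The fixture storage keys can be read directly off this contract's layout: in a secure trie the key is the Keccak-256 hash of the 32-byte storage slot index, so `count` (slot 0) maps to `0x290decd9...e563` and `count2` (slot 1) to `0xb10e2d52...0cf6`, both of which appear as `StorageKey` values in `ChainB_Block32_StorageNodes` above. A minimal sketch of the derivation:

    package main

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/crypto"
    )

    func main() {
        for slot := int64(0); slot < 2; slot++ {
            // Secure-trie key: keccak256 of the left-padded 32-byte slot index.
            key := crypto.Keccak256Hash(common.BigToHash(big.NewInt(slot)).Bytes())
            fmt.Printf("slot %d -> %s\n", slot, key.Hex())
        }
        // slot 0 -> 0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563
        // slot 1 -> 0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6
    }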

/*
pragma solidity ^0.8.0;

import "@openzeppelin/contracts/token/ERC20/ERC20.sol";

contract GLDToken is ERC20 {
    constructor(uint256 initialSupply) ERC20("Gold", "GLD") {
        _mint(msg.sender, initialSupply);
    }
}
*/
59 test/helper.go Normal file
@ -0,0 +1,59 @@
package test

import (
    "bytes"
    "os"
    "reflect"
    "testing"

    "github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
    ethnode "github.com/ethereum/go-ethereum/statediff/indexer/node"
)

var (
    DefaultNodeInfo = ethnode.Info{
        ID:           "test_nodeid",
        ClientName:   "test_client",
        GenesisBlock: "TEST_GENESIS",
        NetworkID:    "test_network",
        ChainID:      0,
    }
    DefaultPgConfig = postgres.Config{
        Hostname:     "localhost",
        Port:         5432,
        DatabaseName: "vulcanize_test",
        Username:     "vulcanize",
        Password:     "vulcanize_password",

        MaxIdle:         0,
        MaxConnLifetime: 0,
        MaxConns:        4,
    }
)

func NeedsDB(t *testing.T) {
    t.Helper()
    if os.Getenv("TEST_WITH_DB") == "" {
        t.Skip("set TEST_WITH_DB to enable test")
    }
}

func NoError(t *testing.T, err error) {
    t.Helper()
    if err != nil {
        t.Fatal(err)
    }
}

// ExpectEqual asserts the provided interfaces are deep equal
func ExpectEqual(t *testing.T, want, got interface{}) {
    if !reflect.DeepEqual(want, got) {
        t.Fatalf("Values not equal:\nExpected:\t%v\nActual:\t\t%v", want, got)
    }
}

func ExpectEqualBytes(t *testing.T, want, got []byte) {
    if !bytes.Equal(want, got) {
        t.Fatalf("Bytes not equal:\nExpected:\t%v\nActual:\t\t%v", want, got)
    }
}
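
A minimal sketch (a hypothetical test, not part of this change) showing how these helpers compose in a DB-backed test:

    package test

    import "testing"

    func TestWithHelpers(t *testing.T) {
        NeedsDB(t) // skips unless TEST_WITH_DB is set in the environment

        // Connect using DefaultPgConfig / DefaultNodeInfo, run the code under
        // test, then assert on the results:
        var err error
        NoError(t, err)
        ExpectEqual(t, []int{1, 2}, []int{1, 2})
        ExpectEqualBytes(t, []byte{0x01}, []byte{0x01})
    }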