Initial statediff plugin #2

.dockerignore (new file, +3)
@@ -0,0 +1,3 @@
.git
**/*_test.go
*.so
.gitea/workflows/test.yml (new file, +89)
@@ -0,0 +1,89 @@
name: Test

on:
  pull_request:
    branches: '*'
  push:
    branches:
      - main
      - ci-test

# Needed until we can incorporate docker startup into the executor container
env:
  DOCKER_HOST: unix:///var/run/dind.sock

jobs:
  unit-tests:
    name: "Run unit tests"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v3
        with:
          go-version-file: 'go.mod'
          check-latest: true
      - name: "Run dockerd"
        run: |
          dockerd -H $DOCKER_HOST --userland-proxy=false &
          sleep 5
      - name: "Run DB container"
        run: |
          docker compose -f test/compose.yml up --wait
      - name: "Set up Gitea access token"
        env:
          TOKEN: ${{ secrets.CICD_REPO_TOKEN }}
        run: |
          git config --global url."https://$TOKEN:@git.vdb.to/".insteadOf https://git.vdb.to/
      - name: "Run tests"
        run: go test -v ./...

  integration-tests:
    name: "Run integration tests"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          path: ./plugeth-statediff
      # TODO: replace with release
      - uses: actions/checkout@v3
        with:
          repository: cerc-io/plugeth
          ref: statediff-wip
          path: ./plugeth
      - name: "Run dockerd"
        run: dockerd -H $DOCKER_HOST --userland-proxy=false &
      # These images need access tokens configured
      - name: "Build docker image"
        env:
          TOKEN: ${{ secrets.CICD_REPO_TOKEN }}
        run: |
          [[ -n "$TOKEN" ]]
          docker build ./plugeth-statediff -t cerc/plugeth-statediff:local \
            --build-arg GIT_VDBTO_TOKEN="$TOKEN"
          docker build ./plugeth -t cerc/plugeth:local \
            --build-arg GIT_VDBTO_TOKEN="$TOKEN"

      - name: "Install stack-orchestrator"
        uses: actions/checkout@v3
        with:
          repository: roysc/stack-orchestrator
          ref: plugeth-testing
          path: ./stack-orchestrator
      - run: |
          apt-get update && apt-get install -y python3-pip
          pip install ./stack-orchestrator
      - name: "Run testnet stack"
        working-directory: ./plugeth-statediff
        run: ./scripts/integration-setup.sh
      - name: "Clone system-tests"
        uses: actions/checkout@v3
        with:
          repository: cerc-io/system-tests
          ref: main
          path: ./system-tests
      - name: "Run tests"
        working-directory: ./system-tests
        run: |
          pip install pytest
          pip install -r requirements.txt
          pytest -v -k test_basic_db
.gitignore (new file, vendored, +1)
@@ -0,0 +1 @@
*.so
Dockerfile (new file, +21)
@@ -0,0 +1,21 @@
# Using the same base golang image as plugeth
FROM golang:1.20-alpine3.18 as builder

RUN apk add --no-cache gcc musl-dev binutils-gold linux-headers git

# Configure creds for gitea
ARG GIT_VDBTO_TOKEN

# Get and cache deps
WORKDIR /plugeth-statediff/
COPY go.mod go.sum ./
RUN if [ -n "$GIT_VDBTO_TOKEN" ]; then git config --global url."https://$GIT_VDBTO_TOKEN:@git.vdb.to/".insteadOf "https://git.vdb.to/"; fi && \
    go mod download && \
    rm -f ~/.gitconfig

COPY . .
RUN go build --tags linkgeth --buildmode=plugin --trimpath -o statediff.so ./main

FROM alpine:3.18

COPY --from=builder /plugeth-statediff/statediff.so /usr/local/lib/
Makefile (new file, +22)
@@ -0,0 +1,22 @@
MOCKGEN ?= mockgen
MOCKS_DIR := $(CURDIR)/test_helpers/mocks

mocks: $(MOCKS_DIR)/gen_backend.go
.PHONY: mocks

$(MOCKS_DIR)/gen_backend.go:
	$(MOCKGEN) --package mocks --destination $@ \
		github.com/openrelayxyz/plugeth-utils/core Backend,Downloader

docker-image: mocks
	docker build . -t "cerc/plugeth-statediff:local"
.PHONY: docker-image

# Local build
BUILD_FLAGS := --trimpath

plugin: build/statediff.so
.PHONY: plugin

build/statediff.so: ./**/*.go
	go build --tags linkgeth --buildmode=plugin -o $@ $(BUILD_FLAGS) ./main
README.md (350 changes)
@@ -4,64 +4,35 @@
 ## Package
 
-This package provides an auxiliary service that asynchronously processes state diff objects from chain events,
-either relaying the state objects to RPC subscribers or writing them directly to Postgres as IPLD objects.
+This package provides a [PluGeth](https://github.com/openrelayxyz/plugeth) plugin implementing an
+auxiliary service that asynchronously computes changes in the `go-ethereum` state trie. The service
+continuously listens for chain updates and builds state diffs, then relays the data to RPC
+subscribers or writes them directly to Postgres as IPLD objects.
 
-It also exposes RPC endpoints for fetching or writing to Postgres the state diff at a specific block height
-or for a specific block hash, this operates on historical block and state data and so depends on a complete state archive.
+It also exposes RPC endpoints for fetching or writing to Postgres the state diff at a specific block
+height or for a specific block hash. This operates on historical block and state data, and so
+depends on a complete state archive.
 
-Data is emitted in this differential format in order to make it feasible to IPLD-ize and index the _entire_ Ethereum state
-(including intermediate state and storage trie nodes). If this state diff process is ran continuously from genesis,
-the entire state at any block can be materialized from the cumulative differentials up to that point.
+Data is emitted in this differential format in order to make it feasible to index the _entire_ Ethereum
+state and publish it to IPLD (including intermediate state and storage trie nodes). If this service
+is run continuously from genesis, the entire state at any block can be materialized from the
+cumulative differentials up to that point.
 
-## Statediff object
+## Interface types
 
-A state diff `StateObject` is the collection of all the state and storage trie nodes that have been updated in a given block.
-For convenience, we also associate these nodes with the block number and hash, and optionally the set of code hashes and code for any
-contracts deployed in this block.
+The primary interface type is `Payload`, which serves as the main interface for accessing data in a
+service. It packages various data components such as block RLP, total difficulty, receipts RLP, and
+state object RLP. This encapsulates all of the differential data at a given block, and allows us to
+index the entire Ethereum data structure as hash-linked IPLD objects.
 
-A complete state diff `StateObject` will include all state and storage intermediate nodes, which is necessary for generating proofs and for
-traversing the tries.
+The `StateObject` type represents the final diff output structure, including an array of state leaf
+nodes and IPLD objects. For convenience, we also associate this object with the block number and
+hash.
 
-```go
-// StateObject is a collection of state (and linked storage nodes) as well as the associated block number, block hash,
-// and a set of code hashes and their code
-type StateObject struct {
-	BlockNumber       *big.Int          `json:"blockNumber" gencodec:"required"`
-	BlockHash         common.Hash       `json:"blockHash" gencodec:"required"`
-	Nodes             []StateNode       `json:"nodes" gencodec:"required"`
-	CodeAndCodeHashes []CodeAndCodeHash `json:"codeMapping"`
-}
-
-// StateNode holds the data for a single state diff node
-type StateNode struct {
-	NodeType     NodeType      `json:"nodeType" gencodec:"required"`
-	Path         []byte        `json:"path" gencodec:"required"`
-	NodeValue    []byte        `json:"value" gencodec:"required"`
-	StorageNodes []StorageNode `json:"storage"`
-	LeafKey      []byte        `json:"leafKey"`
-}
-
-// StorageNode holds the data for a single storage diff node
-type StorageNode struct {
-	NodeType  NodeType `json:"nodeType" gencodec:"required"`
-	Path      []byte   `json:"path" gencodec:"required"`
-	NodeValue []byte   `json:"value" gencodec:"required"`
-	LeafKey   []byte   `json:"leafKey"`
-}
-
-// CodeAndCodeHash struct for holding codehash => code mappings
-// we can't use an actual map because they are not rlp serializable
-type CodeAndCodeHash struct {
-	Hash common.Hash `json:"codeHash"`
-	Code []byte      `json:"code"`
-}
-```
-
-These objects are packed into a `Payload` structure which can additionally associate the `StateObject`
-with the block (header, uncles, and transactions), receipts, and total difficulty.
-This `Payload` encapsulates all of the differential data at a given block, and allows us to index the entire Ethereum data structure
-as hash-linked IPLD objects.
+State leaf nodes contain information about account changes, including whether they are removed, an
+account wrapper with account details and identifiers, and an array of storage leaf nodes
+representing storage changes. The IPLD type encapsulates CID-content pairs, used for code mappings
+and trie node (both intermediate and leaf) IPLD objects.
 
 ```go
 // Payload packages the data to send to state diff subscriptions
@@ -71,80 +42,99 @@ type Payload struct {
 	ReceiptsRlp     []byte   `json:"receiptsRlp"`
 	StateObjectRlp  []byte   `json:"stateObjectRlp" gencodec:"required"`
 
-	encoded []byte
-	err     error
+	// ...
+}
+
+// in package "types":
+
+// StateObject is the final output structure from the builder
+type StateObject struct {
+	BlockNumber *big.Int        `json:"blockNumber" gencodec:"required"`
+	BlockHash   common.Hash     `json:"blockHash" gencodec:"required"`
+	Nodes       []StateLeafNode `json:"nodes" gencodec:"required"`
+	IPLDs       []IPLD          `json:"iplds"`
+}
+
+// StateLeafNode holds the data for a single state diff leaf node
+type StateLeafNode struct {
+	Removed        bool
+	AccountWrapper AccountWrapper
+	StorageDiff    []StorageLeafNode
+}
+
+// AccountWrapper is used to temporarily associate the unpacked node with its raw values
+type AccountWrapper struct {
+	Account *types.StateAccount
+	LeafKey []byte
+	CID     string
+}
+
+// StorageLeafNode holds the data for a single storage diff node leaf node
+type StorageLeafNode struct {
+	Removed bool
+	Value   []byte
+	LeafKey []byte
+	CID     string
+}
+
+// IPLD holds a cid:content pair, e.g. for codehash to code mappings or for intermediate node IPLD objects
+type IPLD struct {
+	CID     string
+	Content []byte
 }
 ```
 
 ## Usage
 
-This state diffing service runs as an auxiliary service concurrent to the regular syncing process of the geth node.
+The service is started when the plugin library is loaded by PluGeth and runs as an auxiliary component of the node as it syncs.
 
 ### CLI configuration
 
-This service introduces a CLI flag namespace `statediff`
+This service introduces a CLI flag namespace `statediff`. Note that PluGeth plugin arguments must be separated from geth arguments by `--`, e.g. `geth --datadir data -- --statediff`.
 
-`--statediff` flag is used to turn on the service
-
-`--statediff.writing` is used to tell the service to write state diff objects it produces from synced ChainEvents directly to a configured Postgres database
-
-`--statediff.workers` is used to set the number of concurrent workers to process state diff objects and write them into the database
-
-`--statediff.db.type` is the type of database we write out to (current options: postgres, dump, file)
-
-`--statediff.dump.dst` is the destination to write to when operating in database dump mode (stdout, stderr, discard)
-
-`--statediff.db.driver` is the specific driver to use for the database (current options for postgres: pgx and sqlx)
-
-`--statediff.db.host` is the hostname/ip to dial to connect to the database
-
-`--statediff.db.port` is the port to dial to connect to the database
-
-`--statediff.db.name` is the name of the database to connect to
-
-`--statediff.db.user` is the user to connect to the database as
-
-`--statediff.db.password` is the password to use to connect to the database
-
-`--statediff.db.conntimeout` is the connection timeout (in seconds)
-
-`--statediff.db.maxconns` is the maximum number of database connections
-
-`--statediff.db.minconns` is the minimum number of database connections
-
-`--statediff.db.maxidleconns` is the maximum number of idle connections
-
-`--statediff.db.maxconnidletime` is the maximum lifetime for an idle connection (in seconds)
-
-`--statediff.db.maxconnlifetime` is the maximum lifetime for a connection (in seconds)
-
-`--statediff.db.nodeid` is the node id to use in the Postgres database
-
-`--statediff.db.clientname` is the client name to use in the Postgres database
-
-`--statediff.db.upsert` whether or not the service, when operating in a direct database writing mode, should overwrite any existing conflicting data
-
-`--statediff.file.path` full path (including filename) to write statediff data out to when operating in file mode
-
-`--statediff.file.wapath` full path (including filename) to write statediff watched addresses out to when operating in file mode
+* `--statediff` is used to enable the service
+* `--statediff.writing` is used to tell the service to write state diff objects it produces from synced `ChainEvent`s directly to a configured Postgres database
+* `--statediff.workers` is used to set the number of concurrent workers to process state diff objects and write them into the database
+* `--statediff.db.type` is the type of database we write out to (current options: `postgres`, `dump`, `file`)
+* `--statediff.dump.dst` is the destination to write to when operating in database dump mode (`stdout`, `stderr`, `discard`)
+* `--statediff.db.driver` is the specific driver to use for the database (current options for postgres: `pgx` and `sqlx`)
+* `--statediff.db.host` is the hostname/IP address to dial to connect to the database
+* `--statediff.db.port` is the port to dial to connect to the database
+* `--statediff.db.name` is the name of the database to connect to
+* `--statediff.db.user` is the user to connect to the database as
+* `--statediff.db.password` is the password to use to connect to the database
+* `--statediff.db.conntimeout` is the connection timeout (in seconds)
+* `--statediff.db.maxconns` is the maximum number of database connections
+* `--statediff.db.minconns` is the minimum number of database connections
+* `--statediff.db.maxidleconns` is the maximum number of idle connections
+* `--statediff.db.maxconnidletime` is the maximum lifetime for an idle connection (in seconds)
+* `--statediff.db.maxconnlifetime` is the maximum lifetime for a connection (in seconds)
+* `--statediff.db.nodeid` is the node id to use in the Postgres database
+* `--statediff.db.clientname` is the client name to use in the Postgres database
+* `--statediff.db.upsert` whether or not the service, when operating in a direct database writing mode, should overwrite any existing conflicting data
+* `--statediff.file.path` full path (including filename) to write statediff data out to when operating in file mode
+* `--statediff.file.wapath` full path (including filename) to write statediff watched addresses out to when operating in file mode
 
 The service can only operate in full sync mode (`--syncmode=full`), but only the historical RPC endpoints require an archive node (`--gcmode=archive`)
 
 e.g.
-`./build/bin/geth --syncmode=full --gcmode=archive --statediff --statediff.writing --statediff.db.type=postgres --statediff.db.driver=sqlx --statediff.db.host=localhost --statediff.db.port=5432 --statediff.db.name=cerc_testing --statediff.db.user=postgres --statediff.db.nodeid=nodeid --statediff.db.clientname=clientname`
+`geth --syncmode=full --gcmode=archive -- --statediff --statediff.writing --statediff.db.type=postgres --statediff.db.driver=sqlx --statediff.db.host=localhost --statediff.db.port=5432 --statediff.db.name=cerc_testing --statediff.db.user=postgres --statediff.db.nodeid=nodeid --statediff.db.clientname=clientname`
 
-When operating in `--statediff.db.type=file` mode, the service will write SQL statements out to the file designated by
-`--statediff.file.path`. Please note that it writes out SQL statements with all `ON CONFLICT` constraint checks dropped.
-This is done so that we can scale out the production of the SQL statements horizontally, merge the separate SQL files produced,
-de-duplicate using unix tools (`sort statediff.sql | uniq` or `sort -u statediff.sql`), bulk load using psql
-(`psql db_name --set ON_ERROR_STOP=on -f statediff.sql`), and then add our primary and foreign key constraints and indexes
-back afterwards.
+When operating in `--statediff.db.type=file` mode, the service will save SQL statements to the file
+specified by `--statediff.file.path`. It's important to note that these SQL statements are written
+without any `ON CONFLICT` constraint checks. This omission allows us to:
+* horizontally expand the production of SQL statements,
+* merge the individual SQL files generated,
+* remove duplicates using Unix tools (`sort statediff.sql | uniq` or `sort -u statediff.sql`),
+* perform bulk loading using psql (`psql db_name --set ON_ERROR_STOP=on -f statediff.sql`),
+* and then reinstate our primary and foreign key constraints and indexes.
 
-### RPC endpoints
+### Payload retrieval
 
-The state diffing service exposes both a WS subscription endpoint, and a number of HTTP unary endpoints.
+The state diffing service exposes both a websocket subscription endpoint, and a number of HTTP unary
+endpoints for retrieving data payloads.
 
-Each of these endpoints requires a set of parameters provided by the caller
+Each of these endpoints requires a set of parameters provided by the caller:
 
 ```go
 // Params is used to carry in parameters from subscribing/requesting clients configuration
@@ -159,14 +149,17 @@ type Params struct {
 }
 ```
 
-Using these params we can tell the service whether to include state and/or storage intermediate nodes; whether
-to include the associated block (header, uncles, and transactions); whether to include the associated receipts;
-whether to include the total difficulty for this block; whether to include the set of code hashes and code for
-contracts deployed in this block; whether to limit the diffing process to a list of specific addresses.
+Using these params we can tell the service:
+* whether to include state and/or storage intermediate nodes
+* whether to include the associated block (header, uncles, and transactions)
+* whether to include the associated receipts
+* whether to include the total difficulty for this block
+* whether to include the set of code hashes and code for contracts deployed in this block, and
+* whether to limit the diffing process to a list of specific addresses.
 
-#### Subscription endpoint
+#### Subscription endpoints
 
-A websocket supporting RPC endpoint is exposed for subscribing to state diff `StateObjects` that come off the head of the chain while the geth node syncs.
+A websocket-supporting RPC endpoint is exposed for subscribing to state diff `StateObjects` that come off the head of the chain while the geth node syncs.
 
 ```go
 // Stream is a subscription endpoint that fires off state diff payloads as they are created
@@ -182,7 +175,6 @@
 e.g.
 
 ```go
-
 cli, err := rpc.Dial("ipcPathOrWsURL")
 if err != nil {
 	// handle error
@@ -227,98 +219,64 @@ and the `statediff` namespace exposed (`--http.api=statediff`).
 
 ### Direct indexing into Postgres
 
-If `--statediff.writing` is set, the service will convert the state diff `StateObject` data into IPLD objects, persist them directly to Postgres,
-and generate secondary indexes around the IPLD data.
+If `--statediff.writing` is enabled, the service will convert the `StateObject`s and all associated
+data into IPLD objects, persist them directly to Postgres, and generate secondary indexes around the
+IPLD data.
 
-The schema and migrations for this Postgres database are provided in `statediff/db/`.
+The schema and migrations for this Postgres database are defined in <https://github.com/cerc-io/ipld-eth-db>.
 
-#### Postgres setup
+#### RPC endpoints
 
-We use [pressly/goose](https://github.com/pressly/goose) as our Postgres migration manager.
-You can also load the Postgres schema directly into a database using
+If enabled, direct indexing will be triggered on every `ChainEvent`, writing diffs for all new
+blocks as they are received. However, the service also provides methods for clients to trigger and
+track this process:
 
-`psql database_name < schema.sql`
-
-This will only work on a version 12.4 Postgres database.
+* The `WriteStateDiffAt` method directly writes a state diff object to the database at a specific
+  block height.
+* Likewise, the `WriteStateDiffFor` method directly writes a state diff object to the database for
+  a specific block hash
+* The `StreamWrites` method sets up a subscription to stream the status of completed calls to the
+  above methods.
+* The `WatchAddress` method enables the modification of the watched addresses list, restricting
+  direct indexing for a given operation and arguments.
 
 #### Schema overview
 
-Our Postgres schemas are built around a single IPFS backing Postgres IPLD blockstore table (`ipld.blocks`) that conforms with [go-ds-sql](https://github.com/ipfs/go-ds-sql/blob/master/postgres/postgres.go).
-All IPLD objects are stored in this table, where `key` is the blockstore-prefixed multihash key for the IPLD object and `data` contains
-the bytes for the IPLD block (in the case of all Ethereum IPLDs, this is the RLP byte encoding of the Ethereum object).
+Our Postgres schemas are built around a single IPFS backing Postgres IPLD blockstore table
+(`ipld.blocks`) that conforms with
+[go-ds-sql](https://github.com/ipfs/go-ds-sql/blob/master/postgres/postgres.go). All IPLD objects
+are stored in this table, where `key` is the CID for the IPLD object and `data` contains the bytes
+for the IPLD block (in the case of all Ethereum IPLDs, this is the RLP byte encoding of the Ethereum
+object).
 
-The IPLD objects in this table can be traversed using an IPLD DAG interface, but since this table only maps multihash to raw IPLD object
-it is not particularly useful for searching through the data by looking up Ethereum objects by their constituent fields
-(e.g. by block number, tx source/recipient, state/storage trie node path). To improve the accessibility of these objects
-we create an Ethereum [advanced data layout](https://github.com/ipld/specs#schemas-and-advanced-data-layouts) (ADL) by generating secondary
-indexes on top of the raw IPLDs in other Postgres tables.
+The IPLD objects in this table can be traversed using an IPLD DAG interface, but since this table
+only maps CID to raw IPLD object it is not very suitable for looking up Ethereum objects by their
+constituent fields (e.g. by tx source/recipient, state/storage trie path). To improve the
+accessibility of these objects we create an Ethereum [advanced data
+layout](https://github.com/ipld/specs#schemas-and-advanced-data-layouts) (ADL) by generating
+secondary indexes on top of the raw IPLDs in other Postgres tables.
 
-These secondary index tables fall under the `eth` schema and follow an `{objectType}_cids` naming convention.
-These tables provide a view into individual fields of the underlying Ethereum IPLD objects, allowing lookups on these fields, and reference the raw IPLD objects stored in `ipld.blocks`
-by foreign keys to their multihash keys.
-Additionally, these tables maintain the hash-linked nature of Ethereum objects to one another. E.g. a storage trie node entry in the `storage_cids`
-table contains a `state_id` foreign key which references the `id` for the `state_cids` entry that contains the state leaf node for the contract that storage node belongs to,
-and in turn that `state_cids` entry contains a `header_id` foreign key which references the `id` of the `header_cids` entry that contains the header for the block these state and storage nodes were updated (diffed).
+These secondary index tables fall under the `eth` schema and follow an `{objectType}_cids` naming
+convention. These tables provide a view into individual fields of the underlying Ethereum IPLD
+objects, allowing lookups on these fields, and reference the raw IPLD objects stored in
+`ipld.blocks` by CID. Additionally, these tables maintain the hash-linked nature of Ethereum
+objects to one another, e.g. a storage trie node entry in the `storage_cids` table contains a
+`state_leaf_key` field referencing the `state_cids` entry for the state trie node of its owning
+contract, and that `state_cids` entry in turn contains a `header_id` field referencing the
+`block_hash` of the `header_cids` entry for the block in which these state and storage nodes were
+updated (diffed).
 
 ### Optimization
 
-On mainnet this process is extremely IO intensive and requires significant resources to allow it to keep up with the head of the chain.
-The state diff processing time for a specific block is dependent on the number and complexity of the state changes that occur in a block and
-the number of updated state nodes that are available in the in-memory cache vs must be retrieved from disc.
+On mainnet this process is extremely IO intensive and requires significant resources to allow it to
+keep up with the head of the chain. The state diff processing time for a specific block is
+dependent on the number and complexity of the state changes that occur in a block and the number of
+updated state nodes that are available in the in-memory cache vs must be retrieved from disc.
 
-If memory permits, one means of improving the efficiency of this process is to increase the in-memory trie cache allocation.
-This can be done by increasing the overall `--cache` allocation and/or by increasing the % of the cache allocated to trie
-usage with `--cache.trie`.
+If memory permits, one means of improving the efficiency of this process is to increase the
+in-memory trie cache allocation. This can be done by increasing the overall `--cache` allocation
+and/or by increasing the % of the cache allocated to trie usage with `--cache.trie`.
 
-## Versioning, Branches, Rebasing, and Releasing
+<!-- TO DO -->
+<!-- ## Versioning, Branches, Rebasing, and Releasing -->
-
-Internal tagged releases are maintained for building the latest version of statediffing geth or using it as a go mod dependency.
-When a new core go-ethereum version is released, statediffing geth is rebased onto and adjusted to work with the new tag.
-
-We want to maintain a complete record of our git history, but in order to make frequent and timely rebases feasible we also
-need to be able to squash our work before performing a rebase. To this end we retain multiple branches with partial incremental history that culminate in
-the full incremental history.
-
-### Versioning
-
-Example: `v1.10.16-statediff-3.0.2`
-
-- The first section, `v1.10.16`, corresponds to the release of the root branch this version is rebased onto (e.g., <https://github.com/ethereum/go-ethereum/releases/tag/v1.10.16>)
-- The second section, `3.0.2`, corresponds to the version of our statediffing code. The major version here (3) should always correspond with the major version of the `ipld-eth-db` schema version it works with (e.g., <https://github.com/cerc-io/ipld-eth-db/releases/tag/v3.0.6>); it is only bumped when we bump the major version of the schema.
-- The major version of the schema is only bumped when a breaking change is made to the schema.
-- The minor version is bumped when a new feature is added, or a fix is performed that breaks or updates the statediffing API or CLI in some way.
-- The patch version is bumped whenever minor fixes/patches/features are done that don't change/break API/CLI compatibility.
-- We are very strict about the first section and the major version of the statediffing code, but some discretion is required when deciding to bump minor versus patch version of the statediffing code.
-
-The statediff version is included in the `VersionMeta` in params/version.go
-
-### Branches
-
-We maintain two official kinds of branches:
-
-Major Branch: `{Root Version}-statediff`
-Major branches retain the cumulative state of all changes made before the latest root version rebase and track the full incremental history of changes made between the latest root version rebase and the next.
-Aside from creating the branch by performing the rebase described in the section below, these branches are never worked off of or committed to directly.
-
-Feature Branch: `{Root Version}-statediff-{Statediff Version}`
-Feature branches are checked out from a major branch in order to work on a new feature or fix for the statediffing code.
-The statediff version of a feature branch is the new version it affects on the major branch when merged. Internal tagged releases
-are cut against these branches after they are merged back to the major branch.
-
-If a developer is unsure what version their patch should affect, they should remain working on an unofficial branch. From there
-they can open a PR against the targeted root branch and be directed to the appropriate feature version and branch.
-
-### Rebasing
-
-When a new root tagged release comes out we rebase our statediffing code on top of the new tag using the following process:
-
-1. Checkout a new major branch for the tag from the current major branch
-2. On the new major branch, squash all our commits since the last major rebase
-3. On the new major branch, perform the rebase against the new tag
-4. Push the new major branch to the remote
-5. From the new major branch, checkout a new feature branch based on the new major version and the last statediff version
-6. On this new feature branch, add the new major branch to the .github/workflows/on-master.yml list of "on push" branches
-7. On this new feature branch, make any fixes/adjustments required for all statediffing geth tests to pass
-8. PR this feature branch into the new major branch, this PR will trigger CI tests and builds.
-9. After merging PR, rebase feature branch onto major branch
-10. Cut a new release targeting the feature branch, this release should have the new root version but the same statediff version as the last release
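
To make the subscription workflow in the new README concrete, here is a minimal client sketch added editorially (it is not part of the PR). It assumes geth exposes a websocket endpoint at `ws://localhost:8546`, that the subscription method name follows the usual lowercase convention (`"stream"`), and uses illustrative `Params` field names (`IncludeBlock`, `IncludeReceipts`) that should be checked against the actual `Params` definition.

```go
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"

	statediff "github.com/cerc-io/plugeth-statediff"
)

func main() {
	// Assumes the node was started with --ws and the statediff API exposed.
	cli, err := rpc.Dial("ws://localhost:8546")
	if err != nil {
		panic(err)
	}
	payloads := make(chan statediff.Payload, 100)
	// Field names here are illustrative; consult the Params type for the full set.
	params := statediff.Params{IncludeBlock: true, IncludeReceipts: true}
	sub, err := cli.Subscribe(context.Background(), "statediff", payloads, "stream", params)
	if err != nil {
		panic(err)
	}
	defer sub.Unsubscribe()
	for p := range payloads {
		fmt.Printf("statediff payload: %d bytes of state object RLP\n", len(p.StateObjectRlp))
	}
}
```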
adapt/state.go (new file, +74)
@@ -0,0 +1,74 @@
package adapt

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/trie"

	plugeth "github.com/openrelayxyz/plugeth-utils/core"
)

// StateView exposes a minimal interface for state access for diff building
type StateView interface {
	OpenTrie(root common.Hash) (StateTrie, error)
	ContractCode(codeHash common.Hash) ([]byte, error)
}

// StateTrie is an interface exposing only the necessary methods from state.Trie
type StateTrie interface {
	GetKey([]byte) []byte
	NodeIterator([]byte) trie.NodeIterator
}

// adapts a state.Database to StateView - used in tests
type stateDatabaseView struct {
	db state.Database
}

var _ StateView = stateDatabaseView{}

func GethStateView(db state.Database) StateView {
	return stateDatabaseView{db}
}

func (a stateDatabaseView) OpenTrie(root common.Hash) (StateTrie, error) {
	return a.db.OpenTrie(common.Hash(root))
}

func (a stateDatabaseView) ContractCode(hash common.Hash) ([]byte, error) {
	return a.db.ContractCode(common.Hash{}, hash)
}

// adapts geth Trie to plugeth
type adaptTrie struct {
	plugeth.Trie
}

func NewStateTrie(t plugeth.Trie) StateTrie { return adaptTrie{t} }

func (a adaptTrie) NodeIterator(start []byte) trie.NodeIterator {
	return NodeIterator(a.Trie.NodeIterator(start))
}

func NodeIterator(it plugeth.NodeIterator) trie.NodeIterator {
	return adaptIter{it}
}

type adaptIter struct {
	plugeth.NodeIterator
}

func (it adaptIter) Hash() common.Hash {
	return common.Hash(it.NodeIterator.Hash())
}

func (it adaptIter) Parent() common.Hash {
	return common.Hash(it.NodeIterator.Parent())
}

func (it adaptIter) AddResolver(resolver trie.NodeResolver) {
	r := func(owner plugeth.Hash, path []byte, hash plugeth.Hash) []byte {
		return resolver(common.Hash(owner), path, common.Hash(hash))
	}
	it.NodeIterator.AddResolver(r)
}
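
As a quick illustration of how these adapters are meant to be used, here is an editorial sketch (not part of the PR) that satisfies the builder-facing `StateView`/`StateTrie` interfaces with a plain geth `state.Database`, the same path the tests take. It assumes a geth version where `types.EmptyRootHash` resolves to an empty trie.

```go
package adapt_test

import (
	"fmt"
	"testing"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"

	"github.com/cerc-io/plugeth-statediff/adapt"
)

// TestStateViewSketch iterates the empty state trie of a fresh in-memory database
// through the StateView/StateTrie adapters, the same access path the diff builder uses.
func TestStateViewSketch(t *testing.T) {
	view := adapt.GethStateView(state.NewDatabase(rawdb.NewMemoryDatabase()))

	tr, err := view.OpenTrie(types.EmptyRootHash)
	if err != nil {
		t.Fatal(err)
	}
	it := tr.NodeIterator(nil)
	for it.Next(true) {
		fmt.Println("visited node", it.Hash())
	}
}
```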
adapt/util.go (new file, +26)
@@ -0,0 +1,26 @@
package adapt

import (
	"github.com/ethereum/go-ethereum/params"

	plugeth_params "github.com/openrelayxyz/plugeth-utils/restricted/params"
)

func ChainConfig(cc *plugeth_params.ChainConfig) *params.ChainConfig {
	return &params.ChainConfig{
		ChainID:             cc.ChainID,
		HomesteadBlock:      cc.HomesteadBlock,
		DAOForkBlock:        cc.DAOForkBlock,
		DAOForkSupport:      cc.DAOForkSupport,
		EIP150Block:         cc.EIP150Block,
		EIP155Block:         cc.EIP155Block,
		EIP158Block:         cc.EIP158Block,
		ByzantiumBlock:      cc.ByzantiumBlock,
		ConstantinopleBlock: cc.ConstantinopleBlock,
		PetersburgBlock:     cc.PetersburgBlock,
		IstanbulBlock:       cc.IstanbulBlock,
		MuirGlacierBlock:    cc.MuirGlacierBlock,
		BerlinBlock:         cc.BerlinBlock,
		LondonBlock:         cc.LondonBlock,
	}
}
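
For context, an editorial sketch (not part of the PR) of where this conversion fits: it lets plugin code hand a plugeth-provided chain config to components that expect geth's `params.ChainConfig`, as `blockchain.go` below does when wrapping a backend.

```go
package example

import (
	"github.com/ethereum/go-ethereum/params"
	"github.com/openrelayxyz/plugeth-utils/restricted"

	"github.com/cerc-io/plugeth-statediff/adapt"
)

// gethChainConfig converts the chain config exposed by a plugeth backend into
// the geth params.ChainConfig expected by the diff builder and indexer.
func gethChainConfig(backend restricted.Backend) *params.ChainConfig {
	return adapt.ChainConfig(backend.ChainConfig())
}
```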
api.go (156 changes)
@@ -19,11 +19,10 @@ package statediff
 import (
 	"context"
 
-	"github.com/ethereum/go-ethereum/statediff/types"
-
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/rpc"
+	"github.com/cerc-io/plugeth-statediff/types"
+	"github.com/cerc-io/plugeth-statediff/utils/log"
 )
 
 // APIName is the namespace used for the state diffing service API
@@ -35,108 +34,61 @@ const APIVersion = "0.0.1"
 // PublicStateDiffAPI provides an RPC subscription interface
 // that can be used to stream out state diffs as they
 // are produced by a full node
-type PublicStateDiffAPI struct {
-	sds IService
+type PublicAPI struct {
+	sds *Service
 }
 
 // NewPublicStateDiffAPI creates an rpc subscription interface for the underlying statediff service
-func NewPublicStateDiffAPI(sds IService) *PublicStateDiffAPI {
-	return &PublicStateDiffAPI{
+func NewPublicAPI(sds *Service) *PublicAPI {
+	return &PublicAPI{
 		sds: sds,
 	}
 }
 
-// Stream is the public method to setup a subscription that fires off statediff service payloads as they are created
-func (api *PublicStateDiffAPI) Stream(ctx context.Context, params Params) (*rpc.Subscription, error) {
-	// ensure that the RPC connection supports subscriptions
-	notifier, supported := rpc.NotifierFromContext(ctx)
-	if !supported {
-		return nil, rpc.ErrNotificationsUnsupported
-	}
-
-	// create subscription and start waiting for events
-	rpcSub := notifier.CreateSubscription()
-
-	go func() {
-		// subscribe to events from the statediff service
-		payloadChannel := make(chan Payload, chainEventChanSize)
-		quitChan := make(chan bool, 1)
-		api.sds.Subscribe(rpcSub.ID, payloadChannel, quitChan, params)
-		// loop and await payloads and relay them to the subscriber with the notifier
-		for {
-			select {
-			case payload := <-payloadChannel:
-				if err := notifier.Notify(rpcSub.ID, payload); err != nil {
-					log.Error("Failed to send state diff packet; error: " + err.Error())
-					if err := api.sds.Unsubscribe(rpcSub.ID); err != nil {
-						log.Error("Failed to unsubscribe from the state diff service; error: " + err.Error())
-					}
-					return
-				}
-			case err := <-rpcSub.Err():
-				if err != nil {
-					log.Error("State diff service rpcSub error: " + err.Error())
-					err = api.sds.Unsubscribe(rpcSub.ID)
-					if err != nil {
-						log.Error("Failed to unsubscribe from the state diff service; error: " + err.Error())
-					}
-					return
-				}
-			case <-quitChan:
-				// don't need to unsubscribe, service does so before sending the quit signal
-				return
-			}
-		}
-	}()
-
-	return rpcSub, nil
+// Stream subscribes to statediff payloads as they are created.
+func (api *PublicAPI) Stream(ctx context.Context, params Params) (<-chan Payload, error) {
+	payloadChan := make(chan Payload, chainEventChanSize)
+	clientChan := make(chan Payload, chainEventChanSize)
+	quitChan := make(chan bool, 1)
+	// subscribe to the service's payload broadcasts
+	id := api.sds.Subscribe(payloadChan, quitChan, params)
+
+	go func() {
+		defer close(clientChan)
+		defer close(payloadChan)
+		defer func() {
+			if err := api.sds.Unsubscribe(id); err != nil {
+				log.Error("Failed to unsubscribe from statediff service", "error", err)
+			}
+		}()
+
+		for {
+			select {
+			case payload := <-payloadChan:
+				clientChan <- payload
+			case <-ctx.Done():
+				return
+			case <-quitChan:
+				return
+			}
+		}
+	}()
+
+	return clientChan, nil
 }

[review comment, resolved] i-norden: Nice that they did away with the `*rpc.Subscription`, returning a channel makes it much simpler
 
 // StateDiffAt returns a state diff payload at the specific blockheight
-func (api *PublicStateDiffAPI) StateDiffAt(ctx context.Context, blockNumber uint64, params Params) (*Payload, error) {
+func (api *PublicAPI) StateDiffAt(ctx context.Context, blockNumber uint64, params Params) (*Payload, error) {
 	return api.sds.StateDiffAt(blockNumber, params)
 }
 
 // StateDiffFor returns a state diff payload for the specific blockhash
-func (api *PublicStateDiffAPI) StateDiffFor(ctx context.Context, blockHash common.Hash, params Params) (*Payload, error) {
+func (api *PublicAPI) StateDiffFor(ctx context.Context, blockHash common.Hash, params Params) (*Payload, error) {
 	return api.sds.StateDiffFor(blockHash, params)
 }
 
-// StreamCodeAndCodeHash writes all of the codehash=>code pairs out to a websocket channel
-func (api *PublicStateDiffAPI) StreamCodeAndCodeHash(ctx context.Context, blockNumber uint64) (*rpc.Subscription, error) {
-	// ensure that the RPC connection supports subscriptions
-	notifier, supported := rpc.NotifierFromContext(ctx)
-	if !supported {
-		return nil, rpc.ErrNotificationsUnsupported
-	}
-
-	// create subscription and start waiting for events
-	rpcSub := notifier.CreateSubscription()
-	payloadChan := make(chan types.CodeAndCodeHash, chainEventChanSize)
-	quitChan := make(chan bool)
-	api.sds.StreamCodeAndCodeHash(blockNumber, payloadChan, quitChan)
-	go func() {
-		for {
-			select {
-			case payload := <-payloadChan:
-				if err := notifier.Notify(rpcSub.ID, payload); err != nil {
-					log.Error("Failed to send code and codehash packet", "err", err)
-					return
-				}
-			case err := <-rpcSub.Err():
-				log.Error("State diff service rpcSub error", "err", err)
-				return
-			case <-quitChan:
-				return
-			}
-		}
-	}()
-
-	return rpcSub, nil
-}
-
 // WriteStateDiffAt writes a state diff object directly to DB at the specific blockheight
-func (api *PublicStateDiffAPI) WriteStateDiffAt(ctx context.Context, blockNumber uint64, params Params) JobID {
+func (api *PublicAPI) WriteStateDiffAt(ctx context.Context, blockNumber uint64, params Params) JobID {
 	var err error

[review thread, resolved]
i-norden: We haven't ever actually used this endpoint, should consider getting rid of it.
roysc: In that case, I vote for dropping it for now, pending a use case
i-norden: That sounds good to me.

 	start, logger := countApiRequestBegin("writeStateDiffAt", blockNumber)
 	defer countApiRequestEnd(start, logger, err)
@@ -145,62 +97,44 @@ func (api *PublicStateDiffAPI) WriteStateDiffAt(ctx context.Context, blockNumber uint64, params Params) JobID {
 }
 
 // WriteStateDiffFor writes a state diff object directly to DB for the specific block hash
-func (api *PublicStateDiffAPI) WriteStateDiffFor(ctx context.Context, blockHash common.Hash, params Params) error {
+func (api *PublicAPI) WriteStateDiffFor(ctx context.Context, blockHash common.Hash, params Params) error {
 	var err error
-	start, logger := countApiRequestBegin("writeStateDiffFor", blockHash.Hex())
+	start, logger := countApiRequestBegin("writeStateDiffFor", blockHash.String())
 	defer countApiRequestEnd(start, logger, err)
 
 	err = api.sds.WriteStateDiffFor(blockHash, params)
 	return err
 }
 
-// WatchAddress changes the list of watched addresses to which the direct indexing is restricted according to given operation
-func (api *PublicStateDiffAPI) WatchAddress(operation types.OperationType, args []types.WatchAddressArg) error {
+// WatchAddress changes the list of watched addresses to which the direct indexing is restricted
+// for the given operation.
+func (api *PublicAPI) WatchAddress(operation types.OperationType, args []types.WatchAddressArg) error {
 	return api.sds.WatchAddress(operation, args)
 }
 
 // StreamWrites sets up a subscription that streams the status of completed calls to WriteStateDiff*
-func (api *PublicStateDiffAPI) StreamWrites(ctx context.Context) (*rpc.Subscription, error) {
-	// ensure that the RPC connection supports subscriptions
-	notifier, supported := rpc.NotifierFromContext(ctx)
-	if !supported {
-		return nil, rpc.ErrNotificationsUnsupported
-	}
-
-	// create subscription and start waiting for events
-	rpcSub := notifier.CreateSubscription()
-
-	go func() {
-		// subscribe to events from the statediff service
-		statusChan := make(chan JobStatus, chainEventChanSize)
-		quitChan := make(chan bool, 1)
-		api.sds.SubscribeWriteStatus(rpcSub.ID, statusChan, quitChan)
-
-		var err error
-		defer func() {
-			if err = api.sds.UnsubscribeWriteStatus(rpcSub.ID); err != nil {
-				log.Error("Failed to unsubscribe from job status stream: " + err.Error())
-			}
-		}()
-		// loop and await payloads and relay them to the subscriber with the notifier
-		for {
-			select {
-			case status := <-statusChan:
-				if err = notifier.Notify(rpcSub.ID, status); err != nil {
-					log.Error("Failed to send job status; error: " + err.Error())
-					return
-				}
-			case err = <-rpcSub.Err():
-				if err != nil {
-					log.Error("statediff_StreamWrites RPC subscription error: " + err.Error())
-					return
-				}
-			case <-quitChan:
-				// don't need to unsubscribe, service does so before sending the quit signal
-				return
-			}
-		}
-	}()
-
-	return rpcSub, nil
+func (api *PublicAPI) StreamWrites(ctx context.Context) (<-chan JobStatus, error) {
+	// subscribe to events from the statediff service
+	statusChan := make(chan JobStatus, chainEventChanSize)
+	clientChan := make(chan JobStatus, chainEventChanSize)
+	id := api.sds.SubscribeWriteStatus(statusChan)
+
+	go func() {
+		defer func() {
+			close(statusChan)
+			close(clientChan)
+		}()
+
+		for {
+			select {
+			case status := <-statusChan:
+				clientChan <- status
+			case <-ctx.Done():
+				api.sds.UnsubscribeWriteStatus(id)
+				return
+			}
+		}
+	}()
+
+	return clientChan, nil
 }
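
A brief editorial sketch (not part of the PR) of how the channel-based write APIs compose: trigger a direct write and wait for its completion status via `StreamWrites`. The `JobStatus` field names (`ID`, `Err`) are assumptions for illustration and should be checked against the actual type.

```go
package example

import (
	"context"
	"fmt"
	"time"

	statediff "github.com/cerc-io/plugeth-statediff"
)

// WaitForWrite requests a direct DB write for the given height and blocks until its
// completion status arrives on the StreamWrites channel, or the timeout elapses.
func WaitForWrite(api *statediff.PublicAPI, height uint64) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel() // cancelling also ends the underlying write-status subscription

	statusChan, err := api.StreamWrites(ctx)
	if err != nil {
		return err
	}
	id := api.WriteStateDiffAt(ctx, height, statediff.Params{})
	for status := range statusChan {
		if status.ID == id {
			return status.Err
		}
	}
	return fmt.Errorf("timed out waiting for write of block %d", height)
}
```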
blockchain.go (new file, +121)
@@ -0,0 +1,121 @@
package statediff

import (
	"context"
	"encoding/json"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"

	plugeth "github.com/openrelayxyz/plugeth-utils/core"
	"github.com/openrelayxyz/plugeth-utils/restricted"

	"github.com/cerc-io/plugeth-statediff/adapt"
	"github.com/cerc-io/plugeth-statediff/utils"
)

type BlockChain interface {
	SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
	CurrentBlock() *types.Header
	GetBlockByHash(hash common.Hash) *types.Block
	GetBlockByNumber(number uint64) *types.Block
	GetReceiptsByHash(hash common.Hash) types.Receipts
	GetTd(hash common.Hash, number uint64) *big.Int
	StateCache() adapt.StateView

[review comment, resolved] i-norden: We had this previously because the statediffing process would fall/lag far enough behind head, below the pruning threshold of a full node, such that the full node would sometimes prune away the state before the statediffing service had used it. This shouldn't be a problem anymore after the multitude of improvements made to performance.

}

// pluginBlockChain adapts the plugeth Backend to the blockChain interface
type pluginBlockChain struct {
	restricted.Backend
	ctx context.Context
}

func NewPluginBlockChain(backend restricted.Backend) BlockChain {
	return &pluginBlockChain{
		Backend: backend,
		ctx:     context.Background(),
	}
}

func (b *pluginBlockChain) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
	bufferChan := make(chan plugeth.ChainEvent, chainEventChanSize)
	sub := b.Backend.SubscribeChainEvent(bufferChan)
	go func() {
		for event := range bufferChan {
			block := utils.MustDecode[types.Block](event.Block)
			// Note: logs are processed with receipts while building the payload
			ch <- core.ChainEvent{
				Block: block,
				Hash:  common.Hash(event.Hash),

[review comment, resolved] i-norden: We should get the logs when we lookup the Receipts and pack them into the statediff payload.

			}
		}
	}()
	return sub
}

func (b *pluginBlockChain) CurrentBlock() *types.Header {
	buf := b.Backend.CurrentBlock()
	return utils.MustDecode[types.Header](buf)
}

func (b *pluginBlockChain) GetBlockByHash(hash common.Hash) *types.Block {
	buf, err := b.Backend.BlockByHash(b.ctx, plugeth.Hash(hash))
	if err != nil {
		panic(err)
	}
	return utils.MustDecode[types.Block](buf)
}

func (b *pluginBlockChain) GetBlockByNumber(number uint64) *types.Block {
	buf, err := b.Backend.BlockByNumber(b.ctx, int64(number))
	if err != nil {
		panic(err)
	}
	return utils.MustDecode[types.Block](buf)
}

func (b *pluginBlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
	buf, err := b.Backend.GetReceipts(b.ctx, plugeth.Hash(hash))
	if err != nil {
		panic(err)
	}
	var receipts types.Receipts
	err = json.Unmarshal(buf, &receipts)
	if err != nil {
		panic(err)
	}
	return receipts
}

func (b *pluginBlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
	return b.Backend.GetTd(b.ctx, plugeth.Hash(hash))
}

func (b *pluginBlockChain) StateCache() adapt.StateView {
	return &pluginStateView{backend: b}
}

func (b *pluginBlockChain) ChainConfig() *params.ChainConfig {
	return adapt.ChainConfig(b.Backend.ChainConfig())
}

// exposes a StateView from a combination of plugeth's core Backend and cached contract code
type pluginStateView struct {
	backend *pluginBlockChain
}

func (p *pluginStateView) OpenTrie(root common.Hash) (adapt.StateTrie, error) {
	t, err := p.backend.GetTrie(plugeth.Hash(root))
	if err != nil {
		return nil, err
	}
	return adapt.NewStateTrie(t), nil
}

func (p *pluginStateView) ContractCode(hash common.Hash) ([]byte, error) {
	return p.backend.GetContractCode(plugeth.Hash(hash))
}
|
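For orientation, here is a minimal sketch of how this adapter is meant to plug into the builder (illustrative only; the surrounding plugin initialization is not part of this diff, the `backend` variable is assumed to be the restricted.Backend supplied by plugeth, and package qualifiers are omitted for brevity):

    // backend is the restricted.Backend handed to the plugin by plugeth (assumed).
    chain := NewPluginBlockChain(backend)
    // StateCache() returns an adapt.StateView backed by GetTrie/GetContractCode,
    // which is what the reworked NewBuilder accepts instead of geth's state.Database.
    builder := NewBuilder(chain.StateCache())
    _ = builder

This is the design intent suggested by the diff: the adapter translates plugeth's restricted backend into the BlockChain and adapt.StateView interfaces the statediff service and builder consume.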
407
builder.go
@ -21,80 +21,83 @@ package statediff
 import (
     "bytes"
+    "encoding/hex"
     "fmt"
     "time"

     "github.com/ethereum/go-ethereum/common"
-    "github.com/ethereum/go-ethereum/core/state"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/crypto"
-    "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/rlp"
-    metrics2 "github.com/ethereum/go-ethereum/statediff/indexer/database/metrics"
-    ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
-    "github.com/ethereum/go-ethereum/statediff/indexer/shared"
-    "github.com/ethereum/go-ethereum/statediff/trie_helpers"
-    types2 "github.com/ethereum/go-ethereum/statediff/types"
     "github.com/ethereum/go-ethereum/trie"
+
+    "github.com/cerc-io/plugeth-statediff/adapt"
+    "github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
+    "github.com/cerc-io/plugeth-statediff/indexer/ipld"
+    "github.com/cerc-io/plugeth-statediff/indexer/shared"
+    "github.com/cerc-io/plugeth-statediff/trie_helpers"
+    sdtypes "github.com/cerc-io/plugeth-statediff/types"
+    "github.com/cerc-io/plugeth-statediff/utils"
+    "github.com/cerc-io/plugeth-statediff/utils/log"
 )

 var (
     emptyNode, _      = rlp.EncodeToBytes(&[]byte{})
     emptyContractRoot = crypto.Keccak256Hash(emptyNode)
     nullCodeHash      = crypto.Keccak256Hash([]byte{}).Bytes()
-    nullNodeHash      = common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")
+    zeroHash          common.Hash
 )

 // Builder interface exposes the method for building a state diff between two blocks
 type Builder interface {
-    BuildStateDiffObject(args Args, params Params) (types2.StateObject, error)
-    WriteStateDiffObject(args Args, params Params, output types2.StateNodeSink, ipldOutput types2.IPLDSink) error
+    BuildStateDiffObject(args Args, params Params) (sdtypes.StateObject, error)
+    WriteStateDiffObject(args Args, params Params, output sdtypes.StateNodeSink, ipldOutput sdtypes.IPLDSink) error
 }

 type StateDiffBuilder struct {
-    StateCache state.Database
+    StateCache adapt.StateView
 }

 type IterPair struct {
     Older, Newer trie.NodeIterator
 }

-func StateNodeAppender(nodes *[]types2.StateLeafNode) types2.StateNodeSink {
-    return func(node types2.StateLeafNode) error {
+func StateNodeAppender(nodes *[]sdtypes.StateLeafNode) sdtypes.StateNodeSink {
+    return func(node sdtypes.StateLeafNode) error {
         *nodes = append(*nodes, node)
         return nil
     }
 }
-func StorageNodeAppender(nodes *[]types2.StorageLeafNode) types2.StorageNodeSink {
-    return func(node types2.StorageLeafNode) error {
+func StorageNodeAppender(nodes *[]sdtypes.StorageLeafNode) sdtypes.StorageNodeSink {
+    return func(node sdtypes.StorageLeafNode) error {
         *nodes = append(*nodes, node)
         return nil
     }
 }
-func IPLDMappingAppender(iplds *[]types2.IPLD) types2.IPLDSink {
-    return func(c types2.IPLD) error {
+func IPLDMappingAppender(iplds *[]sdtypes.IPLD) sdtypes.IPLDSink {
+    return func(c sdtypes.IPLD) error {
         *iplds = append(*iplds, c)
         return nil
     }
 }

 // NewBuilder is used to create a statediff builder
-func NewBuilder(stateCache state.Database) Builder {
+func NewBuilder(stateCache adapt.StateView) Builder {
     return &StateDiffBuilder{
         StateCache: stateCache, // state cache is safe for concurrent reads
     }
 }

 // BuildStateDiffObject builds a statediff object from two blocks and the provided parameters
-func (sdb *StateDiffBuilder) BuildStateDiffObject(args Args, params Params) (types2.StateObject, error) {
-    defer metrics2.UpdateDuration(time.Now(), metrics2.IndexerMetrics.BuildStateDiffObjectTimer)
-    var stateNodes []types2.StateLeafNode
-    var iplds []types2.IPLD
+func (sdb *StateDiffBuilder) BuildStateDiffObject(args Args, params Params) (sdtypes.StateObject, error) {
+    defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.BuildStateDiffObjectTimer)
+    var stateNodes []sdtypes.StateLeafNode
+    var iplds []sdtypes.IPLD
     err := sdb.WriteStateDiffObject(args, params, StateNodeAppender(&stateNodes), IPLDMappingAppender(&iplds))
     if err != nil {
-        return types2.StateObject{}, err
+        return sdtypes.StateObject{}, err
     }
-    return types2.StateObject{
+    return sdtypes.StateObject{
         BlockHash:   args.BlockHash,
         BlockNumber: args.BlockNumber,
         Nodes:       stateNodes,
@ -103,17 +106,17 @@ func (sdb *StateDiffBuilder) BuildStateDiffObject(args Args, params Params) (typ
     }

 // WriteStateDiffObject writes a statediff object to output sinks
-func (sdb *StateDiffBuilder) WriteStateDiffObject(args Args, params Params, output types2.StateNodeSink,
-    ipldOutput types2.IPLDSink) error {
-    defer metrics2.UpdateDuration(time.Now(), metrics2.IndexerMetrics.WriteStateDiffObjectTimer)
+func (sdb *StateDiffBuilder) WriteStateDiffObject(args Args, params Params, output sdtypes.StateNodeSink,
+    ipldOutput sdtypes.IPLDSink) error {
+    defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.WriteStateDiffObjectTimer)
     // Load tries for old and new states
     oldTrie, err := sdb.StateCache.OpenTrie(args.OldStateRoot)
     if err != nil {
-        return fmt.Errorf("error creating trie for oldStateRoot: %v", err)
+        return fmt.Errorf("error creating trie for oldStateRoot: %w", err)
     }
     newTrie, err := sdb.StateCache.OpenTrie(args.NewStateRoot)
     if err != nil {
-        return fmt.Errorf("error creating trie for newStateRoot: %v", err)
+        return fmt.Errorf("error creating trie for newStateRoot: %w", err)
     }

     // we do two state trie iterations:
@ -131,21 +134,21 @@ func (sdb *StateDiffBuilder) WriteStateDiffObject(args Args, params Params, outp
         },
     }

-    logger := log.New("hash", args.BlockHash.Hex(), "number", args.BlockNumber)
-    return sdb.BuildStateDiffWithIntermediateStateNodes(iterPairs, params, output, ipldOutput, logger, nil)
+    logger := log.New("hash", args.BlockHash.String(), "number", args.BlockNumber)
+    return sdb.BuildStateDiff(iterPairs, params, output, ipldOutput, logger, nil)
 }

-func (sdb *StateDiffBuilder) BuildStateDiffWithIntermediateStateNodes(iterPairs []IterPair, params Params,
-    output types2.StateNodeSink, ipldOutput types2.IPLDSink, logger log.Logger, prefixPath []byte) error {
-    logger.Debug("statediff BEGIN BuildStateDiffWithIntermediateStateNodes")
-    defer metrics2.ReportAndUpdateDuration("statediff END BuildStateDiffWithIntermediateStateNodes", time.Now(), logger, metrics2.IndexerMetrics.BuildStateDiffWithIntermediateStateNodesTimer)
+func (sdb *StateDiffBuilder) BuildStateDiff(iterPairs []IterPair, params Params,
+    output sdtypes.StateNodeSink, ipldOutput sdtypes.IPLDSink, logger log.Logger, prefixPath []byte) error {
+    logger.Trace("statediff BEGIN BuildStateDiff")
+    defer metrics.ReportAndUpdateDuration("statediff END BuildStateDiff", time.Now(), logger, metrics.IndexerMetrics.BuildStateDiffTimer)
     // collect a slice of all the nodes that were touched and exist at B (B-A)
     // a map of their leafkey to all the accounts that were touched and exist at B
     // and a slice of all the paths for the nodes in both of the above sets
     diffAccountsAtB, err := sdb.createdAndUpdatedState(
         iterPairs[0].Older, iterPairs[0].Newer, params.watchedAddressesLeafPaths, ipldOutput, logger, prefixPath)
     if err != nil {
-        return fmt.Errorf("error collecting createdAndUpdatedNodes: %v", err)
+        return fmt.Errorf("error collecting createdAndUpdatedNodes: %w", err)
     }

     // collect a slice of all the nodes that existed at a path in A that doesn't exist in B
@ -154,14 +157,14 @@ func (sdb *StateDiffBuilder) BuildStateDiffWithIntermediateStateNodes(iterPairs
         iterPairs[1].Older, iterPairs[1].Newer, diffAccountsAtB,
         params.watchedAddressesLeafPaths, output, logger, prefixPath)
     if err != nil {
-        return fmt.Errorf("error collecting deletedOrUpdatedNodes: %v", err)
+        return fmt.Errorf("error collecting deletedOrUpdatedNodes: %w", err)
     }

     // collect and sort the leafkey keys for both account mappings into a slice
     t := time.Now()
     createKeys := trie_helpers.SortKeys(diffAccountsAtB)
     deleteKeys := trie_helpers.SortKeys(diffAccountsAtA)
-    logger.Debug(fmt.Sprintf("statediff BuildStateDiffWithIntermediateStateNodes sort duration=%dms", time.Since(t).Milliseconds()))
+    logger.Debug("statediff BuildStateDiff sort", "duration", time.Since(t))

     // and then find the intersection of these keys
     // these are the leafkeys for the accounts which exist at both A and B but are different
@ -169,19 +172,19 @@ func (sdb *StateDiffBuilder) BuildStateDiffWithIntermediateStateNodes(iterPairs
     // and leaving the truly created or deleted keys in place
     t = time.Now()
     updatedKeys := trie_helpers.FindIntersection(createKeys, deleteKeys)
-    logger.Debug(fmt.Sprintf("statediff BuildStateDiffWithIntermediateStateNodes intersection count=%d duration=%dms",
-        len(updatedKeys),
-        time.Since(t).Milliseconds()))
+    logger.Debug("statediff BuildStateDiff intersection",
+        "count", len(updatedKeys),
+        "duration", time.Since(t))

     // build the diff nodes for the updated accounts using the mappings at both A and B as directed by the keys found as the intersection of the two
     err = sdb.buildAccountUpdates(diffAccountsAtB, diffAccountsAtA, updatedKeys, output, ipldOutput, logger)
     if err != nil {
-        return fmt.Errorf("error building diff for updated accounts: %v", err)
+        return fmt.Errorf("error building diff for updated accounts: %w", err)
     }
     // build the diff nodes for created accounts
     err = sdb.buildAccountCreations(diffAccountsAtB, output, ipldOutput, logger)
     if err != nil {
-        return fmt.Errorf("error building diff for created accounts: %v", err)
+        return fmt.Errorf("error building diff for created accounts: %w", err)
     }
     return nil
 }
@ -191,22 +194,23 @@ func (sdb *StateDiffBuilder) BuildStateDiffWithIntermediateStateNodes(iterPairs
 // a mapping of their leafkeys to all the accounts that exist in a different state at B than A
 // and a slice of the paths for all of the nodes included in both
 func (sdb *StateDiffBuilder) createdAndUpdatedState(a, b trie.NodeIterator,
-    watchedAddressesLeafPaths [][]byte, output types2.IPLDSink, logger log.Logger, prefixPath []byte) (types2.AccountMap, error) {
-    logger.Debug("statediff BEGIN createdAndUpdatedState")
-    defer metrics2.ReportAndUpdateDuration("statediff END createdAndUpdatedState", time.Now(), logger, metrics2.IndexerMetrics.CreatedAndUpdatedStateTimer)
-    diffAccountsAtB := make(types2.AccountMap)
-    watchingAddresses := len(watchedAddressesLeafPaths) > 0
-
+    watchedAddressesLeafPaths [][]byte, output sdtypes.IPLDSink, logger log.Logger, prefixPath []byte) (sdtypes.AccountMap, error) {
+    logger.Trace("statediff BEGIN createdAndUpdatedState")
+    defer metrics.ReportAndUpdateDuration("statediff END createdAndUpdatedState", time.Now(), logger, metrics.IndexerMetrics.CreatedAndUpdatedStateTimer)
+    diffAccountsAtB := make(sdtypes.AccountMap)
+    // cache the RLP of the previous node, so when we hit a leaf we have the parent (containing) node
+    var prevBlob []byte
     it, itCount := trie.NewDifferenceIterator(a, b)
     for it.Next(true) {
         // ignore node if it is not along paths of interest
-        if watchingAddresses && !isValidPrefixPath(watchedAddressesLeafPaths, append(prefixPath, it.Path()...)) {
+        if !isWatchedPathPrefix(watchedAddressesLeafPaths, it.Path()) {
             continue
         }
         // index values by leaf key
         if it.Leaf() {
             // if it is a "value" node, we will index the value by leaf key
-            accountW, err := sdb.processStateValueNode(it, watchedAddressesLeafPaths, prefixPath)
+            accountW, err := sdb.processStateValueNode(it, prevBlob)
             if err != nil {
                 return nil, err
             }
@ -216,14 +220,17 @@ func (sdb *StateDiffBuilder) createdAndUpdatedState(a, b trie.NodeIterator,
             // for now, just add it to diffAccountsAtB
             // we will compare to diffAccountsAtA to determine which diffAccountsAtB
             // were creations and which were updates and also identify accounts that were removed going A->B
-            diffAccountsAtB[common.Bytes2Hex(accountW.LeafKey)] = *accountW
-        } else { // trie nodes will be written to blockstore only
-            // reminder that this includes leaf nodes, since the geth iterator.Leaf() actually signifies a "value" node
-            if bytes.Equal(it.Hash().Bytes(), nullNodeHash) {
+            diffAccountsAtB[hex.EncodeToString(accountW.LeafKey)] = *accountW
+        } else {
+            // trie nodes will be written to blockstore only
+            // reminder that this includes leaf nodes, since the geth iterator.Leaf() actually
+            // signifies a "value" node
+            if it.Hash() == zeroHash {
                 continue
             }
             nodeVal := make([]byte, len(it.NodeBlob()))
             copy(nodeVal, it.NodeBlob())
+            // if doing a selective diff, we need to ensure this is a watched path
             if len(watchedAddressesLeafPaths) > 0 {
                 var elements []interface{}
                 if err := rlp.DecodeBytes(nodeVal, &elements); err != nil {
@ -233,103 +240,74 @@ func (sdb *StateDiffBuilder) createdAndUpdatedState(a, b trie.NodeIterator,
                 if err != nil {
                     return nil, err
                 }
-                if ok {
-                    nodePath := append(prefixPath, it.Path()...)
-                    partialPath := trie.CompactToHex(elements[0].([]byte))
-                    valueNodePath := append(nodePath, partialPath...)
-                    if !isWatchedAddress(watchedAddressesLeafPaths, valueNodePath) {
-                        continue
-                    }
-                }
-            }
-            nodeHash := make([]byte, len(it.Hash().Bytes()))
-            copy(nodeHash, it.Hash().Bytes())
-            if err := output(types2.IPLD{
-                CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, nodeHash).String(),
+                partialPath := utils.CompactToHex(elements[0].([]byte))
+                valueNodePath := append(it.Path(), partialPath...)
+                if ok && !isWatchedPath(watchedAddressesLeafPaths, valueNodePath) {
+                    continue
+                }
+            }
+            if err := output(sdtypes.IPLD{
+                CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, it.Hash().Bytes()).String(),
                 Content: nodeVal,
             }); err != nil {
                 return nil, err
             }
+            prevBlob = nodeVal
         }
     }
-    logger.Debug("statediff COUNTS createdAndUpdatedStateWithIntermediateNodes", "it", itCount, "diffAccountsAtB", len(diffAccountsAtB))
-    metrics2.IndexerMetrics.DifferenceIteratorCounter.Inc(int64(*itCount))
+    logger.Debug("statediff COUNTS createdAndUpdatedState", "it", itCount, "diffAccountsAtB", len(diffAccountsAtB))
+    metrics.IndexerMetrics.DifferenceIteratorCounter.Inc(int64(*itCount))
     return diffAccountsAtB, it.Error()
 }

-// reminder: it.Leaf() == true when the iterator is positioned at a "value node" which is not something that actually exists in an MMPT
-func (sdb *StateDiffBuilder) processStateValueNode(it trie.NodeIterator, watchedAddressesLeafPaths [][]byte, prefixPath []byte) (*types2.AccountWrapper, error) {
-    // skip if it is not a watched address
-    // If we aren't watching any specific addresses, we are watching everything
-    if len(watchedAddressesLeafPaths) > 0 && !isWatchedAddress(watchedAddressesLeafPaths, append(prefixPath, it.Path()...)) {
-        return nil, nil
-    }
-
-    // since this is a "value node", we need to move up to the "parent" node which is the actual leaf node
-    // it should be in the fastcache since it necessarily was recently accessed to reach the current node
-    parentNodeRLP, err := sdb.StateCache.TrieDB().Node(it.Parent())
-    if err != nil {
-        return nil, err
-    }
-    var nodeElements []interface{}
-    if err = rlp.DecodeBytes(parentNodeRLP, &nodeElements); err != nil {
-        return nil, err
-    }
-    parentSubPath := make([]byte, len(it.ParentPath()))
-    copy(parentSubPath, it.ParentPath())
-    parentPath := append(prefixPath, parentSubPath...)
-    partialPath := trie.CompactToHex(nodeElements[0].([]byte))
-    valueNodePath := append(parentPath, partialPath...)
-    encodedPath := trie.HexToCompact(valueNodePath)
-    leafKey := encodedPath[1:]
-
+// decodes account at leaf and encodes RLP data to CID
+// reminder: it.Leaf() == true when the iterator is positioned at a "value node" (which is not something
+// that actually exists in an MMPT), therefore we pass the parent node blob as the leaf RLP.
+func (sdb *StateDiffBuilder) processStateValueNode(it trie.NodeIterator, parentBlob []byte) (*sdtypes.AccountWrapper, error) {
     var account types.StateAccount
-    accountRLP := make([]byte, len(it.LeafBlob()))
-    copy(accountRLP, it.LeafBlob())
+    if err := rlp.DecodeBytes(it.LeafBlob(), &account); err != nil {
+        return nil, fmt.Errorf("error decoding account at leaf key %x: %w", it.LeafKey(), err)
i-norden commented (resolved): Nice, removing these unnecessary/redundant allocations and `copy`s could have a noticeable impact on performance considering the number of times these operations are performed per block/statediff.
-    if err := rlp.DecodeBytes(accountRLP, &account); err != nil {
-        return nil, fmt.Errorf("error decoding account for leaf value at leaf key %x\nerror: %v", leafKey, err)
     }

-    return &types2.AccountWrapper{
+    return &sdtypes.AccountWrapper{
-        LeafKey: leafKey,
+        LeafKey: it.LeafKey(),
         Account: &account,
-        CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(parentNodeRLP)).String(),
+        CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(parentBlob)).String(),
     }, nil
 }

-// deletedOrUpdatedState returns a slice of all the pathes that are emptied at B
+// deletedOrUpdatedState returns a slice of all the paths that are emptied at B
 // and a mapping of their leafkeys to all the accounts that exist in a different state at A than B
-func (sdb *StateDiffBuilder) deletedOrUpdatedState(a, b trie.NodeIterator, diffAccountsAtB types2.AccountMap,
-    watchedAddressesLeafPaths [][]byte, output types2.StateNodeSink, logger log.Logger, prefixPath []byte) (types2.AccountMap, error) {
-    logger.Debug("statediff BEGIN deletedOrUpdatedState")
-    defer metrics2.ReportAndUpdateDuration("statediff END deletedOrUpdatedState", time.Now(), logger, metrics2.IndexerMetrics.DeletedOrUpdatedStateTimer)
-    diffAccountAtA := make(types2.AccountMap)
-    watchingAddresses := len(watchedAddressesLeafPaths) > 0
-
+func (sdb *StateDiffBuilder) deletedOrUpdatedState(a, b trie.NodeIterator, diffAccountsAtB sdtypes.AccountMap,
    watchedAddressesLeafPaths [][]byte, output sdtypes.StateNodeSink, logger log.Logger, prefixPath []byte) (sdtypes.AccountMap, error) {
+    logger.Trace("statediff BEGIN deletedOrUpdatedState")
+    defer metrics.ReportAndUpdateDuration("statediff END deletedOrUpdatedState", time.Now(), logger, metrics.IndexerMetrics.DeletedOrUpdatedStateTimer)
+    diffAccountAtA := make(sdtypes.AccountMap)
+    var prevBlob []byte
     it, _ := trie.NewDifferenceIterator(b, a)
     for it.Next(true) {
-        // ignore node if it is not along paths of interest
-        if watchingAddresses && !isValidPrefixPath(watchedAddressesLeafPaths, append(prefixPath, it.Path()...)) {
+        if !isWatchedPathPrefix(watchedAddressesLeafPaths, it.Path()) {
             continue
         }

         if it.Leaf() {
-            accountW, err := sdb.processStateValueNode(it, watchedAddressesLeafPaths, prefixPath)
+            accountW, err := sdb.processStateValueNode(it, prevBlob)
             if err != nil {
                 return nil, err
             }
             if accountW == nil {
                 continue
             }
-            leafKey := common.Bytes2Hex(accountW.LeafKey)
+            leafKey := hex.EncodeToString(accountW.LeafKey)
             diffAccountAtA[leafKey] = *accountW
             // if this node's leaf key did not show up in diffAccountsAtB
             // that means the account was deleted
             // in that case, emit an empty "removed" diff state node
             // include empty "removed" diff storage nodes for all the storage slots
             if _, ok := diffAccountsAtB[leafKey]; !ok {
-                diff := types2.StateLeafNode{
-                    AccountWrapper: types2.AccountWrapper{
+                diff := sdtypes.StateLeafNode{
+                    AccountWrapper: sdtypes.AccountWrapper{
                         Account: nil,
                         LeafKey: accountW.LeafKey,
                         CID:     shared.RemovedNodeStateCID,
@ -337,16 +315,19 @@ func (sdb *StateDiffBuilder) deletedOrUpdatedState(a, b trie.NodeIterator, diffA
                     Removed: true,
                 }

-                storageDiff := make([]types2.StorageLeafNode, 0)
+                storageDiff := make([]sdtypes.StorageLeafNode, 0)
                 err := sdb.buildRemovedAccountStorageNodes(accountW.Account.Root, StorageNodeAppender(&storageDiff))
                 if err != nil {
-                    return nil, fmt.Errorf("failed building storage diffs for removed state account with key %x\r\nerror: %v", leafKey, err)
+                    return nil, fmt.Errorf("failed building storage diffs for removed state account with key %x\r\nerror: %w", leafKey, err)
                 }
                 diff.StorageDiff = storageDiff
                 if err := output(diff); err != nil {
                     return nil, err
                 }
             }
+        } else {
+            prevBlob = make([]byte, len(it.NodeBlob()))
+            copy(prevBlob, it.NodeBlob())
         }
     }
     return diffAccountAtA, it.Error()
@ -356,25 +337,27 @@ func (sdb *StateDiffBuilder) deletedOrUpdatedState(a, b trie.NodeIterator, diffA
 // to generate the statediff node objects for all of the accounts that existed at both A and B but in different states
 // needs to be called before building account creations and deletions as this mutates
 // those account maps to remove the accounts which were updated
-func (sdb *StateDiffBuilder) buildAccountUpdates(creations, deletions types2.AccountMap, updatedKeys []string,
-    output types2.StateNodeSink, ipldOutput types2.IPLDSink, logger log.Logger) error {
-    logger.Debug("statediff BEGIN buildAccountUpdates", "creations", len(creations), "deletions", len(deletions), "updatedKeys", len(updatedKeys))
-    defer metrics2.ReportAndUpdateDuration("statediff END buildAccountUpdates ", time.Now(), logger, metrics2.IndexerMetrics.BuildAccountUpdatesTimer)
+func (sdb *StateDiffBuilder) buildAccountUpdates(creations, deletions sdtypes.AccountMap, updatedKeys []string,
+    output sdtypes.StateNodeSink, ipldOutput sdtypes.IPLDSink, logger log.Logger) error {
+    logger.Trace("statediff BEGIN buildAccountUpdates",
+        "creations", len(creations), "deletions", len(deletions), "updated", len(updatedKeys))
+    defer metrics.ReportAndUpdateDuration("statediff END buildAccountUpdates ",
+        time.Now(), logger, metrics.IndexerMetrics.BuildAccountUpdatesTimer)
     var err error
     for _, key := range updatedKeys {
         createdAcc := creations[key]
         deletedAcc := deletions[key]
-        storageDiff := make([]types2.StorageLeafNode, 0)
+        storageDiff := make([]sdtypes.StorageLeafNode, 0)
         if deletedAcc.Account != nil && createdAcc.Account != nil {
-            oldSR := deletedAcc.Account.Root
-            newSR := createdAcc.Account.Root
             err = sdb.buildStorageNodesIncremental(
-                oldSR, newSR, StorageNodeAppender(&storageDiff), ipldOutput)
+                deletedAcc.Account.Root, createdAcc.Account.Root,
+                StorageNodeAppender(&storageDiff), ipldOutput,
+            )
             if err != nil {
-                return fmt.Errorf("failed building incremental storage diffs for account with leafkey %s\r\nerror: %v", key, err)
+                return fmt.Errorf("failed building incremental storage diffs for account with leafkey %x\r\nerror: %w", key, err)
             }
         }
-        if err = output(types2.StateLeafNode{
+        if err = output(sdtypes.StateLeafNode{
             AccountWrapper: createdAcc,
             Removed:        false,
             StorageDiff:    storageDiff,
@ -390,31 +373,32 @@ func (sdb *StateDiffBuilder) buildAccountUpdates(creations, deletions types2.Acc

 // buildAccountCreations returns the statediff node objects for all the accounts that exist at B but not at A
 // it also returns the code and codehash for created contract accounts
-func (sdb *StateDiffBuilder) buildAccountCreations(accounts types2.AccountMap, output types2.StateNodeSink,
-    ipldOutput types2.IPLDSink, logger log.Logger) error {
-    logger.Debug("statediff BEGIN buildAccountCreations")
-    defer metrics2.ReportAndUpdateDuration("statediff END buildAccountCreations", time.Now(), logger, metrics2.IndexerMetrics.BuildAccountCreationsTimer)
+func (sdb *StateDiffBuilder) buildAccountCreations(accounts sdtypes.AccountMap, output sdtypes.StateNodeSink,
+    ipldOutput sdtypes.IPLDSink, logger log.Logger) error {
+    logger.Trace("statediff BEGIN buildAccountCreations")
+    defer metrics.ReportAndUpdateDuration("statediff END buildAccountCreations",
+        time.Now(), logger, metrics.IndexerMetrics.BuildAccountCreationsTimer)
     for _, val := range accounts {
-        diff := types2.StateLeafNode{
+        diff := sdtypes.StateLeafNode{
             AccountWrapper: val,
             Removed:        false,
         }
         if !bytes.Equal(val.Account.CodeHash, nullCodeHash) {
             // For contract creations, any storage node contained is a diff
-            storageDiff := make([]types2.StorageLeafNode, 0)
+            storageDiff := make([]sdtypes.StorageLeafNode, 0)
             err := sdb.buildStorageNodesEventual(val.Account.Root, StorageNodeAppender(&storageDiff), ipldOutput)
             if err != nil {
-                return fmt.Errorf("failed building eventual storage diffs for node with leaf key %x\r\nerror: %v", val.LeafKey, err)
+                return fmt.Errorf("failed building eventual storage diffs for node with leaf key %x\r\nerror: %w", val.LeafKey, err)
             }
             diff.StorageDiff = storageDiff
             // emit codehash => code mappings for contract
             codeHash := common.BytesToHash(val.Account.CodeHash)
-            code, err := sdb.StateCache.ContractCode(common.Hash{}, codeHash)
+            code, err := sdb.StateCache.ContractCode(codeHash)
             if err != nil {
-                return fmt.Errorf("failed to retrieve code for codehash %s\r\n error: %v", codeHash.String(), err)
+                return fmt.Errorf("failed to retrieve code for codehash %x\r\n error: %w", codeHash, err)
             }
-            if err := ipldOutput(types2.IPLD{
-                CID:     ipld2.Keccak256ToCid(ipld2.RawBinary, codeHash.Bytes()).String(),
+            if err := ipldOutput(sdtypes.IPLD{
+                CID:     ipld.Keccak256ToCid(ipld.RawBinary, codeHash.Bytes()).String(),
                 Content: code,
             }); err != nil {
                 return err
@ -430,106 +414,89 @@ func (sdb *StateDiffBuilder) buildAccountCreations(accounts types2.AccountMap, o

 // buildStorageNodesEventual builds the storage diff node objects for a created account
 // i.e. it returns all the storage nodes at this state, since there is no previous state
-func (sdb *StateDiffBuilder) buildStorageNodesEventual(sr common.Hash, output types2.StorageNodeSink,
-    ipldOutput types2.IPLDSink) error {
-    defer metrics2.UpdateDuration(time.Now(), metrics2.IndexerMetrics.BuildStorageNodesEventualTimer)
-    if bytes.Equal(sr.Bytes(), emptyContractRoot.Bytes()) {
+func (sdb *StateDiffBuilder) buildStorageNodesEventual(sr common.Hash, output sdtypes.StorageNodeSink,
+    ipldOutput sdtypes.IPLDSink) error {
+    defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.BuildStorageNodesEventualTimer)
+    if sr == emptyContractRoot {
         return nil
     }
-    log.Debug("Storage Root For Eventual Diff", "root", sr.Hex())
+    log.Debug("Storage root for eventual diff", "root", sr.String())
     sTrie, err := sdb.StateCache.OpenTrie(sr)
     if err != nil {
         log.Info("error in build storage diff eventual", "error", err)
         return err
     }
     it := sTrie.NodeIterator(make([]byte, 0))
-    err = sdb.buildStorageNodesFromTrie(it, output, ipldOutput)
-    if err != nil {
-        return err
-    }
-    return nil
+    return sdb.buildStorageNodesFromTrie(it, output, ipldOutput)
 }

-// buildStorageNodesFromTrie returns all the storage diff node objects in the provided node interator
-// including intermediate nodes can be turned on or off
-func (sdb *StateDiffBuilder) buildStorageNodesFromTrie(it trie.NodeIterator, output types2.StorageNodeSink,
-    ipldOutput types2.IPLDSink) error {
-    defer metrics2.UpdateDuration(time.Now(), metrics2.IndexerMetrics.BuildStorageNodesFromTrieTimer)
+// buildStorageNodesFromTrie returns all the storage diff node objects in the provided node iterator
+func (sdb *StateDiffBuilder) buildStorageNodesFromTrie(it trie.NodeIterator, output sdtypes.StorageNodeSink,
+    ipldOutput sdtypes.IPLDSink) error {
+    defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.BuildStorageNodesFromTrieTimer)
+    var prevBlob []byte
     for it.Next(true) {
         if it.Leaf() {
-            storageLeafNode, err := sdb.processStorageValueNode(it)
-            if err != nil {
-                return err
-            }
+            storageLeafNode := sdb.processStorageValueNode(it, prevBlob)
             if err := output(storageLeafNode); err != nil {
                 return err
             }
         } else {
             nodeVal := make([]byte, len(it.NodeBlob()))
             copy(nodeVal, it.NodeBlob())
-            nodeHash := make([]byte, len(it.Hash().Bytes()))
-            copy(nodeHash, it.Hash().Bytes())
-            if err := ipldOutput(types2.IPLD{
-                CID:     ipld2.Keccak256ToCid(ipld2.MEthStorageTrie, nodeHash).String(),
+            if err := ipldOutput(sdtypes.IPLD{
+                CID:     ipld.Keccak256ToCid(ipld.MEthStorageTrie, it.Hash().Bytes()).String(),
                 Content: nodeVal,
             }); err != nil {
                 return err
             }
+            prevBlob = nodeVal
         }
     }
     return it.Error()
 }

-// reminder: it.Leaf() == true when the iterator is positioned at a "value node" which is not something that actually exists in an MMPT
-func (sdb *StateDiffBuilder) processStorageValueNode(it trie.NodeIterator) (types2.StorageLeafNode, error) {
-    // skip if it is not a watched address
+// decodes account at leaf and encodes RLP data to CID
+// reminder: it.Leaf() == true when the iterator is positioned at a "value node" (which is not something
+// that actually exists in an MMPT), therefore we pass the parent node blob as the leaf RLP.
+func (sdb *StateDiffBuilder) processStorageValueNode(it trie.NodeIterator, parentBlob []byte) sdtypes.StorageLeafNode {
     leafKey := make([]byte, len(it.LeafKey()))
     copy(leafKey, it.LeafKey())
     value := make([]byte, len(it.LeafBlob()))
     copy(value, it.LeafBlob())

-    // since this is a "value node", we need to move up to the "parent" node which is the actual leaf node
-    // it should be in the fastcache since it necessarily was recently accessed to reach the current node
-    parentNodeRLP, err := sdb.StateCache.TrieDB().Node(it.Parent())
-    if err != nil {
-        return types2.StorageLeafNode{}, err
-    }
-
-    return types2.StorageLeafNode{
+    return sdtypes.StorageLeafNode{
         LeafKey: leafKey,
         Value:   value,
-        CID:     ipld2.Keccak256ToCid(ipld2.MEthStorageTrie, crypto.Keccak256(parentNodeRLP)).String(),
-    }, nil
+        CID:     ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(parentBlob)).String(),
+    }
 }

 // buildRemovedAccountStorageNodes builds the "removed" diffs for all the storage nodes for a destroyed account
-func (sdb *StateDiffBuilder) buildRemovedAccountStorageNodes(sr common.Hash, output types2.StorageNodeSink) error {
-    defer metrics2.UpdateDuration(time.Now(), metrics2.IndexerMetrics.BuildRemovedAccountStorageNodesTimer)
-    if bytes.Equal(sr.Bytes(), emptyContractRoot.Bytes()) {
+func (sdb *StateDiffBuilder) buildRemovedAccountStorageNodes(sr common.Hash, output sdtypes.StorageNodeSink) error {
+    defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.BuildRemovedAccountStorageNodesTimer)
+    if sr == emptyContractRoot {
         return nil
     }
-    log.Debug("Storage Root For Removed Diffs", "root", sr.Hex())
+    log.Debug("Storage root for removed diffs", "root", sr.String())
     sTrie, err := sdb.StateCache.OpenTrie(sr)
     if err != nil {
         log.Info("error in build removed account storage diffs", "error", err)
         return err
     }
     it := sTrie.NodeIterator(make([]byte, 0))
-    err = sdb.buildRemovedStorageNodesFromTrie(it, output)
-    if err != nil {
-        return err
-    }
-    return nil
+    return sdb.buildRemovedStorageNodesFromTrie(it, output)
 }

 // buildRemovedStorageNodesFromTrie returns diffs for all the storage nodes in the provided node interator
-func (sdb *StateDiffBuilder) buildRemovedStorageNodesFromTrie(it trie.NodeIterator, output types2.StorageNodeSink) error {
-    defer metrics2.UpdateDuration(time.Now(), metrics2.IndexerMetrics.BuildRemovedStorageNodesFromTrieTimer)
+func (sdb *StateDiffBuilder) buildRemovedStorageNodesFromTrie(it trie.NodeIterator, output sdtypes.StorageNodeSink) error {
+    defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.BuildRemovedStorageNodesFromTrieTimer)
     for it.Next(true) {
         if it.Leaf() { // only leaf values are indexed, don't need to demarcate removed intermediate nodes
             leafKey := make([]byte, len(it.LeafKey()))
             copy(leafKey, it.LeafKey())
-            if err := output(types2.StorageLeafNode{
+            if err := output(sdtypes.StorageLeafNode{
                 CID:     shared.RemovedNodeStorageCID,
                 Removed: true,
                 LeafKey: leafKey,
@ -543,18 +510,18 @@ func (sdb *StateDiffBuilder) buildRemovedStorageNodesFromTrie(it trie.NodeIterat
 }

 // buildStorageNodesIncremental builds the storage diff node objects for all nodes that exist in a different state at B than A
-func (sdb *StateDiffBuilder) buildStorageNodesIncremental(oldSR common.Hash, newSR common.Hash, output types2.StorageNodeSink,
-    ipldOutput types2.IPLDSink) error {
-    defer metrics2.UpdateDuration(time.Now(), metrics2.IndexerMetrics.BuildStorageNodesIncrementalTimer)
-    if bytes.Equal(newSR.Bytes(), oldSR.Bytes()) {
+func (sdb *StateDiffBuilder) buildStorageNodesIncremental(oldroot common.Hash, newroot common.Hash, output sdtypes.StorageNodeSink,
+    ipldOutput sdtypes.IPLDSink) error {
+    defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.BuildStorageNodesIncrementalTimer)
+    if newroot == oldroot {
         return nil
     }
-    log.Trace("Storage Roots for Incremental Diff", "old", oldSR.Hex(), "new", newSR.Hex())
+    log.Trace("Storage roots for incremental diff", "old", oldroot.String(), "new", newroot.String())
-    oldTrie, err := sdb.StateCache.OpenTrie(oldSR)
+    oldTrie, err := sdb.StateCache.OpenTrie(oldroot)
     if err != nil {
         return err
     }
-    newTrie, err := sdb.StateCache.OpenTrie(newSR)
+    newTrie, err := sdb.StateCache.OpenTrie(newroot)
     if err != nil {
         return err
     }
@ -564,50 +531,46 @@ func (sdb *StateDiffBuilder) buildStorageNodesIncremental(oldSR common.Hash, new
     if err != nil {
         return err
     }
-    err = sdb.deletedOrUpdatedStorage(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}),
+    return sdb.deletedOrUpdatedStorage(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}),
         diffSlotsAtB, output)
-    if err != nil {
-        return err
-    }
-    return nil
 }

-func (sdb *StateDiffBuilder) createdAndUpdatedStorage(a, b trie.NodeIterator, output types2.StorageNodeSink,
-    ipldOutput types2.IPLDSink) (map[string]bool, error) {
-    defer metrics2.UpdateDuration(time.Now(), metrics2.IndexerMetrics.CreatedAndUpdatedStorageTimer)
+func (sdb *StateDiffBuilder) createdAndUpdatedStorage(a, b trie.NodeIterator, output sdtypes.StorageNodeSink,
+    ipldOutput sdtypes.IPLDSink) (map[string]bool, error) {
+    defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.CreatedAndUpdatedStorageTimer)
     diffSlotsAtB := make(map[string]bool)
+    var prevBlob []byte
     it, _ := trie.NewDifferenceIterator(a, b)
     for it.Next(true) {
         if it.Leaf() {
-            storageLeafNode, err := sdb.processStorageValueNode(it)
-            if err != nil {
-                return nil, err
-            }
+            storageLeafNode := sdb.processStorageValueNode(it, prevBlob)
             if err := output(storageLeafNode); err != nil {
                 return nil, err
             }
-            diffSlotsAtB[common.Bytes2Hex(storageLeafNode.LeafKey)] = true
+            diffSlotsAtB[hex.EncodeToString(storageLeafNode.LeafKey)] = true
         } else {
-            if bytes.Equal(it.Hash().Bytes(), nullNodeHash) {
+            if it.Hash() == zeroHash {
                 continue
             }
             nodeVal := make([]byte, len(it.NodeBlob()))
             copy(nodeVal, it.NodeBlob())
             nodeHash := make([]byte, len(it.Hash().Bytes()))
             copy(nodeHash, it.Hash().Bytes())
-            if err := ipldOutput(types2.IPLD{
-                CID:     ipld2.Keccak256ToCid(ipld2.MEthStorageTrie, nodeHash).String(),
+            if err := ipldOutput(sdtypes.IPLD{
+                CID:     ipld.Keccak256ToCid(ipld.MEthStorageTrie, nodeHash).String(),
                 Content: nodeVal,
             }); err != nil {
                 return nil, err
             }
+            prevBlob = nodeVal
         }
     }
     return diffSlotsAtB, it.Error()
 }

-func (sdb *StateDiffBuilder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffSlotsAtB map[string]bool, output types2.StorageNodeSink) error {
-    defer metrics2.UpdateDuration(time.Now(), metrics2.IndexerMetrics.DeletedOrUpdatedStorageTimer)
+func (sdb *StateDiffBuilder) deletedOrUpdatedStorage(a, b trie.NodeIterator, diffSlotsAtB map[string]bool, output sdtypes.StorageNodeSink) error {
+    defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.DeletedOrUpdatedStorageTimer)
     it, _ := trie.NewDifferenceIterator(b, a)
     for it.Next(true) {
         if it.Leaf() {
@ -616,8 +579,8 @@ func (sdb *StateDiffBuilder) deletedOrUpdatedStorage(a, b trie.NodeIterator, dif
             // if this node's leaf key did not show up in diffSlotsAtB
             // that means the storage slot was vacated
             // in that case, emit an empty "removed" diff storage node
-            if _, ok := diffSlotsAtB[common.Bytes2Hex(leafKey)]; !ok {
-                if err := output(types2.StorageLeafNode{
+            if _, ok := diffSlotsAtB[hex.EncodeToString(leafKey)]; !ok {
+                if err := output(sdtypes.StorageLeafNode{
                     CID:     shared.RemovedNodeStorageCID,
                     Removed: true,
                     LeafKey: leafKey,
@ -631,26 +594,28 @@ func (sdb *StateDiffBuilder) deletedOrUpdatedStorage(a, b trie.NodeIterator, dif
     return it.Error()
 }

-// isValidPrefixPath is used to check if a node at currentPath is a parent | ancestor to one of the addresses the builder is configured to watch
-func isValidPrefixPath(watchedAddressesLeafPaths [][]byte, currentPath []byte) bool {
-    for _, watchedAddressPath := range watchedAddressesLeafPaths {
-        if bytes.HasPrefix(watchedAddressPath, currentPath) {
+// isWatchedPathPrefix checks if a node path is a prefix (ancestor) to one of the watched addresses.
+// An empty watch list means all paths are watched.
+func isWatchedPathPrefix(watchedLeafPaths [][]byte, path []byte) bool {
+    if len(watchedLeafPaths) == 0 {
+        return true
+    }
+    for _, watched := range watchedLeafPaths {
+        if bytes.HasPrefix(watched, path) {
             return true
         }
     }
     return false
 }

-// isWatchedAddress is used to check if a state account corresponds to one of the addresses the builder is configured to watch
-func isWatchedAddress(watchedAddressesLeafPaths [][]byte, valueNodePath []byte) bool {
-    defer metrics2.UpdateDuration(time.Now(), metrics2.IndexerMetrics.IsWatchedAddressTimer)
-    for _, watchedAddressPath := range watchedAddressesLeafPaths {
-        if bytes.Equal(watchedAddressPath, valueNodePath) {
+// isWatchedPath checks if a node path corresponds to one of the watched addresses
+func isWatchedPath(watchedLeafPaths [][]byte, leafPath []byte) bool {
+    defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.IsWatchedAddressTimer)
+    for _, watched := range watchedLeafPaths {
+        if bytes.Equal(watched, leafPath) {
             return true
         }
     }
     return false
 }
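To make the reworked entry points concrete, here is a minimal usage sketch of the builder API shown above (the root and hash variables are placeholders; in practice the statediff service supplies them from chain events, and an adapt.StateView such as the plugin adapter's StateCache() is assumed):

    var (
        parentRoot, currentRoot, blockHash common.Hash // placeholders for real roots/hash
        blockNumber                        = big.NewInt(1)
    )
    builder := NewBuilder(stateView) // stateView: an adapt.StateView, e.g. chain.StateCache()
    args := Args{
        OldStateRoot: parentRoot,
        NewStateRoot: currentRoot,
        BlockHash:    blockHash,
        BlockNumber:  blockNumber,
    }
    params := Params{} // an empty watch list means every account is diffed
    params.ComputeWatchedAddressesLeafPaths()
    obj, err := builder.BuildStateDiffObject(args, params)
    if err != nil {
        // handle error
    }
    _ = obj // obj.Nodes holds the account-level diffs (sdtypes.StateLeafNode)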
1380
builder_test.go
24
config.go
@ -23,7 +23,9 @@ import (

     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/crypto"
-    "github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
+
+    "github.com/cerc-io/plugeth-statediff/indexer/interfaces"
+    "github.com/cerc-io/plugeth-statediff/utils"
 )

 // Config contains instantiation parameters for the state diffing service
@ -36,11 +38,15 @@ type Config struct {
     ClientName string
     // Whether to enable writing state diffs directly to track blockchain head
     EnableWriteLoop bool
+    // The maximum number of blocks to backfill when tracking head.
+    BackfillMaxHeadGap uint64
+    // The maximum number of blocks behind the startup position to check for gaps.
+    BackfillCheckPastBlocks uint64
     // Size of the worker pool
     NumWorkers uint
     // Should the statediff service wait until geth has synced to the head of the blockchain?
     WaitForSync bool
-    // Context
+    // Context used during DB initialization
     Context context.Context
 }

@ -58,7 +64,7 @@ type Params struct {
 func (p *Params) ComputeWatchedAddressesLeafPaths() {
     p.watchedAddressesLeafPaths = make([][]byte, len(p.WatchedAddresses))
     for i, address := range p.WatchedAddresses {
-        p.watchedAddressesLeafPaths[i] = keybytesToHex(crypto.Keccak256(address.Bytes()))
+        p.watchedAddressesLeafPaths[i] = utils.KeybytesToHex(crypto.Keccak256(address[:]))
     }
 }

@ -73,15 +79,3 @@ type Args struct {
     OldStateRoot, NewStateRoot, BlockHash common.Hash
     BlockNumber *big.Int
 }
-
-// https://github.com/ethereum/go-ethereum/blob/master/trie/encoding.go#L97
-func keybytesToHex(str []byte) []byte {
-    l := len(str)*2 + 1
-    var nibbles = make([]byte, l)
-    for i, b := range str {
-        nibbles[i*2] = b / 16
-        nibbles[i*2+1] = b % 16
-    }
-    nibbles[l-1] = 16
-    return nibbles
-}
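As a worked note on the watched-address mechanics above (a sketch under the assumption that utils.KeybytesToHex matches the removed keybytesToHex helper, which this hunk suggests): a watched address becomes a trie leaf path by hashing the address and expanding the hash to hex nibbles plus a terminator, and that path is what isWatchedPathPrefix and isWatchedPath in builder.go compare against iterator paths.

    // Hypothetical example values, for illustration only.
    addr := common.HexToAddress("0x00000000000000000000000000000000deadbeef")
    leafPath := utils.KeybytesToHex(crypto.Keccak256(addr[:]))
    // len(leafPath) == 65: 64 nibbles (each 0..15) of keccak256(address),
    // followed by the 0x10 terminator, mirroring the removed keybytesToHex above.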
@ -1,17 +0,0 @@
# Overview

This document will provide some insight into the `known_gaps` table, their use cases, and implementation. Please refer to the [following PR](https://github.com/vulcanize/go-ethereum/pull/217) and the [following epic](https://github.com/vulcanize/ops/issues/143) to grasp their inception.

![known gaps](diagrams/KnownGapsProcess.png)

# Use Cases

The known gaps table is updated when the following events occur:

1. At start up we check the latest block from the `eth.headers_cid` table. We compare the first block that we are processing with the latest block from the DB. If they are not one unit of expectedDifference away from each other, add the gap between the two blocks.
2. If there is any error in processing a block (db connection, deadlock, etc), add that block to the knownErrorBlocks slice, when the next block is successfully written, write this slice into the DB.

# Glossary

1. `expectedDifference (number)` - This number indicates what the difference between two blocks should be. If we are capturing all events on a geth node then this number would be `1`. But once we scale nodes, the `expectedDifference` might be `2` or greater.
2. `processingKey (number)` - This number can be used to keep track of different geth nodes and their specific `expectedDifference`.
@ -1,3 +0,0 @@
# Overview

This folder keeps tracks of random documents as they relate to the `statediff` service.
@ -1,21 +0,0 @@
# Overview

This document covers some notes on the database component of the statediff service.

# Components

- Indexer: The indexer creates IPLD and DB models and inserts them into the Postgres DB, performing the insert using an atomic function.
- Builder: The builder constructs the statediff object that needs to be inserted.
- Known Gaps: Captures any gaps that might have occurred and either writes them to the DB, to a local SQL file, or to Prometheus, or reports a local error.

# Making Code Changes

## Adding a New Function to the Indexer

If you want to implement a new feature for adding data to the database, keep the following in mind:

1. You need to handle `sql`, `file`, and `dump` (a sketch of this pattern follows this document).
   1. `sql` - Contains the code needed to write directly to the `sql` DB.
   2. `file` - Contains all the code required to write the SQL statements to a file.
   3. `dump` - Contains all the code for outputting events to the console.
2. You will also have to add the new function to the `interfaces.StateDiffIndexer` interface.
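A rough sketch of the pattern described above, assuming an entirely hypothetical new method and table name (nothing here reflects the actual `interfaces.StateDiffIndexer` API): the capability is declared once on the interface and then implemented by each backend.

```go
package indexer

import (
	"fmt"
	"math/big"
	"os"
)

// StateDiffIndexer stands in for interfaces.StateDiffIndexer with one
// hypothetical new method added; existing methods are elided.
type StateDiffIndexer interface {
	PushHeaderNote(blockNumber *big.Int, note string) error
}

// sqlIndexer would write directly to Postgres.
type sqlIndexer struct{ /* db handle elided */ }

func (i *sqlIndexer) PushHeaderNote(n *big.Int, note string) error {
	// e.g. INSERT INTO eth.header_notes (block_number, note) VALUES ($1, $2)
	// (hypothetical table; real SQL write elided in this sketch)
	return fmt.Errorf("sketch only: SQL write not implemented")
}

// fileIndexer would append an equivalent SQL statement to a file for a
// later load step.
type fileIndexer struct{ out *os.File }

func (i *fileIndexer) PushHeaderNote(n *big.Int, note string) error {
	_, err := fmt.Fprintf(i.out, "INSERT INTO eth.header_notes VALUES (%s, '%s');\n", n, note)
	return err
}

// dumpIndexer just prints the event to the console.
type dumpIndexer struct{}

func (i *dumpIndexer) PushHeaderNote(n *big.Int, note string) error {
	_, err := fmt.Printf("header note: block=%s note=%q\n", n, note)
	return err
}
```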
Binary image removed (Size: 33 KiB)
13
docs/indexer.md
Normal file
@ -0,0 +1,13 @@
# Statediff database indexing

To process data in real time as Geth syncs updates to the Ethereum execution layer, the statediff
service is able to directly transform and load data into a Postgres database. The `indexer` package
contains abstractions for handling this ingestion.

## Interface

A `StateDiffIndexer` object is responsible for inserting statediff data into a database, as well as managing watched address lists for a given database.
Three implementations are currently maintained:
* `sql` for direct insertion to Postgres
* `file` which writes to CSV or SQL files for insertion in a separate step
* `dump` which simply dumps to stdout
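To show how the three maintained modes relate, here is a small sketch of selecting an implementation by a configured mode string. The constructor and type names are placeholders, not the package's actual API.

```go
package main

import (
	"fmt"
	"log"
)

// Indexer is a placeholder for the StateDiffIndexer abstraction.
type Indexer interface {
	Name() string
}

type sqlIndexer struct{}  // direct insertion to Postgres
type fileIndexer struct{} // CSV/SQL files written for a later load step
type dumpIndexer struct{} // dumps everything to stdout

func (sqlIndexer) Name() string  { return "sql" }
func (fileIndexer) Name() string { return "file" }
func (dumpIndexer) Name() string { return "dump" }

// newIndexer picks an implementation from a configured mode string.
func newIndexer(mode string) (Indexer, error) {
	switch mode {
	case "sql":
		return sqlIndexer{}, nil
	case "file":
		return fileIndexer{}, nil
	case "dump":
		return dumpIndexer{}, nil
	default:
		return nil, fmt.Errorf("unknown indexer mode %q", mode)
	}
}

func main() {
	idx, err := newIndexer("dump")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("using indexer:", idx.Name())
}
```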
127
go.mod
Normal file
@ -0,0 +1,127 @@
module github.com/cerc-io/plugeth-statediff

go 1.19

require (
	github.com/ethereum/go-ethereum v1.11.6
	github.com/georgysavva/scany v0.2.9
	github.com/golang/mock v1.6.0
	github.com/inconshreveable/log15 v2.16.0+incompatible
	github.com/ipfs/go-cid v0.2.0
	github.com/jackc/pgconn v1.10.0
	github.com/jackc/pgtype v1.8.1
	github.com/jackc/pgx/v4 v4.13.0
	github.com/jmoiron/sqlx v1.2.0
	github.com/lib/pq v1.10.6
	github.com/multiformats/go-multihash v0.1.0
	github.com/openrelayxyz/plugeth-utils v1.2.0
	github.com/pganalyze/pg_query_go/v4 v4.2.1
	github.com/shopspring/decimal v1.2.0
	github.com/stretchr/testify v1.8.1
	github.com/thoas/go-funk v0.9.2
)

require (
	github.com/DataDog/zstd v1.5.2 // indirect
	github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
	github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/cockroachdb/errors v1.9.1 // indirect
	github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
	github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect
	github.com/cockroachdb/redact v1.1.3 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/deckarep/golang-set/v2 v2.1.0 // indirect
	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
	github.com/deepmap/oapi-codegen v1.8.2 // indirect
	github.com/edsrzf/mmap-go v1.0.0 // indirect
	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
	github.com/fsnotify/fsnotify v1.6.0 // indirect
	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
	github.com/getsentry/sentry-go v0.18.0 // indirect
	github.com/go-ole/go-ole v1.2.6 // indirect
	github.com/go-stack/stack v1.8.1 // indirect
	github.com/gofrs/flock v0.8.1 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang-jwt/jwt/v4 v4.3.0 // indirect
	github.com/golang/protobuf v1.5.2 // indirect
	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
	github.com/google/uuid v1.3.0 // indirect
	github.com/gorilla/websocket v1.4.2 // indirect
	github.com/graph-gophers/graphql-go v1.3.0 // indirect
	github.com/hashicorp/go-bexpr v0.1.10 // indirect
	github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
	github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect
	github.com/huin/goupnp v1.0.3 // indirect
	github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
	github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
	github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
	github.com/jackc/pgio v1.0.0 // indirect
	github.com/jackc/pgpassfile v1.0.0 // indirect
	github.com/jackc/pgproto3/v2 v2.1.1 // indirect
	github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
	github.com/jackc/puddle v1.1.3 // indirect
	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
	github.com/klauspost/compress v1.15.15 // indirect
	github.com/klauspost/cpuid/v2 v2.0.9 // indirect
	github.com/kr/pretty v0.3.1 // indirect
	github.com/kr/text v0.2.0 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.16 // indirect
	github.com/mattn/go-runewidth v0.0.9 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
	github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
	github.com/minio/sha256-simd v1.0.0 // indirect
	github.com/mitchellh/mapstructure v1.4.1 // indirect
	github.com/mitchellh/pointerstructure v1.2.0 // indirect
	github.com/mr-tron/base58 v1.2.0 // indirect
	github.com/multiformats/go-base32 v0.0.3 // indirect
	github.com/multiformats/go-base36 v0.1.0 // indirect
	github.com/multiformats/go-multibase v0.0.3 // indirect
	github.com/multiformats/go-varint v0.0.6 // indirect
	github.com/olekukonko/tablewriter v0.0.5 // indirect
	github.com/opentracing/opentracing-go v1.1.0 // indirect
	github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/prometheus/client_golang v1.14.0 // indirect
	github.com/prometheus/client_model v0.3.0 // indirect
	github.com/prometheus/common v0.39.0 // indirect
	github.com/prometheus/procfs v0.9.0 // indirect
	github.com/rogpeppe/go-internal v1.9.0 // indirect
	github.com/rs/cors v1.7.0 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect
	github.com/spaolacci/murmur3 v1.1.0 // indirect
	github.com/status-im/keycard-go v0.2.0 // indirect
	github.com/stretchr/objx v0.5.0 // indirect
	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
	github.com/tklauser/go-sysconf v0.3.5 // indirect
	github.com/tklauser/numcpus v0.2.2 // indirect
	github.com/tyler-smith/go-bip39 v1.1.0 // indirect
	github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa // indirect
	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
	golang.org/x/crypto v0.1.0 // indirect
	golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
	golang.org/x/net v0.8.0 // indirect
	golang.org/x/sync v0.1.0 // indirect
	golang.org/x/sys v0.6.0 // indirect
	golang.org/x/term v0.6.0 // indirect
	golang.org/x/text v0.8.0 // indirect
	golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
	google.golang.org/protobuf v1.28.1 // indirect
	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	lukechampine.com/blake3 v1.1.6 // indirect
)

replace (
	github.com/ethereum/go-ethereum => git.vdb.to/cerc-io/plugeth v0.0.0-20230710223804-34971d65a36a
	github.com/openrelayxyz/plugeth-utils => git.vdb.to/cerc-io/plugeth-utils v0.0.0-20230706160122-cd41de354c46
)
762
go.sum
Normal file
@ -0,0 +1,762 @@
|
|||||||
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
git.vdb.to/cerc-io/plugeth v0.0.0-20230710223804-34971d65a36a h1:R3DoXSTTXc0xc3M/hOFppVitj1lk1cn2VWTsZloYZ/8=
|
||||||
|
git.vdb.to/cerc-io/plugeth v0.0.0-20230710223804-34971d65a36a/go.mod h1:odpOaIpK01aVThIoAuw9YryLBJeHYOsDn9Mxm4LhB5s=
|
||||||
|
git.vdb.to/cerc-io/plugeth-utils v0.0.0-20230706160122-cd41de354c46 h1:KYcbbne/RXd7AuxbUd/3hgk1jPN+33k2CKiNsUsMCC0=
|
||||||
|
git.vdb.to/cerc-io/plugeth-utils v0.0.0-20230706160122-cd41de354c46/go.mod h1:VpDN61dxy64zGff05F0adujR5enD/JEdXBkTQ+PaIsQ=
|
||||||
|
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
|
||||||
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
|
||||||
|
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
|
||||||
|
github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
|
||||||
|
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
|
||||||
|
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
|
||||||
|
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
|
||||||
|
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
|
||||||
|
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
|
||||||
|
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
|
||||||
|
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||||
|
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
|
||||||
|
github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
|
||||||
|
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
|
||||||
|
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
|
||||||
|
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||||
|
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||||
|
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
|
||||||
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
|
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||||
|
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
|
||||||
|
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
|
||||||
|
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
|
||||||
|
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
|
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
|
||||||
|
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
|
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||||
|
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
|
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||||
|
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
|
||||||
|
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||||
|
github.com/cockroachdb/cockroach-go/v2 v2.0.3 h1:ZA346ACHIZctef6trOTwBAEvPVm1k0uLm/bb2Atc+S8=
|
||||||
|
github.com/cockroachdb/cockroach-go/v2 v2.0.3/go.mod h1:hAuDgiVgDVkfirP9JnhXEfcXEPRKBpYdGz+l7mvYSzw=
|
||||||
|
github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
|
||||||
|
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
|
||||||
|
github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8=
|
||||||
|
github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk=
|
||||||
|
github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
|
||||||
|
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
|
||||||
|
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
|
||||||
|
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoGMWEhDvS3zToKcDpRsLuRolQJBVGdozk=
|
||||||
|
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM=
|
||||||
|
github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ=
|
||||||
|
github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||||
|
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
|
||||||
|
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||||
|
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||||
|
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||||
|
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||||
|
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||||
|
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
|
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||||
|
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||||
|
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
|
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI=
|
||||||
|
github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
|
||||||
|
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
|
||||||
|
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||||
|
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
|
||||||
|
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||||
|
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
|
||||||
|
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
|
||||||
|
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
|
||||||
|
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
|
||||||
|
github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
|
||||||
|
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||||
|
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||||
|
github.com/docker/docker v1.6.2 h1:HlFGsy+9/xrgMmhmN+NGhCc5SHGJ7I+kHosRR1xc/aI=
|
||||||
|
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||||
|
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
|
||||||
|
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||||
|
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
|
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||||
|
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||||
|
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
|
||||||
|
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
|
||||||
|
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
|
||||||
|
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||||
|
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
|
||||||
|
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||||
|
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||||
|
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||||
|
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||||
|
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||||
|
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
|
||||||
|
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||||
|
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
|
||||||
|
github.com/georgysavva/scany v0.2.9 h1:Xt6rjYpHnMClTm/g+oZTnoSxUwiln5GqMNU+QeLNHQU=
|
||||||
|
github.com/georgysavva/scany v0.2.9/go.mod h1:yeOeC1BdIdl6hOwy8uefL2WNSlseFzbhlG/frrh65SA=
|
||||||
|
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
|
||||||
|
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
|
||||||
|
github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c=
|
||||||
|
github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
|
||||||
|
github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
|
||||||
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
|
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
|
||||||
|
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
|
||||||
|
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
|
||||||
|
github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
|
||||||
|
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||||
|
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
|
||||||
|
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||||
|
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||||
|
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
|
||||||
|
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||||
|
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||||
|
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||||
|
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||||
|
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||||
|
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
|
||||||
|
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||||
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
|
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||||
|
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||||
|
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
|
||||||
|
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||||
|
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
|
||||||
|
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
|
||||||
|
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||||
|
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||||
|
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
|
||||||
|
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||||
|
github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||||
|
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
|
||||||
|
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
|
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||||
|
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||||
|
github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
|
||||||
|
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
|
||||||
|
github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog=
|
||||||
|
github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
|
||||||
|
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
|
||||||
|
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
|
||||||
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||||
|
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||||
|
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||||
|
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
|
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||||
|
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||||
|
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||||
|
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||||
|
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||||
|
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
|
||||||
|
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||||
|
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
|
||||||
|
github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
|
||||||
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||||
|
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||||
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
|
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||||
|
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||||
|
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||||
|
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||||
|
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||||
|
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||||
|
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||||
|
github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
|
||||||
|
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
|
||||||
|
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
|
||||||
|
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
|
||||||
|
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||||
|
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||||
|
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||||
|
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||||
|
github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c h1:DZfsyhDK1hnSS5lH8l+JggqzEleHteTYfutAiVlSUM8=
|
||||||
|
github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
|
||||||
|
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||||
|
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
|
||||||
|
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
|
||||||
|
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
|
||||||
|
github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
|
||||||
|
github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
|
||||||
|
github.com/inconshreveable/log15 v2.16.0+incompatible h1:6nvMKxtGcpgm7q0KiGs+Vc+xDvUXaBqsPKHWKsinccw=
|
||||||
|
github.com/inconshreveable/log15 v2.16.0+incompatible/go.mod h1:cOaXtrgN4ScfRrD9Bre7U1thNq5RtJ8ZoP4iXVGRj6o=
|
||||||
|
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||||
|
github.com/influxdata/influxdb-client-go/v2 v2.4.0 h1:HGBfZYStlx3Kqvsv1h2pJixbCl/jhnFtxpKFAv9Tu5k=
|
||||||
|
github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8=
|
||||||
|
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c h1:qSHzRbhzK8RdXOsAdfDgO49TtqC1oZ+acxPrkfTxcCs=
|
||||||
|
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
|
||||||
|
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
|
||||||
|
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 h1:vilfsDSy7TDxedi9gyBkMvAirat/oRcL0lFdJBf6tdM=
|
||||||
|
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
|
||||||
|
github.com/ipfs/go-cid v0.2.0 h1:01JTiihFq9en9Vz0lc0VDWvZe/uBonGpzo4THP0vcQ0=
|
||||||
|
github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro=
|
||||||
|
github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
|
||||||
|
github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
|
||||||
|
github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk=
|
||||||
|
github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g=
|
||||||
|
github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
|
||||||
|
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
|
||||||
|
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
||||||
|
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
|
||||||
|
github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
||||||
|
github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
|
||||||
|
github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
|
||||||
|
github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
|
||||||
|
github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk=
|
||||||
|
github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
|
||||||
|
github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
|
||||||
|
github.com/jackc/pgconn v1.6.4/go.mod h1:w2pne1C2tZgP+TvjqLpOigGzNqjBgQW9dUw/4Chex78=
|
||||||
|
github.com/jackc/pgconn v1.7.0/go.mod h1:sF/lPpNEMEOp+IYhyQGdAvrG20gWf6A1tKlr0v7JMeA=
|
||||||
|
github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
|
||||||
|
github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
|
||||||
|
github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
|
||||||
|
github.com/jackc/pgconn v1.10.0 h1:4EYhlDVEMsJ30nNj0mmgwIUXoq7e9sMJrVC2ED6QlCU=
|
||||||
|
github.com/jackc/pgconn v1.10.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
|
||||||
|
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
|
||||||
|
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
|
||||||
|
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
|
||||||
|
github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
|
||||||
|
github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=
|
||||||
|
github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
|
||||||
|
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||||
|
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||||
|
github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
|
||||||
|
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
|
||||||
|
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
|
||||||
|
github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
||||||
|
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
||||||
|
github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||||
|
github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||||
|
github.com/jackc/pgproto3/v2 v2.0.5/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||||
|
github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||||
|
github.com/jackc/pgproto3/v2 v2.1.1 h1:7PQ/4gLoqnl87ZxL7xjO0DR5gYuviDCZxQJsUlFW1eI=
|
||||||
|
github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||||
|
github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
|
||||||
|
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
|
||||||
|
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
|
||||||
|
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
|
||||||
|
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
|
||||||
|
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
|
||||||
|
github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
|
||||||
|
github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik=
|
||||||
|
github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po=
|
||||||
|
github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ=
|
||||||
|
github.com/jackc/pgtype v1.4.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
|
||||||
|
github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
|
||||||
|
github.com/jackc/pgtype v1.8.1 h1:9k0IXtdJXHJbyAWQgbWr1lU+MEhPXZz6RIXxfR5oxXs=
|
||||||
|
github.com/jackc/pgtype v1.8.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
|
||||||
|
github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
|
||||||
|
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
|
||||||
|
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
|
||||||
|
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
|
||||||
|
github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA=
|
||||||
|
github.com/jackc/pgx/v4 v4.6.0/go.mod h1:vPh43ZzxijXUVJ+t/EmXBtFmbFVO72cuneCT9oAlxAg=
|
||||||
|
github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o=
|
||||||
|
github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg=
|
||||||
|
github.com/jackc/pgx/v4 v4.8.1/go.mod h1:4HOLxrl8wToZJReD04/yB20GDwf4KBYETvlHciCnwW0=
|
||||||
|
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
|
||||||
|
github.com/jackc/pgx/v4 v4.13.0 h1:JCjhT5vmhMAf/YwBHLvrBn4OGdIQBiFG6ym8Zmdx570=
|
||||||
|
github.com/jackc/pgx/v4 v4.13.0/go.mod h1:9P4X524sErlaxj0XSGZk7s+LD0eOyu1ZDUrrpznYDF0=
|
||||||
|
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||||
|
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||||
|
github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||||
|
github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||||
|
github.com/jackc/puddle v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||||
|
github.com/jackc/puddle v1.1.3 h1:JnPg/5Q9xVJGfjsO5CPUOjnJps1JaRUm8I9FXVCFK94=
|
||||||
|
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||||
|
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||||
|
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||||
|
github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs=
|
||||||
|
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
||||||
|
github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||||
|
github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
|
||||||
|
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
|
||||||
|
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||||
|
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
|
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||||
|
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
|
||||||
|
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
|
||||||
|
github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8=
|
||||||
|
github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE=
|
||||||
|
github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE=
|
||||||
|
github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro=
|
||||||
|
github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8=
|
||||||
|
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||||
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
|
github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||||
|
github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||||
|
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
|
||||||
|
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
|
||||||
|
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||||
|
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||||
|
github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
|
||||||
|
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||||
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
|
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
|
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||||
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
|
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
|
||||||
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
|
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||||
|
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
|
||||||
|
github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y=
|
||||||
|
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
|
||||||
|
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
|
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
|
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
|
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
|
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
|
github.com/lib/pq v1.4.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||||
|
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||||
|
github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs=
|
||||||
|
github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||||
|
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||||
|
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||||
|
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||||
|
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
|
||||||
|
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||||
|
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||||
|
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||||
|
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||||
|
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||||
|
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||||
|
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||||
|
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||||
|
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||||
|
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||||
|
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||||
|
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||||
|
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||||
|
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||||
|
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
||||||
|
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||||
|
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||||
|
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
|
||||||
|
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||||
|
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||||
|
github.com/mattn/go-sqlite3 v2.0.1+incompatible h1:xQ15muvnzGBHpIpdrNi1DA5x0+TcBZzsIDwmw9uTHzw=
|
||||||
|
github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||||
|
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
|
||||||
|
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||||
|
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||||
|
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
|
||||||
|
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
|
||||||
|
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
|
||||||
|
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
|
||||||
|
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
|
||||||
|
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
|
||||||
|
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||||
|
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||||
|
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
|
||||||
|
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||||
|
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
|
||||||
|
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
|
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
|
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
|
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
|
||||||
|
github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
|
||||||
|
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
||||||
|
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||||
|
github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI=
|
||||||
|
github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
|
||||||
|
github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4=
|
||||||
|
github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
|
||||||
|
github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk=
|
||||||
|
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
|
||||||
|
github.com/multiformats/go-multihash v0.1.0 h1:CgAgwqk3//SVEw3T+6DqI4mWMyRuDwZtOWcJT0q9+EA=
|
||||||
|
github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84=
|
||||||
|
github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY=
|
||||||
|
github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
||||||
|
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
||||||
|
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
|
||||||
|
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||||
|
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||||
|
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
|
||||||
|
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||||
|
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||||
|
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
||||||
|
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
|
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
|
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||||
|
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
|
||||||
|
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||||
|
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||||
|
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
|
||||||
|
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||||
|
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
|
||||||
|
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||||
|
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||||
|
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
|
||||||
|
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
|
||||||
|
github.com/pganalyze/pg_query_go/v4 v4.2.1 h1:id/vuyIQccb9f6Yx3pzH5l4QYrxE3v6/m8RPlgMrprc=
|
||||||
|
github.com/pganalyze/pg_query_go/v4 v4.2.1/go.mod h1:aEkDNOXNM5j0YGzaAapwJ7LB3dLNj+bvbWcLv1hOVqA=
|
||||||
|
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||||
|
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||||
|
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||||
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||||
|
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||||
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
|
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||||
|
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||||
|
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
|
||||||
|
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
|
||||||
|
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
||||||
|
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
||||||
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
|
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||||
|
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
|
||||||
|
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||||
|
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||||
|
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||||
|
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||||
|
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||||
|
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
|
||||||
|
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
|
||||||
|
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||||
|
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||||
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
|
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||||
|
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||||
|
github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g=
|
||||||
|
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||||
|
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU=
|
||||||
|
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||||
|
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||||
|
github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||||
|
github.com/shopspring/decimal v0.0.0-20200419222939-1884f454f8ea/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||||
|
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
|
||||||
|
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||||
|
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||||
|
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||||
|
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||||
|
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||||
|
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||||
|
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||||
|
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||||
|
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||||
|
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||||
|
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||||
|
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||||
|
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
|
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||||
|
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
|
||||||
|
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
|
||||||
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||||
|
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||||
|
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
|
||||||
|
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||||
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
|
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||||
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
|
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||||
|
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||||
|
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||||
|
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||||
|
github.com/thoas/go-funk v0.9.2 h1:oKlNYv0AY5nyf9g+/GhMgS/UO2ces0QRdPKwkhY3VCk=
|
||||||
|
github.com/thoas/go-funk v0.9.2/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q=
|
||||||
|
github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4=
|
||||||
|
github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
|
||||||
|
github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA=
|
||||||
|
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
|
||||||
|
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
|
||||||
|
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
|
||||||
|
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||||
|
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||||
|
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||||
|
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||||
|
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
|
||||||
|
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
|
||||||
|
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
|
||||||
|
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||||
|
github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
|
||||||
|
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
|
||||||
|
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||||
|
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
|
||||||
|
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||||
|
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||||
|
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||||
|
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||||
|
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||||
|
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||||
|
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
|
||||||
|
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
|
||||||
|
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
|
||||||
|
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
|
||||||
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
|
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
||||||
|
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||||
|
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||||
|
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||||
|
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||||
|
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||||
|
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||||
|
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
|
||||||
|
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||||
|
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||||
|
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||||
|
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
|
||||||
|
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||||
|
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||||
|
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||||
|
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||||
|
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||||
|
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
|
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
|
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
|
||||||
|
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||||
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
|
||||||
|
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||||
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||||
|
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
|
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
|
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
|
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
|
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||||
|
golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
|
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
|
||||||
|
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||||
|
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
|
||||||
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||||
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
|
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
|
||||||
|
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
|
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
|
||||||
|
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||||
|
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=
|
||||||
|
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||||
|
golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
|
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
|
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
|
||||||
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
|
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||||
|
google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
|
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||||
|
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||||
|
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
|
||||||
|
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||||
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
|
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||||
|
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||||
|
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||||
|
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||||
|
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||||
|
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||||
|
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||||
|
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||||
|
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
|
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||||
|
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
|
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||||
|
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||||
|
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
|
||||||
|
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
|
||||||
|
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
|
||||||
|
gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||||
|
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
|
||||||
|
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||||
|
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||||
|
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||||
|
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||||
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||||
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||||
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||||
|
lukechampine.com/blake3 v1.1.6 h1:H3cROdztr7RCfoaTpGZFQsrqvweFLrqS73j7L7cmR5c=
|
||||||
|
lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
|
@ -20,15 +20,16 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/cerc-io/plugeth-statediff/utils/log"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/dump"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/file"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/dump"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/file"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/node"
|
"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
|
"github.com/cerc-io/plugeth-statediff/indexer/node"
|
||||||
|
"github.com/cerc-io/plugeth-statediff/indexer/shared"
|
||||||
)
|
)
|
||||||
|
|
||||||
// NewStateDiffIndexer creates and returns an implementation of the StateDiffIndexer interface.
|
// NewStateDiffIndexer creates and returns an implementation of the StateDiffIndexer interface.
|
||||||
@ -41,7 +42,7 @@ func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, n
|
|||||||
return nil, nil, fmt.Errorf("file config is not the correct type: got %T, expected %T", config, file.Config{})
|
return nil, nil, fmt.Errorf("file config is not the correct type: got %T, expected %T", config, file.Config{})
|
||||||
}
|
}
|
||||||
fc.NodeInfo = nodeInfo
|
fc.NodeInfo = nodeInfo
|
||||||
ind, err := file.NewStateDiffIndexer(ctx, chainConfig, fc)
|
ind, err := file.NewStateDiffIndexer(chainConfig, fc)
|
||||||
return nil, ind, err
|
return nil, ind, err
|
||||||
case shared.POSTGRES:
|
case shared.POSTGRES:
|
||||||
log.Info("Starting statediff service in Postgres writing mode")
|
log.Info("Starting statediff service in Postgres writing mode")
|
||||||
|
@ -20,9 +20,9 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
|
"github.com/cerc-io/plugeth-statediff/indexer/ipld"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
"github.com/cerc-io/plugeth-statediff/indexer/models"
|
||||||
)
|
)
|
||||||
|
|
||||||
// BatchTx wraps a void with the state necessary for building the tx concurrently during trie difference iteration
|
// BatchTx wraps a void with the state necessary for building the tx concurrently during trie difference iteration
|
||||||
|
@ -21,9 +21,14 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
|
"github.com/cerc-io/plugeth-statediff/indexer/shared"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Config for data dump
|
||||||
|
type Config struct {
|
||||||
|
Dump io.WriteCloser
|
||||||
|
}
|
||||||
|
|
||||||
// DumpType to explicitly type the dump destination
|
// DumpType to explicitly type the dump destination
|
||||||
type DumpType string
|
type DumpType string
|
||||||
|
|
||||||
@ -31,9 +36,14 @@ const (
|
|||||||
STDOUT = "Stdout"
|
STDOUT = "Stdout"
|
||||||
STDERR = "Stderr"
|
STDERR = "Stderr"
|
||||||
DISCARD = "Discard"
|
DISCARD = "Discard"
|
||||||
UNKNOWN = "Unknown"
|
INVALID = "Invalid"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Type satisfies interfaces.Config
|
||||||
|
func (c Config) Type() shared.DBType {
|
||||||
|
return shared.DUMP
|
||||||
|
}
|
||||||
|
|
||||||
// ResolveDumpType resolves the dump type for the provided string
|
// ResolveDumpType resolves the dump type for the provided string
|
||||||
func ResolveDumpType(str string) (DumpType, error) {
|
func ResolveDumpType(str string) (DumpType, error) {
|
||||||
switch strings.ToLower(str) {
|
switch strings.ToLower(str) {
|
||||||
@ -44,36 +54,27 @@ func ResolveDumpType(str string) (DumpType, error) {
|
|||||||
case "discard", "void", "devnull", "dev null":
|
case "discard", "void", "devnull", "dev null":
|
||||||
return DISCARD, nil
|
return DISCARD, nil
|
||||||
default:
|
default:
|
||||||
return UNKNOWN, fmt.Errorf("unrecognized dump type: %s", str)
|
return INVALID, fmt.Errorf("unrecognized dump type: %s", str)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Config for data dump
|
// Set satisfies flag.Value
|
||||||
type Config struct {
|
func (d *DumpType) Set(v string) (err error) {
|
||||||
Dump io.WriteCloser
|
*d, err = ResolveDumpType(v)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Type satisfies interfaces.Config
|
// String satisfies flag.Value
|
||||||
func (c Config) Type() shared.DBType {
|
func (d *DumpType) String() string {
|
||||||
return shared.DUMP
|
return strings.ToLower(string(*d))
|
||||||
}
|
|
||||||
|
|
||||||
// NewDiscardWriterCloser returns a discardWrapper wrapping io.Discard
|
|
||||||
func NewDiscardWriterCloser() io.WriteCloser {
|
|
||||||
return discardWrapper{blackhole: io.Discard}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// discardWrapper wraps io.Discard with io.Closer
|
// discardWrapper wraps io.Discard with io.Closer
|
||||||
type discardWrapper struct {
|
type discardWrapper struct{ io.Writer }
|
||||||
blackhole io.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write satisfies io.Writer
|
var Discard = discardWrapper{io.Discard}
|
||||||
func (dw discardWrapper) Write(b []byte) (int, error) {
|
|
||||||
return dw.blackhole.Write(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close satisfies io.Closer
|
// Close satisfies io.Closer
|
||||||
func (dw discardWrapper) Close() error {
|
func (discardWrapper) Close() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -17,7 +17,7 @@
|
|||||||
package dump
|
package dump
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"encoding/hex"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math/big"
|
"math/big"
|
||||||
@ -28,15 +28,16 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/metrics"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
|
"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
"github.com/cerc-io/plugeth-statediff/indexer/ipld"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
|
"github.com/cerc-io/plugeth-statediff/indexer/models"
|
||||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
"github.com/cerc-io/plugeth-statediff/indexer/shared"
|
||||||
|
sdtypes "github.com/cerc-io/plugeth-statediff/types"
|
||||||
|
"github.com/cerc-io/plugeth-statediff/utils/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ interfaces.StateDiffIndexer = &StateDiffIndexer{}
|
var _ interfaces.StateDiffIndexer = &StateDiffIndexer{}
|
||||||
@ -199,8 +200,8 @@ func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNu
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
preparedHash := crypto.Keccak256Hash(uncleEncoding)
|
preparedHash := crypto.Keccak256Hash(uncleEncoding)
|
||||||
if !bytes.Equal(preparedHash.Bytes(), unclesHash.Bytes()) {
|
if preparedHash != unclesHash {
|
||||||
return fmt.Errorf("derived uncles hash (%s) does not match the hash in the header (%s)", preparedHash.Hex(), unclesHash.Hex())
|
return fmt.Errorf("derived uncles hash (%s) does not match the hash in the header (%s)", preparedHash.String(), unclesHash.String())
|
||||||
}
|
}
|
||||||
unclesCID, err := ipld.RawdataToCid(ipld.MEthHeaderList, uncleEncoding, multihash.KECCAK_256)
|
unclesCID, err := ipld.RawdataToCid(ipld.MEthHeaderList, uncleEncoding, multihash.KECCAK_256)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -294,7 +295,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
|
|||||||
if len(receipt.PostState) == 0 {
|
if len(receipt.PostState) == 0 {
|
||||||
rctModel.PostStatus = receipt.Status
|
rctModel.PostStatus = receipt.Status
|
||||||
} else {
|
} else {
|
||||||
rctModel.PostState = common.Bytes2Hex(receipt.PostState)
|
rctModel.PostState = hex.EncodeToString(receipt.PostState)
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", rctModel); err != nil {
|
if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", rctModel); err != nil {
|
||||||
@ -305,7 +306,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
|
|||||||
for idx, l := range receipt.Logs {
|
for idx, l := range receipt.Logs {
|
||||||
topicSet := make([]string, 4)
|
topicSet := make([]string, 4)
|
||||||
for ti, topic := range l.Topics {
|
for ti, topic := range l.Topics {
|
||||||
topicSet[ti] = topic.Hex()
|
topicSet[ti] = topic.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
logDataSet[idx] = &models.LogsModel{
|
logDataSet[idx] = &models.LogsModel{
|
||||||
@ -412,6 +413,24 @@ func (sdi *StateDiffIndexer) PushIPLD(batch interfaces.Batch, ipld sdtypes.IPLD)
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasBlock checks whether the indicated block already exists in the output.
|
||||||
|
// In the "dump" case, this is presumed to be false.
|
||||||
|
func (sdi *StateDiffIndexer) HasBlock(hash common.Hash, number uint64) (bool, error) {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CurrentBlock returns the HeaderModel of the highest existing block in the output.
|
||||||
|
// In the "dump" case, this is always nil.
|
||||||
|
func (sdi *StateDiffIndexer) CurrentBlock() (*models.HeaderModel, error) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DetectGaps returns a list of gaps in the output found within the specified block range.
|
||||||
|
// In the "dump" case this is always nil.
|
||||||
|
func (sdi *StateDiffIndexer) DetectGaps(beginBlockNumber uint64, endBlockNumber uint64) ([]*interfaces.BlockGap, error) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Close satisfies io.Closer
|
// Close satisfies io.Closer
|
||||||
func (sdi *StateDiffIndexer) Close() error {
|
func (sdi *StateDiffIndexer) Close() error {
|
||||||
return sdi.dump.Close()
|
return sdi.dump.Close()
|
||||||
|
@ -20,17 +20,26 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/node"
|
"github.com/cerc-io/plugeth-statediff/indexer/node"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
|
"github.com/cerc-io/plugeth-statediff/indexer/shared"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Config holds params for writing out CSV or SQL files
|
||||||
|
type Config struct {
|
||||||
|
Mode FileMode
|
||||||
|
OutputDir string
|
||||||
|
FilePath string
|
||||||
|
WatchedAddressesFilePath string
|
||||||
|
NodeInfo node.Info
|
||||||
|
}
|
||||||
|
|
||||||
// FileMode to explicitly type the mode of file writer we are using
|
// FileMode to explicitly type the mode of file writer we are using
|
||||||
type FileMode string
|
type FileMode string
|
||||||
|
|
||||||
const (
|
const (
|
||||||
CSV FileMode = "CSV"
|
CSV FileMode = "CSV"
|
||||||
SQL FileMode = "SQL"
|
SQL FileMode = "SQL"
|
||||||
Unknown FileMode = "Unknown"
|
Invalid FileMode = "Invalid"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ResolveFileMode resolves a FileMode from a provided string
|
// ResolveFileMode resolves a FileMode from a provided string
|
||||||
@ -41,17 +50,19 @@ func ResolveFileMode(str string) (FileMode, error) {
|
|||||||
case "sql":
|
case "sql":
|
||||||
return SQL, nil
|
return SQL, nil
|
||||||
default:
|
default:
|
||||||
return Unknown, fmt.Errorf("unrecognized file type string: %s", str)
|
return Invalid, fmt.Errorf("unrecognized file type string: %s", str)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Config holds params for writing out CSV or SQL files
|
// Set satisfies flag.Value
|
||||||
type Config struct {
|
func (f *FileMode) Set(v string) (err error) {
|
||||||
Mode FileMode
|
*f, err = ResolveFileMode(v)
|
||||||
OutputDir string
|
return
|
||||||
FilePath string
|
}
|
||||||
WatchedAddressesFilePath string
|
|
||||||
NodeInfo node.Info
|
// Set satisfies flag.Value
|
||||||
|
func (f *FileMode) String() string {
|
||||||
|
return strings.ToLower(string(*f))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Type satisfies interfaces.Config
|
// Type satisfies interfaces.Config
|
||||||
|
@ -27,11 +27,11 @@ import (
|
|||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/file"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/file"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/shared/schema"
|
"github.com/cerc-io/plugeth-statediff/indexer/shared/schema"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/test"
|
"github.com/cerc-io/plugeth-statediff/indexer/test"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
|
"github.com/cerc-io/plugeth-statediff/indexer/test_helpers"
|
||||||
)
|
)
|
||||||
|
|
||||||
const dbDirectory = "/file_indexer"
|
const dbDirectory = "/file_indexer"
|
||||||
@ -43,7 +43,7 @@ func setupLegacyCSVIndexer(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ind, err = file.NewStateDiffIndexer(context.Background(), test.LegacyConfig, file.CSVTestConfig)
|
ind, err = file.NewStateDiffIndexer(test.LegacyConfig, file.CSVTestConfig)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
db, err = postgres.SetupSQLXDB()
|
db, err = postgres.SetupSQLXDB()
|
||||||
|
@ -17,7 +17,6 @@
|
|||||||
package file_test
|
package file_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"errors"
|
"errors"
|
||||||
"math/big"
|
"math/big"
|
||||||
"os"
|
"os"
|
||||||
@ -25,15 +24,13 @@ import (
|
|||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/file"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/file"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
|
"github.com/cerc-io/plugeth-statediff/indexer/mocks"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/test"
|
"github.com/cerc-io/plugeth-statediff/indexer/test"
|
||||||
)
|
)
|
||||||
|
|
||||||
func setupCSVIndexer(t *testing.T) {
|
func setupCSVIndexer(t *testing.T) {
|
||||||
file.CSVTestConfig.OutputDir = "./statediffing_test"
|
|
||||||
|
|
||||||
if _, err := os.Stat(file.CSVTestConfig.OutputDir); !errors.Is(err, os.ErrNotExist) {
|
if _, err := os.Stat(file.CSVTestConfig.OutputDir); !errors.Is(err, os.ErrNotExist) {
|
||||||
err := os.RemoveAll(file.CSVTestConfig.OutputDir)
|
err := os.RemoveAll(file.CSVTestConfig.OutputDir)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@ -44,7 +41,7 @@ func setupCSVIndexer(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ind, err = file.NewStateDiffIndexer(context.Background(), mocks.TestConfig, file.CSVTestConfig)
|
ind, err = file.NewStateDiffIndexer(mocks.TestChainConfig, file.CSVTestConfig)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
db, err = postgres.SetupSQLXDB()
|
db, err = postgres.SetupSQLXDB()
|
||||||
@ -69,7 +66,7 @@ func TestCSVFileIndexer(t *testing.T) {
|
|||||||
dumpCSVFileData(t)
|
dumpCSVFileData(t)
|
||||||
defer tearDownCSV(t)
|
defer tearDownCSV(t)
|
||||||
|
|
||||||
test.TestPublishAndIndexHeaderIPLDs(t, db)
|
test.DoTestPublishAndIndexHeaderIPLDs(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) {
|
t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) {
|
||||||
@ -77,7 +74,7 @@ func TestCSVFileIndexer(t *testing.T) {
|
|||||||
dumpCSVFileData(t)
|
dumpCSVFileData(t)
|
||||||
defer tearDownCSV(t)
|
defer tearDownCSV(t)
|
||||||
|
|
||||||
test.TestPublishAndIndexTransactionIPLDs(t, db)
|
test.DoTestPublishAndIndexTransactionIPLDs(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index log IPLDs for multiple receipt of a specific block", func(t *testing.T) {
|
t.Run("Publish and index log IPLDs for multiple receipt of a specific block", func(t *testing.T) {
|
||||||
@ -85,7 +82,7 @@ func TestCSVFileIndexer(t *testing.T) {
|
|||||||
dumpCSVFileData(t)
|
dumpCSVFileData(t)
|
||||||
defer tearDownCSV(t)
|
defer tearDownCSV(t)
|
||||||
|
|
||||||
test.TestPublishAndIndexLogIPLDs(t, db)
|
test.DoTestPublishAndIndexLogIPLDs(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index receipt IPLDs in a single tx", func(t *testing.T) {
|
t.Run("Publish and index receipt IPLDs in a single tx", func(t *testing.T) {
|
||||||
@ -93,7 +90,7 @@ func TestCSVFileIndexer(t *testing.T) {
|
|||||||
dumpCSVFileData(t)
|
dumpCSVFileData(t)
|
||||||
defer tearDownCSV(t)
|
defer tearDownCSV(t)
|
||||||
|
|
||||||
test.TestPublishAndIndexReceiptIPLDs(t, db)
|
test.DoTestPublishAndIndexReceiptIPLDs(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) {
|
t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) {
|
||||||
@ -101,7 +98,7 @@ func TestCSVFileIndexer(t *testing.T) {
|
|||||||
dumpCSVFileData(t)
|
dumpCSVFileData(t)
|
||||||
defer tearDownCSV(t)
|
defer tearDownCSV(t)
|
||||||
|
|
||||||
test.TestPublishAndIndexStateIPLDs(t, db)
|
test.DoTestPublishAndIndexStateIPLDs(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) {
|
t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) {
|
||||||
@ -109,7 +106,7 @@ func TestCSVFileIndexer(t *testing.T) {
|
|||||||
dumpCSVFileData(t)
|
dumpCSVFileData(t)
|
||||||
defer tearDownCSV(t)
|
defer tearDownCSV(t)
|
||||||
|
|
||||||
test.TestPublishAndIndexStorageIPLDs(t, db)
|
test.DoTestPublishAndIndexStorageIPLDs(t, db)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -127,7 +124,7 @@ func TestCSVFileIndexerNonCanonical(t *testing.T) {
|
|||||||
dumpCSVFileData(t)
|
dumpCSVFileData(t)
|
||||||
defer tearDownCSV(t)
|
defer tearDownCSV(t)
|
||||||
|
|
||||||
test.TestPublishAndIndexTransactionsNonCanonical(t, db)
|
test.DoTestPublishAndIndexTransactionsNonCanonical(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index receipts", func(t *testing.T) {
|
t.Run("Publish and index receipts", func(t *testing.T) {
|
||||||
@ -135,7 +132,7 @@ func TestCSVFileIndexerNonCanonical(t *testing.T) {
|
|||||||
dumpCSVFileData(t)
|
dumpCSVFileData(t)
|
||||||
defer tearDownCSV(t)
|
defer tearDownCSV(t)
|
||||||
|
|
||||||
test.TestPublishAndIndexReceiptsNonCanonical(t, db)
|
test.DoTestPublishAndIndexReceiptsNonCanonical(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index logs", func(t *testing.T) {
|
t.Run("Publish and index logs", func(t *testing.T) {
|
||||||
@ -143,7 +140,7 @@ func TestCSVFileIndexerNonCanonical(t *testing.T) {
|
|||||||
dumpCSVFileData(t)
|
dumpCSVFileData(t)
|
||||||
defer tearDownCSV(t)
|
defer tearDownCSV(t)
|
||||||
|
|
||||||
test.TestPublishAndIndexLogsNonCanonical(t, db)
|
test.DoTestPublishAndIndexLogsNonCanonical(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index state nodes", func(t *testing.T) {
|
t.Run("Publish and index state nodes", func(t *testing.T) {
|
||||||
@ -151,7 +148,7 @@ func TestCSVFileIndexerNonCanonical(t *testing.T) {
|
|||||||
dumpCSVFileData(t)
|
dumpCSVFileData(t)
|
||||||
defer tearDownCSV(t)
|
defer tearDownCSV(t)
|
||||||
|
|
||||||
test.TestPublishAndIndexStateNonCanonical(t, db)
|
test.DoTestPublishAndIndexStateNonCanonical(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index storage nodes", func(t *testing.T) {
|
t.Run("Publish and index storage nodes", func(t *testing.T) {
|
||||||
@ -159,7 +156,7 @@ func TestCSVFileIndexerNonCanonical(t *testing.T) {
|
|||||||
dumpCSVFileData(t)
|
dumpCSVFileData(t)
|
||||||
defer tearDownCSV(t)
|
defer tearDownCSV(t)
|
||||||
|
|
||||||
test.TestPublishAndIndexStorageNonCanonical(t, db)
|
test.DoTestPublishAndIndexStorageNonCanonical(t, db)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -25,15 +25,15 @@ import (
"path/filepath"
"strconv"

+"github.com/ethereum/go-ethereum/common"
"github.com/thoas/go-funk"

-"github.com/ethereum/go-ethereum/common"
-"github.com/ethereum/go-ethereum/statediff/indexer/database/metrics"
-"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
-"github.com/ethereum/go-ethereum/statediff/indexer/models"
-nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
-"github.com/ethereum/go-ethereum/statediff/indexer/shared/schema"
-sdtypes "github.com/ethereum/go-ethereum/statediff/types"
+"github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
+"github.com/cerc-io/plugeth-statediff/indexer/ipld"
+"github.com/cerc-io/plugeth-statediff/indexer/models"
+nodeinfo "github.com/cerc-io/plugeth-statediff/indexer/node"
+"github.com/cerc-io/plugeth-statediff/indexer/shared/schema"
+sdtypes "github.com/cerc-io/plugeth-statediff/types"
)

var (
@ -92,9 +92,6 @@ func newFileWriter(path string) (ret fileWriter, err error) {
}

func makeFileWriters(dir string, tables []*schema.Table) (fileWriters, error) {
-if err := os.MkdirAll(dir, 0755); err != nil {
-return nil, err
-}
writers := fileWriters{}
for _, tbl := range tables {
w, err := newFileWriter(TableFilePath(dir, tbl.Name))
@ -133,7 +130,7 @@ func (tx fileWriters) flush() error {

func NewCSVWriter(path string, watchedAddressesFilePath string) (*CSVWriter, error) {
if err := os.MkdirAll(path, 0777); err != nil {
-return nil, fmt.Errorf("unable to make MkdirAll for path: %s err: %s", path, err)
+return nil, fmt.Errorf("unable to create directory '%s': %w", path, err)
}

writers, err := makeFileWriters(path, Tables)
@ -386,7 +383,7 @@ func loadWatchedAddressesRows(filePath string) ([][]string, error) {
return [][]string{}, nil
}

-return nil, fmt.Errorf("error opening watched addresses file: %v", err)
+return nil, fmt.Errorf("error opening watched addresses file: %w", err)
}

defer file.Close()
@ -402,7 +399,7 @@ func dumpWatchedAddressesRows(watchedAddressesWriter fileWriter, filteredRows []

file, err := os.Create(file.Name())
if err != nil {
-return fmt.Errorf("error creating watched addresses file: %v", err)
+return fmt.Errorf("error creating watched addresses file: %w", err)
}

watchedAddressesWriter.Writer = csv.NewWriter(file)
@ -17,8 +17,6 @@
package file

import (
-"bytes"
-"context"
"errors"
"fmt"
"math/big"
@ -27,21 +25,21 @@ import (
"sync/atomic"
"time"

-"github.com/lib/pq"
-"github.com/multiformats/go-multihash"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
-"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
-"github.com/ethereum/go-ethereum/statediff/indexer/database/metrics"
-"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
-"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
-"github.com/ethereum/go-ethereum/statediff/indexer/models"
-"github.com/ethereum/go-ethereum/statediff/indexer/shared"
-sdtypes "github.com/ethereum/go-ethereum/statediff/types"
+"github.com/lib/pq"
+"github.com/multiformats/go-multihash"
+"github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
+"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
+"github.com/cerc-io/plugeth-statediff/indexer/ipld"
+"github.com/cerc-io/plugeth-statediff/indexer/models"
+"github.com/cerc-io/plugeth-statediff/indexer/shared"
+sdtypes "github.com/cerc-io/plugeth-statediff/types"
+"github.com/cerc-io/plugeth-statediff/utils/log"
)

const defaultCSVOutputDir = "./statediff_output"
@ -63,7 +61,7 @@ type StateDiffIndexer struct {
}

// NewStateDiffIndexer creates a void implementation of interfaces.StateDiffIndexer
-func NewStateDiffIndexer(ctx context.Context, chainConfig *params.ChainConfig, config Config) (*StateDiffIndexer, error) {
+func NewStateDiffIndexer(chainConfig *params.ChainConfig, config Config) (*StateDiffIndexer, error) {
var err error
var writer FileWriter

@ -176,7 +174,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
metrics.IndexerMetrics.PostgresCommitTimer.Update(tDiff)
traceMsg += fmt.Sprintf("postgres transaction commit duration: %s\r\n", tDiff.String())
traceMsg += fmt.Sprintf(" TOTAL PROCESSING DURATION: %s\r\n", time.Since(start).String())
-log.Debug(traceMsg)
+log.Trace(traceMsg)
return err
},
}
@ -258,8 +256,8 @@ func (sdi *StateDiffIndexer) processUncles(headerID string, blockNumber *big.Int
return err
}
preparedHash := crypto.Keccak256Hash(uncleEncoding)
-if !bytes.Equal(preparedHash.Bytes(), unclesHash.Bytes()) {
-return fmt.Errorf("derived uncles hash (%s) does not match the hash in the header (%s)", preparedHash.Hex(), unclesHash.Hex())
+if preparedHash != unclesHash {
+return fmt.Errorf("derived uncles hash (%s) does not match the hash in the header (%s)", preparedHash.String(), unclesHash.String())
}
unclesCID, err := ipld.RawdataToCid(ipld.MEthHeaderList, uncleEncoding, multihash.KECCAK_256)
if err != nil {
@ -358,7 +356,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
sdi.fileWriter.upsertIPLDNode(args.blockNumber.String(), args.logNodes[i][idx])
topicSet := make([]string, 4)
for ti, topic := range l.Topics {
-topicSet[ti] = topic.Hex()
+topicSet[ti] = topic.String()
}

logDataSet[idx] = &models.LogsModel{
@ -461,6 +459,24 @@ func (sdi *StateDiffIndexer) PushIPLD(batch interfaces.Batch, ipld sdtypes.IPLD)
return nil
}

+// CurrentBlock returns the HeaderModel of the highest existing block in the output.
+// In the "file" case, this is always nil.
+func (sdi *StateDiffIndexer) CurrentBlock() (*models.HeaderModel, error) {
+return nil, nil
+}
+
+// DetectGaps returns a list of gaps in the output found within the specified block range.
+// In the "file" case this is always nil.
+func (sdi *StateDiffIndexer) DetectGaps(beginBlockNumber uint64, endBlockNumber uint64) ([]*interfaces.BlockGap, error) {
+return nil, nil
+}
+
+// HasBlock checks whether the indicated block already exists in the output.
+// In the "file" case this is presumed to be false.
+func (sdi *StateDiffIndexer) HasBlock(hash common.Hash, number uint64) (bool, error) {
+return false, nil
+}
+
// Close satisfies io.Closer
func (sdi *StateDiffIndexer) Close() error {
return sdi.fileWriter.Close()
@ -19,12 +19,12 @@ package file
import (
"math/big"

-"github.com/ethereum/go-ethereum/statediff/indexer/ipld"

"github.com/ethereum/go-ethereum/common"
-"github.com/ethereum/go-ethereum/statediff/indexer/models"
-nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
-"github.com/ethereum/go-ethereum/statediff/types"
+"github.com/cerc-io/plugeth-statediff/indexer/ipld"
+"github.com/cerc-io/plugeth-statediff/indexer/models"
+nodeinfo "github.com/cerc-io/plugeth-statediff/indexer/node"
+"github.com/cerc-io/plugeth-statediff/types"
)

// Writer interface required by the file indexer
@ -24,16 +24,16 @@ import (
"os"
"testing"

-"github.com/stretchr/testify/require"

"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
-"github.com/ethereum/go-ethereum/statediff/indexer/database/file"
-"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
-"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
-"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
-"github.com/ethereum/go-ethereum/statediff/indexer/test"
-"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
+"github.com/stretchr/testify/require"
+"github.com/cerc-io/plugeth-statediff/indexer/database/file"
+"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
+"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
+"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
+"github.com/cerc-io/plugeth-statediff/indexer/test"
+"github.com/cerc-io/plugeth-statediff/indexer/test_helpers"
)

var (
@ -44,10 +44,6 @@ var (
)

func init() {
-if os.Getenv("MODE") != "statediff" {
-fmt.Println("Skipping statediff test")
-os.Exit(0)
-}
if os.Getenv("STATEDIFF_DB") != "file" {
fmt.Println("Skipping statediff .sql file writing mode test")
os.Exit(0)
@ -87,7 +83,7 @@ func setupMainnetIndexer(t *testing.T) {
require.NoError(t, err)
}

-ind, err = file.NewStateDiffIndexer(context.Background(), chainConf, file.CSVTestConfig)
+ind, err = file.NewStateDiffIndexer(chainConf, file.CSVTestConfig)
require.NoError(t, err)

db, err = postgres.SetupSQLXDB()
@ -24,12 +24,12 @@ import (

"github.com/stretchr/testify/require"

-"github.com/ethereum/go-ethereum/statediff/indexer/database/file"
-"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
-"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
-"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
-"github.com/ethereum/go-ethereum/statediff/indexer/test"
-"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
+"github.com/cerc-io/plugeth-statediff/indexer/database/file"
+"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
+"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
+"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
+"github.com/cerc-io/plugeth-statediff/indexer/test"
+"github.com/cerc-io/plugeth-statediff/indexer/test_helpers"
)

var (
@ -44,7 +44,7 @@ func setupLegacySQLIndexer(t *testing.T) {
require.NoError(t, err)
}

-ind, err = file.NewStateDiffIndexer(context.Background(), test.LegacyConfig, file.SQLTestConfig)
+ind, err = file.NewStateDiffIndexer(test.LegacyConfig, file.SQLTestConfig)
require.NoError(t, err)

db, err = postgres.SetupSQLXDB()
@ -17,7 +17,6 @@
package file_test

import (
-"context"
"errors"
"math/big"
"os"
@ -25,10 +24,10 @@ import (

"github.com/stretchr/testify/require"

-"github.com/ethereum/go-ethereum/statediff/indexer/database/file"
-"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
-"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
-"github.com/ethereum/go-ethereum/statediff/indexer/test"
+"github.com/cerc-io/plugeth-statediff/indexer/database/file"
+"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
+"github.com/cerc-io/plugeth-statediff/indexer/mocks"
+"github.com/cerc-io/plugeth-statediff/indexer/test"
)

func setupIndexer(t *testing.T) {
@ -42,7 +41,7 @@ func setupIndexer(t *testing.T) {
require.NoError(t, err)
}

-ind, err = file.NewStateDiffIndexer(context.Background(), mocks.TestConfig, file.SQLTestConfig)
+ind, err = file.NewStateDiffIndexer(mocks.TestChainConfig, file.SQLTestConfig)
require.NoError(t, err)

db, err = postgres.SetupSQLXDB()
@ -67,7 +66,7 @@ func TestSQLFileIndexer(t *testing.T) {
dumpFileData(t)
defer tearDown(t)

-test.TestPublishAndIndexHeaderIPLDs(t, db)
+test.DoTestPublishAndIndexHeaderIPLDs(t, db)
})

t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) {
@ -75,7 +74,7 @@ func TestSQLFileIndexer(t *testing.T) {
dumpFileData(t)
defer tearDown(t)

-test.TestPublishAndIndexTransactionIPLDs(t, db)
+test.DoTestPublishAndIndexTransactionIPLDs(t, db)
})

t.Run("Publish and index log IPLDs for multiple receipt of a specific block", func(t *testing.T) {
@ -83,7 +82,7 @@ func TestSQLFileIndexer(t *testing.T) {
dumpFileData(t)
defer tearDown(t)

-test.TestPublishAndIndexLogIPLDs(t, db)
+test.DoTestPublishAndIndexLogIPLDs(t, db)
})

t.Run("Publish and index receipt IPLDs in a single tx", func(t *testing.T) {
@ -91,7 +90,7 @@ func TestSQLFileIndexer(t *testing.T) {
dumpFileData(t)
defer tearDown(t)

-test.TestPublishAndIndexReceiptIPLDs(t, db)
+test.DoTestPublishAndIndexReceiptIPLDs(t, db)
})

t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) {
@ -99,7 +98,7 @@ func TestSQLFileIndexer(t *testing.T) {
dumpFileData(t)
defer tearDown(t)

-test.TestPublishAndIndexStateIPLDs(t, db)
+test.DoTestPublishAndIndexStateIPLDs(t, db)
})

t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) {
@ -107,7 +106,7 @@ func TestSQLFileIndexer(t *testing.T) {
dumpFileData(t)
defer tearDown(t)

-test.TestPublishAndIndexStorageIPLDs(t, db)
+test.DoTestPublishAndIndexStorageIPLDs(t, db)
})
}

@ -125,7 +124,7 @@ func TestSQLFileIndexerNonCanonical(t *testing.T) {
dumpFileData(t)
defer tearDown(t)

-test.TestPublishAndIndexTransactionsNonCanonical(t, db)
+test.DoTestPublishAndIndexTransactionsNonCanonical(t, db)
})

t.Run("Publish and index receipts", func(t *testing.T) {
@ -133,7 +132,7 @@ func TestSQLFileIndexerNonCanonical(t *testing.T) {
dumpFileData(t)
defer tearDown(t)

-test.TestPublishAndIndexReceiptsNonCanonical(t, db)
+test.DoTestPublishAndIndexReceiptsNonCanonical(t, db)
})

t.Run("Publish and index logs", func(t *testing.T) {
@ -141,7 +140,7 @@ func TestSQLFileIndexerNonCanonical(t *testing.T) {
dumpFileData(t)
defer tearDown(t)

-test.TestPublishAndIndexLogsNonCanonical(t, db)
+test.DoTestPublishAndIndexLogsNonCanonical(t, db)
})

t.Run("Publish and index state nodes", func(t *testing.T) {
@ -149,7 +148,7 @@ func TestSQLFileIndexerNonCanonical(t *testing.T) {
dumpFileData(t)
defer tearDown(t)

-test.TestPublishAndIndexStateNonCanonical(t, db)
+test.DoTestPublishAndIndexStateNonCanonical(t, db)
})

t.Run("Publish and index storage nodes", func(t *testing.T) {
@ -157,7 +156,7 @@ func TestSQLFileIndexerNonCanonical(t *testing.T) {
dumpFileData(t)
defer tearDown(t)

-test.TestPublishAndIndexStorageNonCanonical(t, db)
+test.DoTestPublishAndIndexStorageNonCanonical(t, db)
})
}
@ -24,15 +24,15 @@ import (
"math/big"
"os"

-pg_query "github.com/pganalyze/pg_query_go/v2"
+"github.com/ethereum/go-ethereum/common"
+pg_query "github.com/pganalyze/pg_query_go/v4"
"github.com/thoas/go-funk"

-"github.com/ethereum/go-ethereum/common"
-"github.com/ethereum/go-ethereum/statediff/indexer/database/metrics"
-"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
-"github.com/ethereum/go-ethereum/statediff/indexer/models"
-nodeinfo "github.com/ethereum/go-ethereum/statediff/indexer/node"
-"github.com/ethereum/go-ethereum/statediff/types"
+"github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
+"github.com/cerc-io/plugeth-statediff/indexer/ipld"
+"github.com/cerc-io/plugeth-statediff/indexer/models"
+nodeinfo "github.com/cerc-io/plugeth-statediff/indexer/node"
+"github.com/cerc-io/plugeth-statediff/types"
)

var (
@ -382,10 +382,7 @@ func parseWatchedAddressStatement(stmt string) (string, error) {
addressString := parseResult.Stmts[0].Stmt.GetInsertStmt().
SelectStmt.GetSelectStmt().
ValuesLists[0].GetList().
-Items[0].GetAConst().
-GetVal().
-GetString_().
-Str
+Items[0].GetAConst().GetSval().Sval

return addressString, nil
}
@ -17,13 +17,12 @@
package metrics

import (
-"fmt"
"strings"
"time"

-"github.com/ethereum/go-ethereum/log"

"github.com/ethereum/go-ethereum/metrics"

+"github.com/cerc-io/plugeth-statediff/utils/log"
)

const (
@ -73,9 +72,8 @@ type IndexerMetricsHandles struct {
StateStoreCodeProcessingTimer metrics.Timer

// Fine-grained code timers
-BuildStateDiffWithIntermediateStateNodesTimer metrics.Timer
-BuildStateDiffWithoutIntermediateStateNodesTimer metrics.Timer
-CreatedAndUpdatedStateWithIntermediateNodesTimer metrics.Timer
+BuildStateDiffTimer metrics.Timer
+CreatedAndUpdatedStateTimer metrics.Timer
DeletedOrUpdatedStateTimer metrics.Timer
BuildAccountUpdatesTimer metrics.Timer
BuildAccountCreationsTimer metrics.Timer
@ -89,11 +87,8 @@ type IndexerMetricsHandles struct {
DeletedOrUpdatedStorageTimer metrics.Timer
CreatedAndUpdatedStorageTimer metrics.Timer
BuildStorageNodesIncrementalTimer metrics.Timer
-BuildStateTrieObjectTimer metrics.Timer
-BuildStateTrieTimer metrics.Timer
BuildStateDiffObjectTimer metrics.Timer
WriteStateDiffObjectTimer metrics.Timer
-CreatedAndUpdatedStateTimer metrics.Timer
BuildStorageNodesEventualTimer metrics.Timer
BuildStorageNodesFromTrieTimer metrics.Timer
BuildRemovedAccountStorageNodesTimer metrics.Timer
@ -114,9 +109,8 @@ func RegisterIndexerMetrics(reg metrics.Registry) IndexerMetricsHandles {
UncleProcessingTimer: metrics.NewTimer(),
TxAndRecProcessingTimer: metrics.NewTimer(),
StateStoreCodeProcessingTimer: metrics.NewTimer(),
-BuildStateDiffWithIntermediateStateNodesTimer: metrics.NewTimer(),
-BuildStateDiffWithoutIntermediateStateNodesTimer: metrics.NewTimer(),
-CreatedAndUpdatedStateWithIntermediateNodesTimer: metrics.NewTimer(),
+BuildStateDiffTimer: metrics.NewTimer(),
+CreatedAndUpdatedStateTimer: metrics.NewTimer(),
DeletedOrUpdatedStateTimer: metrics.NewTimer(),
BuildAccountUpdatesTimer: metrics.NewTimer(),
BuildAccountCreationsTimer: metrics.NewTimer(),
@ -130,11 +124,8 @@ func RegisterIndexerMetrics(reg metrics.Registry) IndexerMetricsHandles {
DeletedOrUpdatedStorageTimer: metrics.NewTimer(),
CreatedAndUpdatedStorageTimer: metrics.NewTimer(),
BuildStorageNodesIncrementalTimer: metrics.NewTimer(),
-BuildStateTrieObjectTimer: metrics.NewTimer(),
-BuildStateTrieTimer: metrics.NewTimer(),
BuildStateDiffObjectTimer: metrics.NewTimer(),
WriteStateDiffObjectTimer: metrics.NewTimer(),
-CreatedAndUpdatedStateTimer: metrics.NewTimer(),
BuildStorageNodesEventualTimer: metrics.NewTimer(),
BuildStorageNodesFromTrieTimer: metrics.NewTimer(),
BuildRemovedAccountStorageNodesTimer: metrics.NewTimer(),
@ -153,9 +144,8 @@ func RegisterIndexerMetrics(reg metrics.Registry) IndexerMetricsHandles {
reg.Register(metricName(subsys, "t_uncle_processing"), ctx.UncleProcessingTimer)
reg.Register(metricName(subsys, "t_tx_receipt_processing"), ctx.TxAndRecProcessingTimer)
reg.Register(metricName(subsys, "t_state_store_code_processing"), ctx.StateStoreCodeProcessingTimer)
-reg.Register(metricName(subsys, "t_build_statediff_with_intermediate_state_nodes"), ctx.BuildStateDiffWithIntermediateStateNodesTimer)
-reg.Register(metricName(subsys, "t_build_statediff_without_intermediate_state_nodes"), ctx.BuildStateDiffWithoutIntermediateStateNodesTimer)
-reg.Register(metricName(subsys, "t_created_and_update_state_with_intermediate_nodes"), ctx.CreatedAndUpdatedStateWithIntermediateNodesTimer)
+reg.Register(metricName(subsys, "t_build_statediff"), ctx.BuildStateDiffTimer)
+reg.Register(metricName(subsys, "t_created_and_update_state"), ctx.CreatedAndUpdatedStateTimer)
reg.Register(metricName(subsys, "t_deleted_or_updated_state"), ctx.DeletedOrUpdatedStateTimer)
reg.Register(metricName(subsys, "t_build_account_updates"), ctx.BuildAccountUpdatesTimer)
reg.Register(metricName(subsys, "t_build_account_creations"), ctx.BuildAccountCreationsTimer)
@ -169,8 +159,6 @@ func RegisterIndexerMetrics(reg metrics.Registry) IndexerMetricsHandles {
reg.Register(metricName(subsys, "t_created_and_updated_storage"), ctx.CreatedAndUpdatedStorageTimer)
reg.Register(metricName(subsys, "t_deleted_or_updated_storage"), ctx.DeletedOrUpdatedStorageTimer)
reg.Register(metricName(subsys, "t_build_storage_nodes_incremental"), ctx.BuildStorageNodesIncrementalTimer)
-reg.Register(metricName(subsys, "t_build_state_trie_object"), ctx.BuildStateTrieObjectTimer)
-reg.Register(metricName(subsys, "t_build_state_trie"), ctx.BuildStateTrieTimer)
reg.Register(metricName(subsys, "t_build_statediff_object"), ctx.BuildStateDiffObjectTimer)
reg.Register(metricName(subsys, "t_write_statediff_object"), ctx.WriteStateDiffObjectTimer)
reg.Register(metricName(subsys, "t_created_and_updated_state"), ctx.CreatedAndUpdatedStateTimer)
@ -253,7 +241,8 @@ func (met *dbMetricsHandles) Update(stats DbStats) {

func ReportAndUpdateDuration(msg string, start time.Time, logger log.Logger, timer metrics.Timer) {
since := UpdateDuration(start, timer)
-logger.Trace(fmt.Sprintf("%s duration=%dms", msg, since.Milliseconds()))
+// This is very noisy so we log at Trace.
+logger.Trace(msg, "duration", since)
}

func UpdateDuration(start time.Time, timer metrics.Timer) time.Duration {
@ -23,9 +23,9 @@ import (

"github.com/lib/pq"

-"github.com/ethereum/go-ethereum/log"
-"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
-"github.com/ethereum/go-ethereum/statediff/indexer/models"
+"github.com/cerc-io/plugeth-statediff/indexer/ipld"
+"github.com/cerc-io/plugeth-statediff/indexer/models"
+"github.com/cerc-io/plugeth-statediff/utils/log"
)

const startingCacheCapacity = 1024 * 24
@ -36,7 +36,7 @@ type BatchTx struct {
ctx context.Context
dbtx Tx
stm string
-quit chan struct{}
+quit chan (chan<- struct{})
iplds chan models.IPLDModel
ipldCache models.IPLDBatch
removedCacheFlag *uint32
@ -81,8 +81,9 @@ func (tx *BatchTx) cache() {
tx.ipldCache.Keys = append(tx.ipldCache.Keys, i.Key)
tx.ipldCache.Values = append(tx.ipldCache.Values, i.Data)
tx.cacheWg.Done()
-case <-tx.quit:
+case confirm := <-tx.quit:
tx.ipldCache = models.IPLDBatch{}
+confirm <- struct{}{}
return
}
}
@ -121,6 +122,6 @@ func (tx *BatchTx) cacheRemoved(key string, value []byte) {
// rollback sql transaction and log any error
func rollback(ctx context.Context, tx Tx) {
if err := tx.Rollback(ctx); err != nil {
-log.Error(err.Error())
+log.Error("error during rollback", "error", err)
}
}
@ -20,27 +20,26 @@
package sql

import (
-"bytes"
"context"
"fmt"
"math/big"
"time"

-"github.com/multiformats/go-multihash"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
-"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
-metrics2 "github.com/ethereum/go-ethereum/statediff/indexer/database/metrics"
-"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
-"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
-"github.com/ethereum/go-ethereum/statediff/indexer/models"
-"github.com/ethereum/go-ethereum/statediff/indexer/shared"
-sdtypes "github.com/ethereum/go-ethereum/statediff/types"
+"github.com/multiformats/go-multihash"
+metrics2 "github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
+"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
+"github.com/cerc-io/plugeth-statediff/indexer/ipld"
+"github.com/cerc-io/plugeth-statediff/indexer/models"
+"github.com/cerc-io/plugeth-statediff/indexer/shared"
+sdtypes "github.com/cerc-io/plugeth-statediff/types"
+"github.com/cerc-io/plugeth-statediff/utils/log"
)

var _ interfaces.StateDiffIndexer = &StateDiffIndexer{}
@ -130,7 +129,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
BlockNumber: block.Number().String(),
stm: sdi.dbWriter.db.InsertIPLDsStm(),
iplds: make(chan models.IPLDModel),
-quit: make(chan struct{}),
+quit: make(chan (chan<- struct{})),
ipldCache: models.IPLDBatch{
BlockNumbers: make([]string, 0, startingCacheCapacity),
Keys: make([]string, 0, startingCacheCapacity),
@ -140,7 +139,10 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
// handle transaction commit or rollback for any return case
submit: func(self *BatchTx, err error) error {
defer func() {
+confirm := make(chan struct{})
+self.quit <- confirm
close(self.quit)
+<-confirm
close(self.iplds)
}()
if p := recover(); p != nil {
@ -219,6 +221,16 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
return blockTx, err
}

+// CurrentBlock returns the HeaderModel of the highest existing block in the database.
+func (sdi *StateDiffIndexer) CurrentBlock() (*models.HeaderModel, error) {
+return sdi.dbWriter.maxHeader()
+}
+
+// DetectGaps returns a list of gaps in the database found within the specified block range.
+func (sdi *StateDiffIndexer) DetectGaps(beginBlockNumber uint64, endBlockNumber uint64) ([]*interfaces.BlockGap, error) {
+return sdi.dbWriter.detectGaps(beginBlockNumber, endBlockNumber)
+}
+
// processHeader publishes and indexes a header IPLD in Postgres
// it returns the headerID
func (sdi *StateDiffIndexer) processHeader(tx *BatchTx, header *types.Header, headerNode ipld.IPLD, reward, td *big.Int) (string, error) {
@ -256,8 +268,8 @@ func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNu
return err
}
preparedHash := crypto.Keccak256Hash(uncleEncoding)
-if !bytes.Equal(preparedHash.Bytes(), unclesHash.Bytes()) {
-return fmt.Errorf("derived uncles hash (%s) does not match the hash in the header (%s)", preparedHash.Hex(), unclesHash.Hex())
+if preparedHash != unclesHash {
+return fmt.Errorf("derived uncles hash (%s) does not match the hash in the header (%s)", preparedHash.String(), unclesHash.String())
}
unclesCID, err := ipld.RawdataToCid(ipld.MEthHeaderList, uncleEncoding, multihash.KECCAK_256)
if err != nil {
@ -363,7 +375,7 @@ func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs
tx.cacheIPLD(args.logNodes[i][idx])
topicSet := make([]string, 4)
for ti, topic := range l.Topics {
-topicSet[ti] = topic.Hex()
+topicSet[ti] = topic.String()
}

logDataSet[idx] = &models.LogsModel{
@ -469,6 +481,11 @@ func (sdi *StateDiffIndexer) PushIPLD(batch interfaces.Batch, ipld sdtypes.IPLD)
return nil
}

+// HasBlock checks whether the indicated block already exists in the database.
+func (sdi *StateDiffIndexer) HasBlock(hash common.Hash, number uint64) (bool, error) {
+return sdi.dbWriter.hasHeader(hash, number)
+}
+
// Close satisfies io.Closer
func (sdi *StateDiffIndexer) Close() error {
return sdi.dbWriter.Close()
@ -5,9 +5,9 @@ import (

"github.com/stretchr/testify/require"

-"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
-"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
-"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
+"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
+"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
+"github.com/cerc-io/plugeth-statediff/indexer/test_helpers"
)

var (
@ -20,7 +20,7 @@ import (
"context"
"io"

-"github.com/ethereum/go-ethereum/statediff/indexer/database/metrics"
+"github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
)

// Database interfaces required by the sql indexer
@ -45,6 +45,9 @@ type Driver interface {

// Statements interface to accommodate different SQL query syntax
type Statements interface {
+DetectGapsStm() string
+MaxHeaderStm() string
+ExistsHeaderStm() string
InsertHeaderStm() string
InsertUncleStm() string
InsertTxStm() string
@ -4,7 +4,7 @@ import (
"context"
"reflect"

-"github.com/ethereum/go-ethereum/log"
+"github.com/cerc-io/plugeth-statediff/utils/log"
)

// Changing this to 1 would make sure only sequential COPYs were combined.
@ -84,13 +84,13 @@ func (tx *DelayedTx) Commit(ctx context.Context) error {
for _, item := range tx.cache {
switch item := item.(type) {
case *copyFrom:
-_, err := base.CopyFrom(ctx, item.tableName, item.columnNames, item.rows)
+_, err = base.CopyFrom(ctx, item.tableName, item.columnNames, item.rows)
if err != nil {
-log.Error("COPY error", "table", item.tableName, "err", err)
+log.Error("COPY error", "table", item.tableName, "error", err)
return err
}
case cachedStmt:
-_, err := base.Exec(ctx, item.sql, item.args...)
+_, err = base.Exec(ctx, item.sql, item.args...)
if err != nil {
return err
}
@ -18,20 +18,18 @@ package mainnet_tests

import (
"context"
-"fmt"
"math/big"
-"os"
"testing"

"github.com/stretchr/testify/require"

+"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
+"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
+"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
+"github.com/cerc-io/plugeth-statediff/indexer/test"
+"github.com/cerc-io/plugeth-statediff/indexer/test_helpers"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
-"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
-"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
-"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
-"github.com/ethereum/go-ethereum/statediff/indexer/test"
-"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
)

var (
@ -41,13 +39,6 @@ var (
chainConf = params.MainnetChainConfig
)

-func init() {
-if os.Getenv("MODE") != "statediff" {
-fmt.Println("Skipping statediff test")
-os.Exit(0)
-}
-}

func TestMainnetIndexer(t *testing.T) {
conf := test_helpers.GetTestConfig()
@ -22,13 +22,14 @@ import (

"github.com/stretchr/testify/require"

-"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
-"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
-"github.com/ethereum/go-ethereum/statediff/indexer/test"
+"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
+"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
+"github.com/cerc-io/plugeth-statediff/indexer/test"
)

func setupLegacyPGXIndexer(t *testing.T) {
-db, err = postgres.SetupPGXDB(postgres.TestConfig)
+config, _ := postgres.TestConfig.WithEnv()
+db, err = postgres.SetupPGXDB(config)
if err != nil {
t.Fatal(err)
}
@ -23,23 +23,33 @@ import (

"github.com/stretchr/testify/require"

-"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
-"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
-"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
-"github.com/ethereum/go-ethereum/statediff/indexer/test"
+"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
+"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
+"github.com/cerc-io/plugeth-statediff/indexer/mocks"
+"github.com/cerc-io/plugeth-statediff/indexer/test"
)

+var defaultPgConfig postgres.Config
+
+func init() {
+var err error
+defaultPgConfig, err = postgres.TestConfig.WithEnv()
+if err != nil {
+panic(err)
+}
+}
+
func setupPGXIndexer(t *testing.T, config postgres.Config) {
db, err = postgres.SetupPGXDB(config)
if err != nil {
t.Fatal(err)
}
-ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestConfig, db)
+ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestChainConfig, db)
require.NoError(t, err)
}

func setupPGX(t *testing.T) {
-setupPGXWithConfig(t, postgres.TestConfig)
+setupPGXWithConfig(t, defaultPgConfig)
}

func setupPGXWithConfig(t *testing.T, config postgres.Config) {
@ -48,7 +58,7 @@ func setupPGXWithConfig(t *testing.T, config postgres.Config) {
}

func setupPGXNonCanonical(t *testing.T) {
-setupPGXIndexer(t, postgres.TestConfig)
+setupPGXIndexer(t, defaultPgConfig)
test.SetupTestDataNonCanonical(t, ind)
}

@ -59,7 +69,7 @@ func TestPGXIndexer(t *testing.T) {
defer tearDown(t)
defer checkTxClosure(t, 1, 0, 1)

-test.TestPublishAndIndexHeaderIPLDs(t, db)
+test.DoTestPublishAndIndexHeaderIPLDs(t, db)
})

t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) {
@ -67,7 +77,7 @@ func TestPGXIndexer(t *testing.T) {
defer tearDown(t)
defer checkTxClosure(t, 1, 0, 1)

-test.TestPublishAndIndexTransactionIPLDs(t, db)
+test.DoTestPublishAndIndexTransactionIPLDs(t, db)
})

t.Run("Publish and index log IPLDs for multiple receipt of a specific block", func(t *testing.T) {
@ -75,7 +85,7 @@ func TestPGXIndexer(t *testing.T) {
defer tearDown(t)
defer checkTxClosure(t, 1, 0, 1)

-test.TestPublishAndIndexLogIPLDs(t, db)
+test.DoTestPublishAndIndexLogIPLDs(t, db)
})

t.Run("Publish and index receipt IPLDs in a single tx", func(t *testing.T) {
@ -83,7 +93,7 @@ func TestPGXIndexer(t *testing.T) {
defer tearDown(t)
defer checkTxClosure(t, 1, 0, 1)

-test.TestPublishAndIndexReceiptIPLDs(t, db)
+test.DoTestPublishAndIndexReceiptIPLDs(t, db)
})

t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) {
@ -91,7 +101,7 @@ func TestPGXIndexer(t *testing.T) {
defer tearDown(t)
defer checkTxClosure(t, 1, 0, 1)

-test.TestPublishAndIndexStateIPLDs(t, db)
+test.DoTestPublishAndIndexStateIPLDs(t, db)
})

t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) {
@ -99,21 +109,21 @@ func TestPGXIndexer(t *testing.T) {
defer tearDown(t)
defer checkTxClosure(t, 1, 0, 1)

-test.TestPublishAndIndexStorageIPLDs(t, db)
+test.DoTestPublishAndIndexStorageIPLDs(t, db)
})

t.Run("Publish and index with CopyFrom enabled.", func(t *testing.T) {
-config := postgres.TestConfig
+config := defaultPgConfig
config.CopyFrom = true

setupPGXWithConfig(t, config)
defer tearDown(t)
defer checkTxClosure(t, 1, 0, 1)

-test.TestPublishAndIndexStateIPLDs(t, db)
-test.TestPublishAndIndexStorageIPLDs(t, db)
-test.TestPublishAndIndexReceiptIPLDs(t, db)
-test.TestPublishAndIndexLogIPLDs(t, db)
+test.DoTestPublishAndIndexStateIPLDs(t, db)
+test.DoTestPublishAndIndexStorageIPLDs(t, db)
+test.DoTestPublishAndIndexReceiptIPLDs(t, db)
+test.DoTestPublishAndIndexLogIPLDs(t, db)
})
}

@ -132,7 +142,7 @@ func TestPGXIndexerNonCanonical(t *testing.T) {
defer tearDown(t)
defer checkTxClosure(t, 1, 0, 1)

-test.TestPublishAndIndexTransactionsNonCanonical(t, db)
+test.DoTestPublishAndIndexTransactionsNonCanonical(t, db)
})

t.Run("Publish and index receipts", func(t *testing.T) {
@ -140,7 +150,7 @@ func TestPGXIndexerNonCanonical(t *testing.T) {
defer tearDown(t)
defer checkTxClosure(t, 1, 0, 1)

-test.TestPublishAndIndexReceiptsNonCanonical(t, db)
+test.DoTestPublishAndIndexReceiptsNonCanonical(t, db)
})

t.Run("Publish and index logs", func(t *testing.T) {
@ -148,7 +158,7 @@ func TestPGXIndexerNonCanonical(t *testing.T) {
defer tearDown(t)
defer checkTxClosure(t, 1, 0, 1)

-test.TestPublishAndIndexLogsNonCanonical(t, db)
+test.DoTestPublishAndIndexLogsNonCanonical(t, db)
})

t.Run("Publish and index state nodes", func(t *testing.T) {
@ -156,7 +166,7 @@ func TestPGXIndexerNonCanonical(t *testing.T) {
defer tearDown(t)
defer checkTxClosure(t, 1, 0, 1)

-test.TestPublishAndIndexStateNonCanonical(t, db)
+test.DoTestPublishAndIndexStateNonCanonical(t, db)
})

t.Run("Publish and index storage nodes", func(t *testing.T) {
@ -164,12 +174,12 @@ func TestPGXIndexerNonCanonical(t *testing.T) {
defer tearDown(t)
defer checkTxClosure(t, 1, 0, 1)

-test.TestPublishAndIndexStorageNonCanonical(t, db)
+test.DoTestPublishAndIndexStorageNonCanonical(t, db)
})
}

func TestPGXWatchAddressMethods(t *testing.T) {
-setupPGXIndexer(t, postgres.TestConfig)
+setupPGXIndexer(t, defaultPgConfig)
defer tearDown(t)
defer checkTxClosure(t, 1, 0, 1)
@ -23,49 +23,9 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
|
"github.com/cerc-io/plugeth-statediff/indexer/shared"
|
||||||
)
|
)
|
||||||
|
|
||||||
// DriverType to explicitly type the kind of sql driver we are using
|
|
||||||
type DriverType string
|
|
||||||
|
|
||||||
const (
|
|
||||||
PGX DriverType = "PGX"
|
|
||||||
SQLX DriverType = "SQLX"
|
|
||||||
Unknown DriverType = "Unknown"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Env variables
|
|
||||||
const (
|
|
||||||
DATABASE_NAME = "DATABASE_NAME"
|
|
||||||
DATABASE_HOSTNAME = "DATABASE_HOSTNAME"
|
|
||||||
DATABASE_PORT = "DATABASE_PORT"
|
|
||||||
DATABASE_USER = "DATABASE_USER"
|
|
||||||
DATABASE_PASSWORD = "DATABASE_PASSWORD"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ResolveDriverType resolves a DriverType from a provided string
|
|
||||||
func ResolveDriverType(str string) (DriverType, error) {
|
|
||||||
switch strings.ToLower(str) {
|
|
||||||
case "pgx", "pgxpool":
|
|
||||||
return PGX, nil
|
|
||||||
case "sqlx":
|
|
||||||
return SQLX, nil
|
|
||||||
default:
|
|
||||||
return Unknown, fmt.Errorf("unrecognized driver type string: %s", str)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestConfig specifies default parameters for connecting to a testing DB
|
|
||||||
var TestConfig = Config{
|
|
||||||
Hostname: "localhost",
|
|
||||||
Port: 8077,
|
|
||||||
DatabaseName: "cerc_testing",
|
|
||||||
Username: "vdbm",
|
|
||||||
Password: "password",
|
|
||||||
Driver: SQLX,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Config holds params for a Postgres db
|
// Config holds params for a Postgres db
|
||||||
type Config struct {
|
type Config struct {
|
||||||
// conn string params
|
// conn string params
|
||||||
@ -98,6 +58,34 @@ type Config struct {
|
|||||||
CopyFrom bool
|
CopyFrom bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DriverType to explicitly type the kind of sql driver we are using
|
||||||
|
type DriverType string
|
||||||
|
|
||||||
|
const (
|
||||||
|
PGX DriverType = "PGX"
|
||||||
|
SQLX DriverType = "SQLX"
|
||||||
|
Invalid DriverType = "Invalid"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Env variables
|
||||||
|
const (
|
||||||
|
DATABASE_NAME = "DATABASE_NAME"
|
||||||
|
DATABASE_HOSTNAME = "DATABASE_HOSTNAME"
|
||||||
|
DATABASE_PORT = "DATABASE_PORT"
|
||||||
|
DATABASE_USER = "DATABASE_USER"
|
||||||
|
DATABASE_PASSWORD = "DATABASE_PASSWORD"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestConfig specifies default parameters for connecting to a testing DB
|
||||||
|
var TestConfig = Config{
|
||||||
|
Hostname: "localhost",
|
||||||
|
Port: 8077,
|
||||||
|
DatabaseName: "cerc_testing",
|
||||||
|
Username: "vdbm",
|
||||||
|
Password: "password",
|
||||||
|
Driver: SQLX,
|
||||||
|
}
|
||||||
|
|
||||||
// Type satisfies interfaces.Config
|
// Type satisfies interfaces.Config
|
||||||
func (c Config) Type() shared.DBType {
|
func (c Config) Type() shared.DBType {
|
||||||
return shared.POSTGRES
|
return shared.POSTGRES
|
||||||
@ -116,6 +104,7 @@ func (c Config) DbConnectionString() string {
|
|||||||
return fmt.Sprintf("postgresql://%s:%d/%s?sslmode=disable", c.Hostname, c.Port, c.DatabaseName)
|
return fmt.Sprintf("postgresql://%s:%d/%s?sslmode=disable", c.Hostname, c.Port, c.DatabaseName)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithEnv overrides the config with env variables, returning a new instance
|
||||||
func (c Config) WithEnv() (Config, error) {
|
func (c Config) WithEnv() (Config, error) {
|
||||||
if val := os.Getenv(DATABASE_NAME); val != "" {
|
if val := os.Getenv(DATABASE_NAME); val != "" {
|
||||||
c.DatabaseName = val
|
c.DatabaseName = val
|
||||||
@ -138,3 +127,26 @@ func (c Config) WithEnv() (Config, error) {
|
|||||||
}
|
}
|
||||||
return c, nil
|
return c, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ResolveDriverType resolves a DriverType from a provided string
|
||||||
|
func ResolveDriverType(str string) (DriverType, error) {
|
||||||
|
switch strings.ToLower(str) {
|
||||||
|
case "pgx", "pgxpool":
|
||||||
|
return PGX, nil
|
||||||
|
case "sqlx":
|
||||||
|
return SQLX, nil
|
||||||
|
default:
|
||||||
|
return Invalid, fmt.Errorf("unrecognized driver type string: %s", str)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set satisfies flag.Value
|
||||||
|
func (d *DriverType) Set(v string) (err error) {
|
||||||
|
*d, err = ResolveDriverType(v)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// String satisfies flag.Value
|
||||||
|
func (d *DriverType) String() string {
|
||||||
|
return strings.ToLower(string(*d))
|
||||||
|
}
|
||||||
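
Because DriverType now satisfies flag.Value, a driver selection can be bound straight to a flag set. Below is a minimal, self-contained sketch of that usage; it re-declares the type and resolver locally so it compiles on its own, and the flag name is illustrative rather than taken from this PR.

package main

import (
    "flag"
    "fmt"
    "strings"
)

type DriverType string

const (
    PGX     DriverType = "PGX"
    SQLX    DriverType = "SQLX"
    Invalid DriverType = "Invalid"
)

// ResolveDriverType resolves a DriverType from a provided string
func ResolveDriverType(str string) (DriverType, error) {
    switch strings.ToLower(str) {
    case "pgx", "pgxpool":
        return PGX, nil
    case "sqlx":
        return SQLX, nil
    default:
        return Invalid, fmt.Errorf("unrecognized driver type string: %s", str)
    }
}

// Set satisfies flag.Value
func (d *DriverType) Set(v string) (err error) {
    *d, err = ResolveDriverType(v)
    return
}

// String satisfies flag.Value
func (d *DriverType) String() string { return strings.ToLower(string(*d)) }

func main() {
    driver := SQLX // default used when the flag is not passed
    flag.Var(&driver, "database.driver", "SQL driver to use: pgx or sqlx")
    flag.Parse()
    fmt.Println("selected driver:", driver)
}
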
|
@ -17,8 +17,10 @@
|
|||||||
package postgres
|
package postgres
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
|
"fmt"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/shared/schema"
|
|
||||||
|
"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
|
||||||
|
"github.com/cerc-io/plugeth-statediff/indexer/shared/schema"
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ sql.Database = &DB{}
|
var _ sql.Database = &DB{}
|
||||||
@ -39,6 +41,21 @@ type DB struct {
|
|||||||
sql.Driver
|
sql.Driver
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MaxHeaderStm satisfies the sql.Statements interface
|
||||||
|
func (db *DB) MaxHeaderStm() string {
|
||||||
|
return fmt.Sprintf("SELECT block_number, block_hash, parent_hash, cid, td, node_ids, reward, state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, coinbase FROM %s ORDER BY block_number DESC LIMIT 1", schema.TableHeader.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistsHeaderStm satisfies the sql.Statements interface
|
||||||
|
func (db *DB) ExistsHeaderStm() string {
|
||||||
|
return fmt.Sprintf("SELECT EXISTS(SELECT 1 from %s WHERE block_number = $1::BIGINT AND block_hash = $2::TEXT LIMIT 1)", schema.TableHeader.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DetectGapsStm satisfies the sql.Statements interface
|
||||||
|
func (db *DB) DetectGapsStm() string {
|
||||||
|
return fmt.Sprintf("SELECT block_number + 1 AS first_missing, (next_bn - 1) AS last_missing FROM (SELECT block_number, LEAD(block_number) OVER (ORDER BY block_number) AS next_bn FROM %s WHERE block_number >= $1::BIGINT AND block_number <= $2::BIGINT) h WHERE next_bn > block_number + 1", schema.TableHeader.Name)
|
||||||
|
}
|
||||||
|
|
||||||
// InsertHeaderStm satisfies the sql.Statements interface
|
// InsertHeaderStm satisfies the sql.Statements interface
|
||||||
// Stm == Statement
|
// Stm == Statement
|
||||||
func (db *DB) InsertHeaderStm() string {
|
func (db *DB) InsertHeaderStm() string {
|
||||||
|
@ -18,8 +18,9 @@ package postgres
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
|
||||||
"github.com/jackc/pgx/v4"
|
"github.com/jackc/pgx/v4"
|
||||||
|
|
||||||
|
"github.com/cerc-io/plugeth-statediff/utils/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
type LogAdapter struct {
|
type LogAdapter struct {
|
||||||
@ -31,31 +32,26 @@ func NewLogAdapter(l log.Logger) *LogAdapter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (l *LogAdapter) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
|
func (l *LogAdapter) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
|
||||||
var logger log.Logger
|
args := make([]interface{}, 0)
|
||||||
if data != nil {
|
|
||||||
var args = make([]interface{}, 0)
|
|
||||||
for key, value := range data {
|
for key, value := range data {
|
||||||
if value != nil {
|
if value != nil {
|
||||||
args = append(args, key, value)
|
args = append(args, key, value)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
logger = l.l.New(args...)
|
|
||||||
} else {
|
|
||||||
logger = l.l
|
|
||||||
}
|
|
||||||
|
|
||||||
|
logger := l.l
|
||||||
switch level {
|
switch level {
|
||||||
case pgx.LogLevelTrace:
|
case pgx.LogLevelTrace:
|
||||||
logger.Trace(msg)
|
logger.Trace(msg, args...)
|
||||||
case pgx.LogLevelDebug:
|
case pgx.LogLevelDebug:
|
||||||
logger.Debug(msg)
|
logger.Debug(msg, args...)
|
||||||
case pgx.LogLevelInfo:
|
case pgx.LogLevelInfo:
|
||||||
logger.Info(msg)
|
logger.Info(msg, args...)
|
||||||
case pgx.LogLevelWarn:
|
case pgx.LogLevelWarn:
|
||||||
logger.Warn(msg)
|
logger.Warn(msg, args...)
|
||||||
case pgx.LogLevelError:
|
case pgx.LogLevelError:
|
||||||
logger.Error(msg)
|
logger.Error(msg, args...)
|
||||||
default:
|
default:
|
||||||
logger.New("INVALID_PGX_LOG_LEVEL", level).Error(msg)
|
logger.Error(msg, "INVALID_PGX_LOG_LEVEL", level)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -20,16 +20,16 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/cerc-io/plugeth-statediff/utils/log"
|
||||||
|
|
||||||
"github.com/georgysavva/scany/pgxscan"
|
"github.com/georgysavva/scany/pgxscan"
|
||||||
"github.com/jackc/pgconn"
|
"github.com/jackc/pgconn"
|
||||||
"github.com/jackc/pgx/v4"
|
"github.com/jackc/pgx/v4"
|
||||||
"github.com/jackc/pgx/v4/pgxpool"
|
"github.com/jackc/pgx/v4/pgxpool"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/metrics"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/node"
|
"github.com/cerc-io/plugeth-statediff/indexer/node"
|
||||||
)
|
)
|
||||||
|
|
||||||
// PGXDriver driver, implements sql.Driver
|
// PGXDriver driver, implements sql.Driver
|
||||||
@ -96,7 +96,7 @@ func MakeConfig(config Config) (*pgxpool.Config, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if config.LogStatements {
|
if config.LogStatements {
|
||||||
conf.ConnConfig.Logger = NewLogAdapter(log.New())
|
conf.ConnConfig.Logger = NewLogAdapter(log.DefaultLogger)
|
||||||
}
|
}
|
||||||
|
|
||||||
return conf, nil
|
return conf, nil
|
||||||
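
A hedged sketch of exercising the statement-logging path above: setting LogStatements on the Config makes MakeConfig attach NewLogAdapter(log.DefaultLogger) to the pgx connection config, so statements run through the pool are logged with key/value context. Import paths follow this PR; the connection parameters come from TestConfig and are assumed to point at the local test database.

package main

import (
    "context"
    "fmt"

    "github.com/jackc/pgx/v4/pgxpool"

    "github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
)

func main() {
    conf := postgres.TestConfig
    conf.LogStatements = true // MakeConfig attaches NewLogAdapter(log.DefaultLogger)

    pgxConf, err := postgres.MakeConfig(conf)
    if err != nil {
        panic(err)
    }

    pool, err := pgxpool.ConnectConfig(context.Background(), pgxConf)
    if err != nil {
        panic(err)
    }
    defer pool.Close()

    // Each statement executed through the pool is now logged by the adapter.
    var one int
    if err := pool.QueryRow(context.Background(), "SELECT 1").Scan(&one); err != nil {
        panic(err)
    }
    fmt.Println("query result:", one)
}
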
@ -106,8 +106,10 @@ func (pgx *PGXDriver) createNode() error {
|
|||||||
_, err := pgx.pool.Exec(
|
_, err := pgx.pool.Exec(
|
||||||
pgx.ctx,
|
pgx.ctx,
|
||||||
createNodeStm,
|
createNodeStm,
|
||||||
pgx.nodeInfo.GenesisBlock, pgx.nodeInfo.NetworkID,
|
pgx.nodeInfo.GenesisBlock,
|
||||||
pgx.nodeInfo.ID, pgx.nodeInfo.ClientName,
|
pgx.nodeInfo.NetworkID,
|
||||||
|
pgx.nodeInfo.ID,
|
||||||
|
pgx.nodeInfo.ClientName,
|
||||||
pgx.nodeInfo.ChainID)
|
pgx.nodeInfo.ChainID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ErrUnableToSetNode(err)
|
return ErrUnableToSetNode(err)
|
||||||
|
@ -26,12 +26,13 @@ import (
|
|||||||
"github.com/jackc/pgx/v4/pgxpool"
|
"github.com/jackc/pgx/v4/pgxpool"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/node"
|
"github.com/cerc-io/plugeth-statediff/indexer/node"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
pgConfig, _ = postgres.MakeConfig(postgres.TestConfig)
|
pgConfig, _ = postgres.TestConfig.WithEnv()
|
||||||
|
pgxConfig, _ = postgres.MakeConfig(pgConfig)
|
||||||
ctx = context.Background()
|
ctx = context.Background()
|
||||||
)
|
)
|
||||||
|
|
||||||
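
The var block above shows the new setup pattern: start from postgres.TestConfig and let WithEnv layer any DATABASE_* environment variables over it before building the pgx config. A short sketch of the same flow with the error handling the test elides:

package main

import (
    "fmt"

    "github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
)

func main() {
    // Defaults target the local test database; DATABASE_HOSTNAME, DATABASE_PORT,
    // DATABASE_NAME, DATABASE_USER and DATABASE_PASSWORD override them when set.
    conf, err := postgres.TestConfig.WithEnv()
    if err != nil {
        panic(err)
    }
    fmt.Println("connecting to:", conf.DbConnectionString())
}
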
@ -43,9 +44,9 @@ func expectContainsSubstring(t *testing.T, full string, sub string) {
|
|||||||
|
|
||||||
func TestPostgresPGX(t *testing.T) {
|
func TestPostgresPGX(t *testing.T) {
|
||||||
t.Run("connects to the sql", func(t *testing.T) {
|
t.Run("connects to the sql", func(t *testing.T) {
|
||||||
dbPool, err := pgxpool.ConnectConfig(context.Background(), pgConfig)
|
dbPool, err := pgxpool.ConnectConfig(context.Background(), pgxConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to connect to db with connection string: %s err: %v", pgConfig.ConnString(), err)
|
t.Fatalf("failed to connect to db with connection string: %s err: %v", pgxConfig.ConnString(), err)
|
||||||
}
|
}
|
||||||
if dbPool == nil {
|
if dbPool == nil {
|
||||||
t.Fatal("DB pool is nil")
|
t.Fatal("DB pool is nil")
|
||||||
@ -61,9 +62,9 @@ func TestPostgresPGX(t *testing.T) {
|
|||||||
// sized int, so use string representation of big.Int
|
// sized int, so use string representation of big.Int
|
||||||
// and cast on insert
|
// and cast on insert
|
||||||
|
|
||||||
dbPool, err := pgxpool.ConnectConfig(context.Background(), pgConfig)
|
dbPool, err := pgxpool.ConnectConfig(context.Background(), pgxConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to connect to db with connection string: %s err: %v", pgConfig.ConnString(), err)
|
t.Fatalf("failed to connect to db with connection string: %s err: %v", pgxConfig.ConnString(), err)
|
||||||
}
|
}
|
||||||
defer dbPool.Close()
|
defer dbPool.Close()
|
||||||
|
|
||||||
@ -111,7 +112,7 @@ func TestPostgresPGX(t *testing.T) {
|
|||||||
badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100))
|
badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100))
|
||||||
badInfo := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"}
|
badInfo := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"}
|
||||||
|
|
||||||
_, err := postgres.NewPGXDriver(ctx, postgres.TestConfig, badInfo)
|
_, err := postgres.NewPGXDriver(ctx, pgConfig, badInfo)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Fatal("Expected an error")
|
t.Fatal("Expected an error")
|
||||||
}
|
}
|
||||||
|
@ -1,33 +0,0 @@
|
|||||||
// VulcanizeDB
|
|
||||||
// Copyright © 2019 Vulcanize
|
|
||||||
|
|
||||||
// This program is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Affero General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
|
|
||||||
// This program is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Affero General Public License for more details.
|
|
||||||
|
|
||||||
// You should have received a copy of the GNU Affero General Public License
|
|
||||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package postgres_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
if os.Getenv("MODE") != "statediff" {
|
|
||||||
fmt.Println("Skipping statediff test")
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Root().SetHandler(log.DiscardHandler())
|
|
||||||
}
|
|
@ -24,9 +24,9 @@ import (
|
|||||||
|
|
||||||
"github.com/jmoiron/sqlx"
|
"github.com/jmoiron/sqlx"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/metrics"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/node"
|
"github.com/cerc-io/plugeth-statediff/indexer/node"
|
||||||
)
|
)
|
||||||
|
|
||||||
// SQLXDriver driver, implements sql.Driver
|
// SQLXDriver driver, implements sql.Driver
|
||||||
|
@ -26,8 +26,8 @@ import (
|
|||||||
_ "github.com/lib/pq"
|
_ "github.com/lib/pq"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/node"
|
"github.com/cerc-io/plugeth-statediff/indexer/node"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestPostgresSQLX(t *testing.T) {
|
func TestPostgresSQLX(t *testing.T) {
|
||||||
@ -35,7 +35,7 @@ func TestPostgresSQLX(t *testing.T) {
|
|||||||
|
|
||||||
t.Run("connects to the database", func(t *testing.T) {
|
t.Run("connects to the database", func(t *testing.T) {
|
||||||
var err error
|
var err error
|
||||||
connStr := postgres.TestConfig.DbConnectionString()
|
connStr := pgConfig.DbConnectionString()
|
||||||
|
|
||||||
sqlxdb, err = sqlx.Connect("postgres", connStr)
|
sqlxdb, err = sqlx.Connect("postgres", connStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -58,7 +58,7 @@ func TestPostgresSQLX(t *testing.T) {
|
|||||||
// sized int, so use string representation of big.Int
|
// sized int, so use string representation of big.Int
|
||||||
// and cast on insert
|
// and cast on insert
|
||||||
|
|
||||||
connStr := postgres.TestConfig.DbConnectionString()
|
connStr := pgConfig.DbConnectionString()
|
||||||
db, err := sqlx.Connect("postgres", connStr)
|
db, err := sqlx.Connect("postgres", connStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
@ -109,7 +109,7 @@ func TestPostgresSQLX(t *testing.T) {
|
|||||||
badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100))
|
badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100))
|
||||||
badInfo := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"}
|
badInfo := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"}
|
||||||
|
|
||||||
_, err := postgres.NewSQLXDriver(ctx, postgres.TestConfig, badInfo)
|
_, err := postgres.NewSQLXDriver(ctx, pgConfig, badInfo)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Fatal("Expected an error")
|
t.Fatal("Expected an error")
|
||||||
}
|
}
|
||||||
|
@ -19,13 +19,16 @@ package postgres
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/node"
|
"github.com/cerc-io/plugeth-statediff/indexer/node"
|
||||||
)
|
)
|
||||||
|
|
||||||
// SetupSQLXDB is used to setup a sqlx db for tests
|
// SetupSQLXDB is used to setup a sqlx db for tests
|
||||||
func SetupSQLXDB() (sql.Database, error) {
|
func SetupSQLXDB() (sql.Database, error) {
|
||||||
conf := TestConfig
|
conf, err := TestConfig.WithEnv()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
conf.MaxIdle = 0
|
conf.MaxIdle = 0
|
||||||
driver, err := NewSQLXDriver(context.Background(), conf, node.Info{})
|
driver, err := NewSQLXDriver(context.Background(), conf, node.Info{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -22,9 +22,9 @@ import (
|
|||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/test"
|
"github.com/cerc-io/plugeth-statediff/indexer/test"
|
||||||
)
|
)
|
||||||
|
|
||||||
func setupLegacySQLXIndexer(t *testing.T) {
|
func setupLegacySQLXIndexer(t *testing.T) {
|
||||||
|
@ -23,10 +23,10 @@ import (
|
|||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
|
"github.com/cerc-io/plugeth-statediff/indexer/mocks"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/test"
|
"github.com/cerc-io/plugeth-statediff/indexer/test"
|
||||||
)
|
)
|
||||||
|
|
||||||
func setupSQLXIndexer(t *testing.T) {
|
func setupSQLXIndexer(t *testing.T) {
|
||||||
@ -34,7 +34,7 @@ func setupSQLXIndexer(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestConfig, db)
|
ind, err = sql.NewStateDiffIndexer(context.Background(), mocks.TestChainConfig, db)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
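
For reference, a hedged sketch of the construction these tests perform, outside the test harness: open the test database and build the SQL-backed indexer with a chain config (mocks.TestChainConfig is MainnetChainConfig). Closing the returned indexer is assumed to follow the updated interface.

package main

import (
    "context"
    "fmt"

    "github.com/ethereum/go-ethereum/params"

    "github.com/cerc-io/plugeth-statediff/indexer/database/sql"
    "github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
)

func main() {
    // Connect to the test database (env overrides applied inside SetupSQLXDB).
    db, err := postgres.SetupSQLXDB()
    if err != nil {
        panic(err)
    }

    // Build the SQL-backed statediff indexer for mainnet chain rules.
    ind, err := sql.NewStateDiffIndexer(context.Background(), params.MainnetChainConfig, db)
    if err != nil {
        panic(err)
    }
    defer ind.Close()

    fmt.Println("indexer ready")
}
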
@ -55,7 +55,7 @@ func TestSQLXIndexer(t *testing.T) {
|
|||||||
defer tearDown(t)
|
defer tearDown(t)
|
||||||
defer checkTxClosure(t, 0, 0, 0)
|
defer checkTxClosure(t, 0, 0, 0)
|
||||||
|
|
||||||
test.TestPublishAndIndexHeaderIPLDs(t, db)
|
test.DoTestPublishAndIndexHeaderIPLDs(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) {
|
t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) {
|
||||||
@ -63,7 +63,7 @@ func TestSQLXIndexer(t *testing.T) {
|
|||||||
defer tearDown(t)
|
defer tearDown(t)
|
||||||
defer checkTxClosure(t, 0, 0, 0)
|
defer checkTxClosure(t, 0, 0, 0)
|
||||||
|
|
||||||
test.TestPublishAndIndexTransactionIPLDs(t, db)
|
test.DoTestPublishAndIndexTransactionIPLDs(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index log IPLDs for multiple receipt of a specific block", func(t *testing.T) {
|
t.Run("Publish and index log IPLDs for multiple receipt of a specific block", func(t *testing.T) {
|
||||||
@ -71,7 +71,7 @@ func TestSQLXIndexer(t *testing.T) {
|
|||||||
defer tearDown(t)
|
defer tearDown(t)
|
||||||
defer checkTxClosure(t, 0, 0, 0)
|
defer checkTxClosure(t, 0, 0, 0)
|
||||||
|
|
||||||
test.TestPublishAndIndexLogIPLDs(t, db)
|
test.DoTestPublishAndIndexLogIPLDs(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index receipt IPLDs in a single tx", func(t *testing.T) {
|
t.Run("Publish and index receipt IPLDs in a single tx", func(t *testing.T) {
|
||||||
@ -79,7 +79,7 @@ func TestSQLXIndexer(t *testing.T) {
|
|||||||
defer tearDown(t)
|
defer tearDown(t)
|
||||||
defer checkTxClosure(t, 0, 0, 0)
|
defer checkTxClosure(t, 0, 0, 0)
|
||||||
|
|
||||||
test.TestPublishAndIndexReceiptIPLDs(t, db)
|
test.DoTestPublishAndIndexReceiptIPLDs(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) {
|
t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) {
|
||||||
@ -87,7 +87,7 @@ func TestSQLXIndexer(t *testing.T) {
|
|||||||
defer tearDown(t)
|
defer tearDown(t)
|
||||||
defer checkTxClosure(t, 0, 0, 0)
|
defer checkTxClosure(t, 0, 0, 0)
|
||||||
|
|
||||||
test.TestPublishAndIndexStateIPLDs(t, db)
|
test.DoTestPublishAndIndexStateIPLDs(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) {
|
t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) {
|
||||||
@ -95,7 +95,7 @@ func TestSQLXIndexer(t *testing.T) {
|
|||||||
defer tearDown(t)
|
defer tearDown(t)
|
||||||
defer checkTxClosure(t, 0, 0, 0)
|
defer checkTxClosure(t, 0, 0, 0)
|
||||||
|
|
||||||
test.TestPublishAndIndexStorageIPLDs(t, db)
|
test.DoTestPublishAndIndexStorageIPLDs(t, db)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -114,7 +114,7 @@ func TestSQLXIndexerNonCanonical(t *testing.T) {
|
|||||||
defer tearDown(t)
|
defer tearDown(t)
|
||||||
defer checkTxClosure(t, 0, 0, 0)
|
defer checkTxClosure(t, 0, 0, 0)
|
||||||
|
|
||||||
test.TestPublishAndIndexTransactionsNonCanonical(t, db)
|
test.DoTestPublishAndIndexTransactionsNonCanonical(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index receipts", func(t *testing.T) {
|
t.Run("Publish and index receipts", func(t *testing.T) {
|
||||||
@ -122,7 +122,7 @@ func TestSQLXIndexerNonCanonical(t *testing.T) {
|
|||||||
defer tearDown(t)
|
defer tearDown(t)
|
||||||
defer checkTxClosure(t, 0, 0, 0)
|
defer checkTxClosure(t, 0, 0, 0)
|
||||||
|
|
||||||
test.TestPublishAndIndexReceiptsNonCanonical(t, db)
|
test.DoTestPublishAndIndexReceiptsNonCanonical(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index logs", func(t *testing.T) {
|
t.Run("Publish and index logs", func(t *testing.T) {
|
||||||
@ -130,7 +130,7 @@ func TestSQLXIndexerNonCanonical(t *testing.T) {
|
|||||||
defer tearDown(t)
|
defer tearDown(t)
|
||||||
defer checkTxClosure(t, 0, 0, 0)
|
defer checkTxClosure(t, 0, 0, 0)
|
||||||
|
|
||||||
test.TestPublishAndIndexLogsNonCanonical(t, db)
|
test.DoTestPublishAndIndexLogsNonCanonical(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index state nodes", func(t *testing.T) {
|
t.Run("Publish and index state nodes", func(t *testing.T) {
|
||||||
@ -138,7 +138,7 @@ func TestSQLXIndexerNonCanonical(t *testing.T) {
|
|||||||
defer tearDown(t)
|
defer tearDown(t)
|
||||||
defer checkTxClosure(t, 0, 0, 0)
|
defer checkTxClosure(t, 0, 0, 0)
|
||||||
|
|
||||||
test.TestPublishAndIndexStateNonCanonical(t, db)
|
test.DoTestPublishAndIndexStateNonCanonical(t, db)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("Publish and index storage nodes", func(t *testing.T) {
|
t.Run("Publish and index storage nodes", func(t *testing.T) {
|
||||||
@ -146,7 +146,7 @@ func TestSQLXIndexerNonCanonical(t *testing.T) {
|
|||||||
defer tearDown(t)
|
defer tearDown(t)
|
||||||
defer checkTxClosure(t, 0, 0, 0)
|
defer checkTxClosure(t, 0, 0, 0)
|
||||||
|
|
||||||
test.TestPublishAndIndexStorageNonCanonical(t, db)
|
test.DoTestPublishAndIndexStorageNonCanonical(t, db)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -20,13 +20,15 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/jackc/pgtype"
|
"github.com/jackc/pgtype"
|
||||||
shopspring "github.com/jackc/pgtype/ext/shopspring-numeric"
|
shopspring "github.com/jackc/pgtype/ext/shopspring-numeric"
|
||||||
"github.com/lib/pq"
|
"github.com/lib/pq"
|
||||||
"github.com/shopspring/decimal"
|
"github.com/shopspring/decimal"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/metrics"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
|
||||||
|
"github.com/cerc-io/plugeth-statediff/indexer/models"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Writer handles processing and writing of indexed IPLD objects to Postgres
|
// Writer handles processing and writing of indexed IPLD objects to Postgres
|
||||||
@ -46,6 +48,55 @@ func (w *Writer) Close() error {
|
|||||||
return w.db.Close()
|
return w.db.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// hasHeader returns true if a matching hash+number record exists in the database, else false.
|
||||||
|
func (w *Writer) hasHeader(blockHash common.Hash, blockNumber uint64) (exists bool, err error) {
|
||||||
|
// pgx misdetects the parameter OIDs and selects int8, which can overflow.
|
||||||
|
// Unfortunately there is no good place to override it, so it is safer to pass the uint64s as text
|
||||||
|
// and let PG handle the cast
|
||||||
|
err = w.db.QueryRow(w.db.Context(), w.db.ExistsHeaderStm(), strconv.FormatUint(blockNumber, 10), blockHash.String()).Scan(&exists)
|
||||||
|
return exists, err
|
||||||
|
}
|
||||||
|
|
||||||
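
The comment in hasHeader explains why the block number is bound as text: pgx would otherwise pick int8, which a large uint64 can overflow. Below is a hedged sketch of the same workaround, written against sql.Database as its methods are used in this diff (QueryRow and Context are assumed to be part of that interface; the query mirrors ExistsHeaderStm with the table name written out).

package example

import (
    "strconv"

    "github.com/ethereum/go-ethereum/common"

    "github.com/cerc-io/plugeth-statediff/indexer/database/sql"
)

// headerExists mirrors hasHeader above: the uint64 block number is formatted as
// a decimal string and cast inside Postgres, instead of being bound as int8.
func headerExists(db sql.Database, hash common.Hash, number uint64) (bool, error) {
    var exists bool
    stmt := `SELECT EXISTS(SELECT 1 FROM eth.header_cids WHERE block_number = $1::BIGINT AND block_hash = $2::TEXT LIMIT 1)`
    err := db.QueryRow(db.Context(), stmt, strconv.FormatUint(number, 10), hash.String()).Scan(&exists)
    return exists, err
}
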
|
// detectGaps returns a list of BlockGaps detected within the specified block range
|
||||||
|
// For example, if the database contains blocks in the overall range 1000:2000, but is missing blocks 1110:1230 and 1380,
|
||||||
|
// it would return [{FirstMissing: 1110, LastMissing: 1230}, {FirstMissing: 1380, LastMissing: 1380}]
|
||||||
|
func (w *Writer) detectGaps(beginBlockNumber uint64, endBlockNumber uint64) ([]*interfaces.BlockGap, error) {
|
||||||
|
var gaps []*interfaces.BlockGap
|
||||||
|
// pgx misdetects the parameter OIDs and selects int8, which can overflow.
|
||||||
|
// Unfortunately there is no good place to override it, so it is safer to pass the uint64s as text
|
||||||
|
// and let PG handle the cast
|
||||||
|
err := w.db.Select(w.db.Context(), &gaps, w.db.DetectGapsStm(), strconv.FormatUint(beginBlockNumber, 10), strconv.FormatUint(endBlockNumber, 10))
|
||||||
|
return gaps, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// maxHeader returns the header for the highest block number in the database.
|
||||||
|
// SELECT block_number, block_hash, parent_hash, cid, td, node_ids, reward, state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, coinbase FROM %s ORDER BY block_number DESC LIMIT 1
|
||||||
|
func (w *Writer) maxHeader() (*models.HeaderModel, error) {
|
||||||
|
var model models.HeaderModel
|
||||||
|
var err error
|
||||||
|
var number, td, reward uint64
|
||||||
|
err = w.db.QueryRow(w.db.Context(), w.db.MaxHeaderStm()).Scan(
|
||||||
|
&number,
|
||||||
|
&model.BlockHash,
|
||||||
|
&model.ParentHash,
|
||||||
|
&model.CID,
|
||||||
|
&td,
|
||||||
|
&model.NodeIDs,
|
||||||
|
&reward,
|
||||||
|
&model.StateRoot,
|
||||||
|
&model.TxRoot,
|
||||||
|
&model.RctRoot,
|
||||||
|
&model.UnclesHash,
|
||||||
|
&model.Bloom,
|
||||||
|
&model.Timestamp,
|
||||||
|
&model.Coinbase,
|
||||||
|
)
|
||||||
|
model.BlockNumber = strconv.FormatUint(number, 10)
|
||||||
|
model.TotalDifficulty = strconv.FormatUint(td, 10)
|
||||||
|
model.Reward = strconv.FormatUint(reward, 10)
|
||||||
|
return &model, err
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_ids, reward, state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, coinbase)
|
INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_ids, reward, state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, coinbase)
|
||||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
|
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
|
||||||
|
@ -17,18 +17,22 @@
|
|||||||
package interfaces
|
package interfaces
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
|
||||||
"math/big"
|
"math/big"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/cerc-io/plugeth-statediff/indexer/models"
|
||||||
|
"github.com/cerc-io/plugeth-statediff/indexer/shared"
|
||||||
|
sdtypes "github.com/cerc-io/plugeth-statediff/types"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
|
|
||||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// StateDiffIndexer interface required to index statediff data
|
// StateDiffIndexer interface required to index statediff data
|
||||||
type StateDiffIndexer interface {
|
type StateDiffIndexer interface {
|
||||||
|
DetectGaps(beginBlock uint64, endBlock uint64) ([]*BlockGap, error)
|
||||||
|
CurrentBlock() (*models.HeaderModel, error)
|
||||||
|
HasBlock(hash common.Hash, number uint64) (bool, error)
|
||||||
PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (Batch, error)
|
PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (Batch, error)
|
||||||
PushStateNode(tx Batch, stateNode sdtypes.StateLeafNode, headerID string) error
|
PushStateNode(tx Batch, stateNode sdtypes.StateLeafNode, headerID string) error
|
||||||
PushIPLD(tx Batch, ipld sdtypes.IPLD) error
|
PushIPLD(tx Batch, ipld sdtypes.IPLD) error
|
||||||
@ -41,7 +45,7 @@ type StateDiffIndexer interface {
|
|||||||
SetWatchedAddresses(args []sdtypes.WatchAddressArg, currentBlockNumber *big.Int) error
|
SetWatchedAddresses(args []sdtypes.WatchAddressArg, currentBlockNumber *big.Int) error
|
||||||
ClearWatchedAddresses() error
|
ClearWatchedAddresses() error
|
||||||
|
|
||||||
io.Closer
|
Close() error
|
||||||
}
|
}
|
||||||
|
|
||||||
// Batch required for indexing data atomically
|
// Batch required for indexing data atomically
|
||||||
@ -53,3 +57,9 @@ type Batch interface {
|
|||||||
type Config interface {
|
type Config interface {
|
||||||
Type() shared.DBType
|
Type() shared.DBType
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// BlockGap represents a gap in statediffed blocks
|
||||||
|
type BlockGap struct {
|
||||||
|
FirstMissing uint64 `json:"firstMissing"`
|
||||||
|
LastMissing uint64 `json:"lastMissing"`
|
||||||
|
}
|
||||||
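
A short hedged sketch of a caller using the methods added to StateDiffIndexer above; the indexer value is assumed to come from one of the constructors elsewhere in this PR, and the printed output is illustrative.

package example

import (
    "fmt"

    "github.com/cerc-io/plugeth-statediff/indexer/interfaces"
)

// reportGaps prints any missing block ranges between two heights, using the
// CurrentBlock and DetectGaps methods added to the StateDiffIndexer interface.
func reportGaps(ind interfaces.StateDiffIndexer, from, to uint64) error {
    head, err := ind.CurrentBlock()
    if err != nil {
        return err
    }
    fmt.Println("latest indexed header:", head.BlockNumber)

    gaps, err := ind.DetectGaps(from, to)
    if err != nil {
        return err
    }
    for _, gap := range gaps {
        fmt.Printf("missing blocks %d..%d\n", gap.FirstMissing, gap.LastMissing)
    }
    return nil
}
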
|
@ -1,11 +1,10 @@
|
|||||||
package ipld
|
package ipld
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/ipfs/go-cid"
|
|
||||||
mh "github.com/multiformats/go-multihash"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
"github.com/ipfs/go-cid"
|
||||||
|
mh "github.com/multiformats/go-multihash"
|
||||||
)
|
)
|
||||||
|
|
||||||
// EthLog (eth-log, codec 0x9a), represents an ethereum block header
|
// EthLog (eth-log, codec 0x9a), represents an ethereum block header
|
||||||
|
@ -22,25 +22,25 @@ import (
|
|||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
|
||||||
ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/statediff/test_helpers"
|
|
||||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
|
|
||||||
|
"github.com/cerc-io/plugeth-statediff/indexer/ipld"
|
||||||
|
"github.com/cerc-io/plugeth-statediff/indexer/shared"
|
||||||
|
"github.com/cerc-io/plugeth-statediff/test_helpers"
|
||||||
|
sdtypes "github.com/cerc-io/plugeth-statediff/types"
|
||||||
|
"github.com/cerc-io/plugeth-statediff/utils/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Test variables
|
// Test variables
|
||||||
var (
|
var (
|
||||||
// block data
|
// block data
|
||||||
TestConfig = params.MainnetChainConfig
|
TestChainConfig = params.MainnetChainConfig
|
||||||
BlockNumber = TestConfig.LondonBlock
|
BlockNumber = TestChainConfig.LondonBlock
|
||||||
|
|
||||||
// canonical block at London height
|
// canonical block at London height
|
||||||
// includes 5 transactions: 3 Legacy + 1 EIP-2930 + 1 EIP-1559
|
// includes 5 transactions: 3 Legacy + 1 EIP-2930 + 1 EIP-1559
|
||||||
@ -55,7 +55,7 @@ var (
|
|||||||
BaseFee: big.NewInt(params.InitialBaseFee),
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
||||||
Coinbase: common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476777"),
|
Coinbase: common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476777"),
|
||||||
}
|
}
|
||||||
MockTransactions, MockReceipts, SenderAddr = createTransactionsAndReceipts(TestConfig, BlockNumber)
|
MockTransactions, MockReceipts, SenderAddr = createTransactionsAndReceipts(TestChainConfig, BlockNumber)
|
||||||
MockBlock = types.NewBlock(&MockHeader, MockTransactions, nil, MockReceipts, trie.NewEmpty(nil))
|
MockBlock = types.NewBlock(&MockHeader, MockTransactions, nil, MockReceipts, trie.NewEmpty(nil))
|
||||||
MockHeaderRlp, _ = rlp.EncodeToBytes(MockBlock.Header())
|
MockHeaderRlp, _ = rlp.EncodeToBytes(MockBlock.Header())
|
||||||
|
|
||||||
@ -63,7 +63,7 @@ var (
|
|||||||
// includes 2nd and 5th transactions from the canonical block
|
// includes 2nd and 5th transactions from the canonical block
|
||||||
MockNonCanonicalHeader = MockHeader
|
MockNonCanonicalHeader = MockHeader
|
||||||
MockNonCanonicalBlockTransactions = types.Transactions{MockTransactions[1], MockTransactions[4]}
|
MockNonCanonicalBlockTransactions = types.Transactions{MockTransactions[1], MockTransactions[4]}
|
||||||
MockNonCanonicalBlockReceipts = createNonCanonicalBlockReceipts(TestConfig, BlockNumber, MockNonCanonicalBlockTransactions)
|
MockNonCanonicalBlockReceipts = createNonCanonicalBlockReceipts(TestChainConfig, BlockNumber, MockNonCanonicalBlockTransactions)
|
||||||
MockNonCanonicalBlock = types.NewBlock(&MockNonCanonicalHeader, MockNonCanonicalBlockTransactions, nil, MockNonCanonicalBlockReceipts, trie.NewEmpty(nil))
|
MockNonCanonicalBlock = types.NewBlock(&MockNonCanonicalHeader, MockNonCanonicalBlockTransactions, nil, MockNonCanonicalBlockReceipts, trie.NewEmpty(nil))
|
||||||
MockNonCanonicalHeaderRlp, _ = rlp.EncodeToBytes(MockNonCanonicalBlock.Header())
|
MockNonCanonicalHeaderRlp, _ = rlp.EncodeToBytes(MockNonCanonicalBlock.Header())
|
||||||
|
|
||||||
@ -82,7 +82,7 @@ var (
|
|||||||
Coinbase: common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476777"),
|
Coinbase: common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476777"),
|
||||||
}
|
}
|
||||||
MockNonCanonicalBlock2Transactions = types.Transactions{MockTransactions[2], MockTransactions[4]}
|
MockNonCanonicalBlock2Transactions = types.Transactions{MockTransactions[2], MockTransactions[4]}
|
||||||
MockNonCanonicalBlock2Receipts = createNonCanonicalBlockReceipts(TestConfig, Block2Number, MockNonCanonicalBlock2Transactions)
|
MockNonCanonicalBlock2Receipts = createNonCanonicalBlockReceipts(TestChainConfig, Block2Number, MockNonCanonicalBlock2Transactions)
|
||||||
MockNonCanonicalBlock2 = types.NewBlock(&MockNonCanonicalHeader2, MockNonCanonicalBlock2Transactions, nil, MockNonCanonicalBlock2Receipts, trie.NewEmpty(nil))
|
MockNonCanonicalBlock2 = types.NewBlock(&MockNonCanonicalHeader2, MockNonCanonicalBlock2Transactions, nil, MockNonCanonicalBlock2Receipts, trie.NewEmpty(nil))
|
||||||
MockNonCanonicalHeader2Rlp, _ = rlp.EncodeToBytes(MockNonCanonicalBlock2.Header())
|
MockNonCanonicalHeader2Rlp, _ = rlp.EncodeToBytes(MockNonCanonicalBlock2.Header())
|
||||||
|
|
||||||
@ -150,7 +150,7 @@ var (
|
|||||||
StoragePartialPath,
|
StoragePartialPath,
|
||||||
StorageValue,
|
StorageValue,
|
||||||
})
|
})
|
||||||
StorageLeafNodeCID = ipld2.Keccak256ToCid(ipld2.MEthStorageTrie, crypto.Keccak256(StorageLeafNode)).String()
|
StorageLeafNodeCID = ipld.Keccak256ToCid(ipld.MEthStorageTrie, crypto.Keccak256(StorageLeafNode)).String()
|
||||||
|
|
||||||
nonce1 = uint64(1)
|
nonce1 = uint64(1)
|
||||||
ContractRoot = "0x821e2556a290c86405f8160a2d662042a431ba456b9db265c79bb837c04be5f0"
|
ContractRoot = "0x821e2556a290c86405f8160a2d662042a431ba456b9db265c79bb837c04be5f0"
|
||||||
@ -169,7 +169,7 @@ var (
|
|||||||
ContractPartialPath,
|
ContractPartialPath,
|
||||||
ContractAccount,
|
ContractAccount,
|
||||||
})
|
})
|
||||||
ContractLeafNodeCID = ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(ContractLeafNode)).String()
|
ContractLeafNodeCID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(ContractLeafNode)).String()
|
||||||
|
|
||||||
Contract2LeafKey = test_helpers.AddressToLeafKey(ContractAddress2)
|
Contract2LeafKey = test_helpers.AddressToLeafKey(ContractAddress2)
|
||||||
storage2Location = common.HexToHash("2")
|
storage2Location = common.HexToHash("2")
|
||||||
@ -195,7 +195,7 @@ var (
|
|||||||
AccountPartialPath,
|
AccountPartialPath,
|
||||||
Account,
|
Account,
|
||||||
})
|
})
|
||||||
AccountLeafNodeCID = ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(AccountLeafNode)).String()
|
AccountLeafNodeCID = ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(AccountLeafNode)).String()
|
||||||
|
|
||||||
StateDiffs = []sdtypes.StateLeafNode{
|
StateDiffs = []sdtypes.StateLeafNode{
|
||||||
{
|
{
|
||||||
|
@ -28,7 +28,7 @@ const (
|
|||||||
POSTGRES DBType = "Postgres"
|
POSTGRES DBType = "Postgres"
|
||||||
DUMP DBType = "Dump"
|
DUMP DBType = "Dump"
|
||||||
FILE DBType = "File"
|
FILE DBType = "File"
|
||||||
UNKNOWN DBType = "Unknown"
|
INVALID DBType = "Invalid"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ResolveDBType resolves a DBType from a provided string
|
// ResolveDBType resolves a DBType from a provided string
|
||||||
@ -41,6 +41,17 @@ func ResolveDBType(str string) (DBType, error) {
|
|||||||
case "file", "f", "fs":
|
case "file", "f", "fs":
|
||||||
return FILE, nil
|
return FILE, nil
|
||||||
default:
|
default:
|
||||||
return UNKNOWN, fmt.Errorf("unrecognized db type string: %s", str)
|
return INVALID, fmt.Errorf("unrecognized db type string: %s", str)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Set satisfies flag.Value
|
||||||
|
func (dbt *DBType) Set(v string) (err error) {
|
||||||
|
*dbt, err = ResolveDBType(v)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// String satisfies flag.Value
|
||||||
|
func (dbt *DBType) String() string {
|
||||||
|
return strings.ToLower(string(*dbt))
|
||||||
|
}
|
||||||
|
@ -25,13 +25,13 @@ func HandleZeroAddrPointer(to *common.Address) string {
|
|||||||
if to == nil {
|
if to == nil {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
return to.Hex()
|
return to.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
// HandleZeroAddr will return an empty string for a 0 value address
|
// HandleZeroAddr will return an empty string for a 0 value address
|
||||||
func HandleZeroAddr(to common.Address) string {
|
func HandleZeroAddr(to common.Address) string {
|
||||||
if to.Hex() == "0x0000000000000000000000000000000000000000" {
|
if to == (common.Address{}) {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
return to.Hex()
|
return to.String()
|
||||||
}
|
}
|
||||||
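
The updated check compares against the zero value common.Address{} directly, a plain array comparison, instead of formatting a hex string first. A minimal runnable sketch of the same check (the function here is local to the example, not the package's exported helper):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
)

// handleZeroAddr returns "" for the zero address, mirroring the change above.
func handleZeroAddr(to common.Address) string {
    if to == (common.Address{}) { // Address is a fixed-size byte array, so == works
        return ""
    }
    return to.String()
}

func main() {
    fmt.Printf("%q\n", handleZeroAddr(common.Address{}))
    fmt.Printf("%q\n", handleZeroAddr(common.HexToAddress("0xae9bea628c4ce503dcfd7e305cab4e29e7476777")))
}
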
|
@ -5,7 +5,7 @@ import (
|
|||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
. "github.com/ethereum/go-ethereum/statediff/indexer/shared/schema"
|
. "github.com/cerc-io/plugeth-statediff/indexer/shared/schema"
|
||||||
)
|
)
|
||||||
|
|
||||||
var testHeaderTable = Table{
|
var testHeaderTable = Table{
|
||||||
|
@ -21,21 +21,20 @@ import (
|
|||||||
"sort"
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
|
|
||||||
"github.com/ipfs/go-cid"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/file"
|
"github.com/ipfs/go-cid"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
|
"github.com/stretchr/testify/require"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
|
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/models"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/file"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
|
"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
|
||||||
"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
|
"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
|
||||||
|
"github.com/cerc-io/plugeth-statediff/indexer/mocks"
|
||||||
|
"github.com/cerc-io/plugeth-statediff/indexer/models"
|
||||||
|
"github.com/cerc-io/plugeth-statediff/indexer/shared"
|
||||||
|
"github.com/cerc-io/plugeth-statediff/indexer/test_helpers"
|
||||||
)
|
)
|
||||||
|
|
||||||
// SetupTestData indexes a single mock block along with its state nodes
|
// SetupTestData indexes a single mock block along with its state nodes
|
||||||
@ -69,7 +68,7 @@ func SetupTestData(t *testing.T, ind interfaces.StateDiffIndexer) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPublishAndIndexHeaderIPLDs(t *testing.T, db sql.Database) {
|
func DoTestPublishAndIndexHeaderIPLDs(t *testing.T, db sql.Database) {
|
||||||
pgStr := `SELECT cid, cast(td AS TEXT), cast(reward AS TEXT), block_hash, coinbase
|
pgStr := `SELECT cid, cast(td AS TEXT), cast(reward AS TEXT), block_hash, coinbase
|
||||||
FROM eth.header_cids
|
FROM eth.header_cids
|
||||||
WHERE block_number = $1`
|
WHERE block_number = $1`
|
||||||
@ -107,7 +106,7 @@ func TestPublishAndIndexHeaderIPLDs(t *testing.T, db sql.Database) {
|
|||||||
require.Equal(t, mocks.MockHeaderRlp, data)
|
require.Equal(t, mocks.MockHeaderRlp, data)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPublishAndIndexTransactionIPLDs(t *testing.T, db sql.Database) {
|
func DoTestPublishAndIndexTransactionIPLDs(t *testing.T, db sql.Database) {
|
||||||
// check that txs were properly indexed and published
|
// check that txs were properly indexed and published
|
||||||
trxs := make([]string, 0)
|
trxs := make([]string, 0)
|
||||||
pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
|
pgStr := `SELECT transaction_cids.cid FROM eth.transaction_cids INNER JOIN eth.header_cids ON (transaction_cids.header_id = header_cids.block_hash)
|
||||||
@ -209,7 +208,7 @@ func TestPublishAndIndexTransactionIPLDs(t *testing.T, db sql.Database) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPublishAndIndexLogIPLDs(t *testing.T, db sql.Database) {
|
func DoTestPublishAndIndexLogIPLDs(t *testing.T, db sql.Database) {
|
||||||
rcts := make([]string, 0)
|
rcts := make([]string, 0)
|
||||||
rctsPgStr := `SELECT receipt_cids.cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
rctsPgStr := `SELECT receipt_cids.cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
||||||
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
|
WHERE receipt_cids.tx_id = transaction_cids.tx_hash
|
||||||
@ -251,7 +250,7 @@ func TestPublishAndIndexLogIPLDs(t *testing.T, db sql.Database) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPublishAndIndexReceiptIPLDs(t *testing.T, db sql.Database) {
|
func DoTestPublishAndIndexReceiptIPLDs(t *testing.T, db sql.Database) {
|
||||||
// check receipts were properly indexed and published
|
// check receipts were properly indexed and published
|
||||||
rcts := make([]string, 0)
|
rcts := make([]string, 0)
|
||||||
pgStr := `SELECT receipt_cids.cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
pgStr := `SELECT receipt_cids.cid FROM eth.receipt_cids, eth.transaction_cids, eth.header_cids
|
||||||
@ -341,7 +340,7 @@ func TestPublishAndIndexReceiptIPLDs(t *testing.T, db sql.Database) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPublishAndIndexStateIPLDs(t *testing.T, db sql.Database) {
|
func DoTestPublishAndIndexStateIPLDs(t *testing.T, db sql.Database) {
|
||||||
// check that state nodes were properly indexed and published
|
// check that state nodes were properly indexed and published
|
||||||
stateNodes := make([]models.StateNodeModel, 0)
|
stateNodes := make([]models.StateNodeModel, 0)
|
||||||
pgStr := `SELECT state_cids.cid, CAST(state_cids.block_number as TEXT), state_cids.state_leaf_key, state_cids.removed,
|
pgStr := `SELECT state_cids.cid, CAST(state_cids.block_number as TEXT), state_cids.state_leaf_key, state_cids.removed,
|
||||||
@ -366,7 +365,7 @@ func TestPublishAndIndexStateIPLDs(t *testing.T, db sql.Database) {
|
|||||||
}
|
}
|
||||||
if stateNode.CID == state1CID.String() {
|
if stateNode.CID == state1CID.String() {
|
||||||
require.Equal(t, false, stateNode.Removed)
|
require.Equal(t, false, stateNode.Removed)
|
||||||
require.Equal(t, common.BytesToHash(mocks.ContractLeafKey).Hex(), stateNode.StateKey)
|
require.Equal(t, common.BytesToHash(mocks.ContractLeafKey).String(), stateNode.StateKey)
|
||||||
require.Equal(t, mocks.ContractLeafNode, data)
|
require.Equal(t, mocks.ContractLeafNode, data)
|
||||||
require.Equal(t, mocks.BlockNumber.String(), stateNode.BlockNumber)
|
require.Equal(t, mocks.BlockNumber.String(), stateNode.BlockNumber)
|
||||||
require.Equal(t, "0", stateNode.Balance)
|
require.Equal(t, "0", stateNode.Balance)
|
||||||
@ -377,7 +376,7 @@ func TestPublishAndIndexStateIPLDs(t *testing.T, db sql.Database) {
|
|||||||
}
|
}
|
||||||
if stateNode.CID == state2CID.String() {
|
if stateNode.CID == state2CID.String() {
|
||||||
require.Equal(t, false, stateNode.Removed)
|
require.Equal(t, false, stateNode.Removed)
|
||||||
require.Equal(t, common.BytesToHash(mocks.AccountLeafKey).Hex(), stateNode.StateKey)
|
require.Equal(t, common.BytesToHash(mocks.AccountLeafKey).String(), stateNode.StateKey)
|
||||||
require.Equal(t, mocks.AccountLeafNode, data)
|
require.Equal(t, mocks.AccountLeafNode, data)
|
||||||
require.Equal(t, mocks.BlockNumber.String(), stateNode.BlockNumber)
|
require.Equal(t, mocks.BlockNumber.String(), stateNode.BlockNumber)
|
||||||
require.Equal(t, mocks.Balance.String(), stateNode.Balance)
|
require.Equal(t, mocks.Balance.String(), stateNode.Balance)
|
||||||
@ -412,11 +411,11 @@ func TestPublishAndIndexStateIPLDs(t *testing.T, db sql.Database) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if common.BytesToHash(mocks.RemovedLeafKey).Hex() == stateNode.StateKey {
|
if common.BytesToHash(mocks.RemovedLeafKey).String() == stateNode.StateKey {
|
||||||
require.Equal(t, shared.RemovedNodeStateCID, stateNode.CID)
|
require.Equal(t, shared.RemovedNodeStateCID, stateNode.CID)
|
||||||
require.Equal(t, true, stateNode.Removed)
|
require.Equal(t, true, stateNode.Removed)
|
||||||
require.Equal(t, []byte{}, data)
|
require.Equal(t, []byte{}, data)
|
||||||
} else if common.BytesToHash(mocks.Contract2LeafKey).Hex() == stateNode.StateKey {
|
} else if common.BytesToHash(mocks.Contract2LeafKey).String() == stateNode.StateKey {
|
||||||
require.Equal(t, shared.RemovedNodeStateCID, stateNode.CID)
|
require.Equal(t, shared.RemovedNodeStateCID, stateNode.CID)
|
||||||
require.Equal(t, true, stateNode.Removed)
|
require.Equal(t, true, stateNode.Removed)
|
||||||
require.Equal(t, []byte{}, data)
|
require.Equal(t, []byte{}, data)
|
||||||
@ -439,7 +438,7 @@ type StorageNodeModel struct {
|
|||||||
}
|
}
|
||||||
*/
|
*/
|
||||||
|
|
||||||
func TestPublishAndIndexStorageIPLDs(t *testing.T, db sql.Database) {
|
func DoTestPublishAndIndexStorageIPLDs(t *testing.T, db sql.Database) {
|
||||||
// check that storage nodes were properly indexed
|
// check that storage nodes were properly indexed
|
||||||
storageNodes := make([]models.StorageNodeModel, 0)
|
storageNodes := make([]models.StorageNodeModel, 0)
|
||||||
pgStr := `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.header_id, storage_cids.cid,
|
pgStr := `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.header_id, storage_cids.cid,
|
||||||
@ -455,11 +454,11 @@ func TestPublishAndIndexStorageIPLDs(t *testing.T, db sql.Database) {
|
|||||||
require.Equal(t, 1, len(storageNodes))
|
require.Equal(t, 1, len(storageNodes))
|
||||||
require.Equal(t, models.StorageNodeModel{
|
require.Equal(t, models.StorageNodeModel{
|
||||||
BlockNumber: mocks.BlockNumber.String(),
|
BlockNumber: mocks.BlockNumber.String(),
|
||||||
HeaderID: mockBlock.Header().Hash().Hex(),
|
HeaderID: mockBlock.Header().Hash().String(),
|
||||||
CID: storageCID.String(),
|
CID: storageCID.String(),
|
||||||
Removed: false,
|
Removed: false,
|
||||||
StorageKey: common.BytesToHash(mocks.StorageLeafKey).Hex(),
|
StorageKey: common.BytesToHash(mocks.StorageLeafKey).String(),
|
||||||
StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
|
StateKey: common.BytesToHash(mocks.ContractLeafKey).String(),
|
||||||
Value: mocks.StorageValue,
|
Value: mocks.StorageValue,
|
||||||
}, storageNodes[0])
|
}, storageNodes[0])
|
||||||
var data []byte
|
var data []byte
|
||||||
@ -489,29 +488,29 @@ func TestPublishAndIndexStorageIPLDs(t *testing.T, db sql.Database) {
|
|||||||
expectedStorageNodes := []models.StorageNodeModel{ // TODO: ordering is non-deterministic
|
expectedStorageNodes := []models.StorageNodeModel{ // TODO: ordering is non-deterministic
|
||||||
{
|
{
|
||||||
BlockNumber: mocks.BlockNumber.String(),
|
BlockNumber: mocks.BlockNumber.String(),
|
||||||
HeaderID: mockBlock.Header().Hash().Hex(),
|
HeaderID: mockBlock.Header().Hash().String(),
|
||||||
CID: shared.RemovedNodeStorageCID,
|
CID: shared.RemovedNodeStorageCID,
|
||||||
Removed: true,
|
Removed: true,
|
||||||
StorageKey: common.BytesToHash(mocks.Storage2LeafKey).Hex(),
|
StorageKey: common.BytesToHash(mocks.Storage2LeafKey).String(),
|
||||||
StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(),
|
StateKey: common.BytesToHash(mocks.Contract2LeafKey).String(),
|
||||||
Value: []byte{},
|
Value: []byte{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
BlockNumber: mocks.BlockNumber.String(),
|
BlockNumber: mocks.BlockNumber.String(),
|
||||||
HeaderID: mockBlock.Header().Hash().Hex(),
|
HeaderID: mockBlock.Header().Hash().String(),
|
||||||
CID: shared.RemovedNodeStorageCID,
|
CID: shared.RemovedNodeStorageCID,
|
||||||
Removed: true,
|
Removed: true,
|
||||||
StorageKey: common.BytesToHash(mocks.Storage3LeafKey).Hex(),
|
StorageKey: common.BytesToHash(mocks.Storage3LeafKey).String(),
|
||||||
StateKey: common.BytesToHash(mocks.Contract2LeafKey).Hex(),
|
StateKey: common.BytesToHash(mocks.Contract2LeafKey).String(),
|
||||||
Value: []byte{},
|
Value: []byte{},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
BlockNumber: mocks.BlockNumber.String(),
|
BlockNumber: mocks.BlockNumber.String(),
|
||||||
HeaderID: mockBlock.Header().Hash().Hex(),
|
HeaderID: mockBlock.Header().Hash().String(),
|
||||||
CID: shared.RemovedNodeStorageCID,
|
CID: shared.RemovedNodeStorageCID,
|
||||||
Removed: true,
|
Removed: true,
|
||||||
StorageKey: common.BytesToHash(mocks.RemovedLeafKey).Hex(),
|
StorageKey: common.BytesToHash(mocks.RemovedLeafKey).String(),
|
||||||
StateKey: common.BytesToHash(mocks.ContractLeafKey).Hex(),
|
StateKey: common.BytesToHash(mocks.ContractLeafKey).String(),
|
||||||
Value: []byte{},
|
Value: []byte{},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -690,7 +689,7 @@ func TestPublishAndIndexHeaderNonCanonical(t *testing.T, db sql.Database) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database) {
|
func DoTestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database) {
|
||||||
// check indexed transactions
|
// check indexed transactions
|
||||||
pgStr := `SELECT CAST(block_number as TEXT), header_id, tx_hash, cid, dst, src, index,
|
pgStr := `SELECT CAST(block_number as TEXT), header_id, tx_hash, cid, dst, src, index,
|
||||||
tx_type, CAST(value as TEXT)
|
tx_type, CAST(value as TEXT)
|
||||||
@ -855,7 +854,7 @@ func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPublishAndIndexReceiptsNonCanonical(t *testing.T, db sql.Database) {
|
func DoTestPublishAndIndexReceiptsNonCanonical(t *testing.T, db sql.Database) {
|
||||||
// check indexed receipts
|
// check indexed receipts
|
||||||
pgStr := `SELECT CAST(block_number as TEXT), header_id, tx_id, cid, post_status, post_state, contract
|
pgStr := `SELECT CAST(block_number as TEXT), header_id, tx_id, cid, post_status, post_state, contract
|
||||||
FROM eth.receipt_cids
|
FROM eth.receipt_cids
|
||||||
@ -947,7 +946,7 @@ func TestPublishAndIndexReceiptsNonCanonical(t *testing.T, db sql.Database) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPublishAndIndexLogsNonCanonical(t *testing.T, db sql.Database) {
|
func DoTestPublishAndIndexLogsNonCanonical(t *testing.T, db sql.Database) {
|
||||||
// check indexed logs
|
// check indexed logs
|
||||||
pgStr := `SELECT address, topic0, topic1, topic2, topic3, data
|
 	pgStr := `SELECT address, topic0, topic1, topic2, topic3, data
 		FROM eth.log_cids
@@ -1002,7 +1001,7 @@ func TestPublishAndIndexLogsNonCanonical(t *testing.T, db sql.Database) {
 	for i, log := range mockRct.rct.Logs {
 		topicSet := make([]string, 4)
 		for ti, topic := range log.Topics {
-			topicSet[ti] = topic.Hex()
+			topicSet[ti] = topic.String()
 		}

 		expectedLog := models.LogsModel{
@@ -1021,7 +1020,7 @@ func TestPublishAndIndexLogsNonCanonical(t *testing.T, db sql.Database) {
 	}
 }

-func TestPublishAndIndexStateNonCanonical(t *testing.T, db sql.Database) {
+func DoTestPublishAndIndexStateNonCanonical(t *testing.T, db sql.Database) {
 	// check indexed state nodes
 	pgStr := `SELECT state_leaf_key, removed, cid, diff
 		FROM eth.state_cids
@@ -1035,7 +1034,7 @@ func TestPublishAndIndexStateNonCanonical(t *testing.T, db sql.Database) {
 	expectedStateNodes := make([]models.StateNodeModel, 0)
 	for i, stateDiff := range mocks.StateDiffs {
 		expectedStateNodes = append(expectedStateNodes, models.StateNodeModel{
-			StateKey: common.BytesToHash(stateDiff.AccountWrapper.LeafKey).Hex(),
+			StateKey: common.BytesToHash(stateDiff.AccountWrapper.LeafKey).String(),
 			Removed:  stateDiff.Removed,
 			CID:      stateNodeCIDs[i].String(),
 			Diff:     true,
@@ -1046,7 +1045,7 @@ func TestPublishAndIndexStateNonCanonical(t *testing.T, db sql.Database) {
 	expectedNonCanonicalBlock2StateNodes := make([]models.StateNodeModel, 0)
 	for i, stateDiff := range mocks.StateDiffs[:2] {
 		expectedNonCanonicalBlock2StateNodes = append(expectedNonCanonicalBlock2StateNodes, models.StateNodeModel{
-			StateKey: common.BytesToHash(stateDiff.AccountWrapper.LeafKey).Hex(),
+			StateKey: common.BytesToHash(stateDiff.AccountWrapper.LeafKey).String(),
 			Removed:  stateDiff.Removed,
 			CID:      stateNodeCIDs[i].String(),
 			Diff:     true,
@@ -1082,7 +1081,7 @@ func TestPublishAndIndexStateNonCanonical(t *testing.T, db sql.Database) {
 	assert.ElementsMatch(t, expectedNonCanonicalBlock2StateNodes, stateNodes)
 }

-func TestPublishAndIndexStorageNonCanonical(t *testing.T, db sql.Database) {
+func DoTestPublishAndIndexStorageNonCanonical(t *testing.T, db sql.Database) {
 	// check indexed storage nodes
 	pgStr := `SELECT storage_leaf_key, state_leaf_key, removed, cid, diff, val
 		FROM eth.storage_cids
@@ -1098,8 +1097,8 @@ func TestPublishAndIndexStorageNonCanonical(t *testing.T, db sql.Database) {
 	for _, stateDiff := range mocks.StateDiffs {
 		for _, storageNode := range stateDiff.StorageDiff {
 			expectedStorageNodes = append(expectedStorageNodes, models.StorageNodeModel{
-				StateKey:   common.BytesToHash(stateDiff.AccountWrapper.LeafKey).Hex(),
-				StorageKey: common.BytesToHash(storageNode.LeafKey).Hex(),
+				StateKey:   common.BytesToHash(stateDiff.AccountWrapper.LeafKey).String(),
+				StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
 				Removed:    storageNode.Removed,
 				CID:        storageNodeCIDs[storageNodeIndex].String(),
 				Diff:       true,
@@ -1115,8 +1114,8 @@ func TestPublishAndIndexStorageNonCanonical(t *testing.T, db sql.Database) {
 	for _, stateDiff := range mocks.StateDiffs[:2] {
 		for _, storageNode := range stateDiff.StorageDiff {
 			expectedNonCanonicalBlock2StorageNodes = append(expectedNonCanonicalBlock2StorageNodes, models.StorageNodeModel{
-				StateKey:   common.BytesToHash(stateDiff.AccountWrapper.LeafKey).Hex(),
-				StorageKey: common.BytesToHash(storageNode.LeafKey).Hex(),
+				StateKey:   common.BytesToHash(stateDiff.AccountWrapper.LeafKey).String(),
+				StorageKey: common.BytesToHash(storageNode.LeafKey).String(),
 				Removed:    storageNode.Removed,
 				CID:        storageNodeCIDs[storageNodeIndex].String(),
 				Diff:       true,
@@ -18,18 +18,17 @@ package test

 import (
 	"bytes"
-	"fmt"
-	"os"
 	"testing"

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
-	"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
-	"github.com/ethereum/go-ethereum/statediff/indexer/models"
-	"github.com/ethereum/go-ethereum/statediff/indexer/shared"
 	"github.com/ipfs/go-cid"
 	"github.com/multiformats/go-multihash"

+	"github.com/cerc-io/plugeth-statediff/indexer/ipld"
+	"github.com/cerc-io/plugeth-statediff/indexer/mocks"
+	"github.com/cerc-io/plugeth-statediff/indexer/models"
+	"github.com/cerc-io/plugeth-statediff/indexer/shared"
 )

 var (
@@ -51,11 +50,6 @@ var (
 )

 func init() {
-	if os.Getenv("MODE") != "statediff" {
-		fmt.Println("Skipping statediff test")
-		os.Exit(0)
-	}
-
 	// canonical block at LondonBlock height
 	mockBlock = mocks.MockBlock
 	txs, rcts := mocks.MockBlock.Transactions(), mocks.MockReceipts
@@ -20,13 +20,13 @@ import (
 	"context"
 	"testing"

+	"github.com/cerc-io/plugeth-statediff/indexer/database/file"
+	"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
+	"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
+	"github.com/cerc-io/plugeth-statediff/indexer/ipld"
+	"github.com/cerc-io/plugeth-statediff/indexer/mocks"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/statediff/indexer/database/file"
-	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
-	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
-	"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
-	"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
 	"github.com/ipfs/go-cid"
 	"github.com/multiformats/go-multihash"
 	"github.com/stretchr/testify/require"
@@ -19,11 +19,11 @@ package test
 import (
 	"testing"

+	"github.com/cerc-io/plugeth-statediff/indexer/database/file"
+	"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
+	"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
+	"github.com/cerc-io/plugeth-statediff/indexer/mocks"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/statediff/indexer/database/file"
-	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
-	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
-	"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
 	"github.com/stretchr/testify/require"
 )

@@ -21,10 +21,11 @@ import (
 	"testing"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
-	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
-	"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
 	"github.com/stretchr/testify/require"

+	"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
+	"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
+	"github.com/cerc-io/plugeth-statediff/indexer/mocks"
 )

 type res struct {
@@ -22,7 +22,7 @@ import (
 	"os"
 	"testing"

-	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
+	"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
 )

 // ListContainsString used to check if a list of strings contains a particular string
@ -76,48 +76,24 @@ func TearDownDB(t *testing.T, db sql.Database) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = tx.Exec(ctx, `DELETE FROM eth.header_cids`)
|
statements := []string{
|
||||||
if err != nil {
|
`TRUNCATE nodes`,
|
||||||
|
`TRUNCATE ipld.blocks`,
|
||||||
|
`TRUNCATE eth.header_cids`,
|
||||||
|
`TRUNCATE eth.uncle_cids`,
|
||||||
|
`TRUNCATE eth.transaction_cids`,
|
||||||
|
`TRUNCATE eth.receipt_cids`,
|
||||||
|
`TRUNCATE eth.state_cids`,
|
||||||
|
`TRUNCATE eth.storage_cids`,
|
||||||
|
`TRUNCATE eth.log_cids`,
|
||||||
|
`TRUNCATE eth_meta.watched_addresses`,
|
||||||
|
}
|
||||||
|
for _, stm := range statements {
|
||||||
|
if _, err = tx.Exec(ctx, stm); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
_, err = tx.Exec(ctx, `DELETE FROM eth.uncle_cids`)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
}
|
||||||
_, err = tx.Exec(ctx, `DELETE FROM eth.transaction_cids`)
|
if err = tx.Commit(ctx); err != nil {
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
_, err = tx.Exec(ctx, `DELETE FROM eth.receipt_cids`)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
_, err = tx.Exec(ctx, `DELETE FROM eth.state_cids`)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
_, err = tx.Exec(ctx, `DELETE FROM eth.storage_cids`)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
_, err = tx.Exec(ctx, `DELETE FROM eth.log_cids`)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
_, err = tx.Exec(ctx, `DELETE FROM ipld.blocks`)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
_, err = tx.Exec(ctx, `DELETE FROM nodes`)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
_, err = tx.Exec(ctx, `DELETE FROM eth_meta.watched_addresses`)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
err = tx.Commit(ctx)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
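A note on the teardown refactor above: the per-table DELETEs are replaced by a loop of TRUNCATE statements inside one transaction. As a hedged sketch only (not part of this PR), Postgres also accepts a single TRUNCATE over multiple tables, which keeps teardown to one round trip; this assumes the same ctx/tx values used in TearDownDB above.

	// Hypothetical variant, not in this PR: one multi-table TRUNCATE statement.
	_, err = tx.Exec(ctx, `TRUNCATE nodes, ipld.blocks, eth.header_cids, eth.uncle_cids,
		eth.transaction_cids, eth.receipt_cids, eth.state_cids, eth.storage_cids,
		eth.log_cids, eth_meta.watched_addresses`)
	if err != nil {
		t.Fatal(err)
	}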
190  main/flags.go  (new file)
@@ -0,0 +1,190 @@
package main

import (
	"context"
	"flag"
	"os"

	"github.com/cerc-io/plugeth-statediff"
	"github.com/cerc-io/plugeth-statediff/indexer/database/dump"
	"github.com/cerc-io/plugeth-statediff/indexer/database/file"
	"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
	"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
	"github.com/cerc-io/plugeth-statediff/indexer/shared"
	"github.com/cerc-io/plugeth-statediff/utils"
)

var (
	Flags = *flag.NewFlagSet("statediff", flag.PanicOnError)

	enableStatediff bool
	config          = statediff.Config{
		Context: context.Background(),
	}
	dbType     = shared.POSTGRES
	dbDumpDst  = dump.STDOUT
	dbConfig   = postgres.Config{Driver: postgres.PGX}
	fileConfig = file.Config{Mode: file.CSV}
)

func init() {
	Flags.BoolVar(&enableStatediff,
		"statediff", false,
		"Enables the processing of state diffs between each block",
	)
	Flags.BoolVar(&config.EnableWriteLoop,
		"statediff.writing", false,
		"Activates progressive writing of state diffs to database as new blocks are synced",
	)
	Flags.StringVar(&config.ID,
		"statediff.db.nodeid", "",
		"Node ID to use when writing state diffs to database",
	)
	Flags.StringVar(&config.ClientName,
		"statediff.db.clientname", "go-ethereum",
		"Client name to use when writing state diffs to database",
	)
	Flags.UintVar(&config.NumWorkers,
		"statediff.workers", 1,
		"Number of concurrent workers to use during statediff processing (default 1)",
	)
	Flags.BoolVar(&config.WaitForSync,
		"statediff.waitforsync", false,
		"Should the statediff service wait for geth to catch up to the head of the chain?",
	)
	Flags.Uint64Var(&config.BackfillCheckPastBlocks,
		"statediff.backfillcheckpastblocks", 7200,
		"Number of blocks behind the startup statediff position to check (and fill) for gaps when head tracking",
	)
	Flags.Uint64Var(&config.BackfillMaxHeadGap,
		"statediff.backfillmaxheadgap", 7200,
		"Maximum gap between the startup statediff and startup head positions that can be backfilled",
	)

	Flags.Var(&dbType,
		"statediff.db.type",
		"Statediff database type (current options: postgres, file, dump)",
	)
	Flags.StringVar(&dbDumpDst,
		"statediff.dump.dst", "stdout",
		"Statediff database dump destination (default is stdout)",
	)

	Flags.Var(&dbConfig.Driver,
		"statediff.db.driver",
		"Statediff database driver type",
	)
	Flags.StringVar(&dbConfig.Hostname,
		"statediff.db.host", "localhost",
		"Statediff database hostname/ip",
	)
	Flags.IntVar(&dbConfig.Port,
		"statediff.db.port", 5432,
		"Statediff database port",
	)
	Flags.StringVar(&dbConfig.DatabaseName,
		"statediff.db.name", "",
		"Statediff database name",
	)
	Flags.StringVar(&dbConfig.Password,
		"statediff.db.password", "",
		"Statediff database password",
	)
	Flags.StringVar(&dbConfig.Username,
		"statediff.db.user", "postgres",
		"Statediff database username",
	)
	Flags.DurationVar(&dbConfig.MaxConnLifetime,
		"statediff.db.maxconnlifetime", 0,
		"Statediff database maximum connection lifetime (in seconds)",
	)
	Flags.DurationVar(&dbConfig.MaxConnIdleTime,
		"statediff.db.maxconnidletime", 0,
		"Statediff database maximum connection idle time (in seconds)",
	)
	Flags.IntVar(&dbConfig.MaxConns,
		"statediff.db.maxconns", 0,
		"Statediff database maximum connections",
	)
	Flags.IntVar(&dbConfig.MinConns,
		"statediff.db.minconns", 0,
		"Statediff database minimum connections",
	)
	Flags.IntVar(&dbConfig.MaxIdle,
		"statediff.db.maxidleconns", 0,
		"Statediff database maximum idle connections",
	)
	Flags.DurationVar(&dbConfig.ConnTimeout,
		"statediff.db.conntimeout", 0,
		"Statediff database connection timeout (in seconds)",
	)
	Flags.BoolVar(&dbConfig.Upsert,
		"statediff.db.upsert", false,
		"Should the statediff service overwrite data existing in the database?",
	)
	Flags.BoolVar(&dbConfig.CopyFrom,
		"statediff.db.copyfrom", false,
		"Should the statediff service use COPY FROM for multiple inserts? (Note: pgx only)",
	)
	Flags.BoolVar(&dbConfig.LogStatements,
		"statediff.db.logstatements", false,
		"Should the statediff service log all database statements? (Note: pgx only)",
	)

	Flags.Var(&fileConfig.Mode,
		"statediff.file.mode",
		"Statediff file writing mode (current options: csv, sql)",
	)
	Flags.StringVar(&fileConfig.OutputDir,
		"statediff.file.csvdir", "",
		"Full path of output directory to write statediff data out to when operating in csv file mode",
	)
	Flags.StringVar(&fileConfig.FilePath,
		"statediff.file.path", "",
		"Full path (including filename) to write statediff data out to when operating in sql file mode",
	)
	Flags.StringVar(&fileConfig.WatchedAddressesFilePath,
		"statediff.file.wapath", "",
		"Full path (including filename) to write statediff watched addresses out to when operating in file mode",
	)
}

func GetConfig() statediff.Config {
	initConfig()
	return config
}

func initConfig() {
	if !enableStatediff {
		config = statediff.Config{}
		return
	}

	if config.ID == "" {
		utils.Fatalf("Must specify node ID for statediff DB output")
	}

	var indexerConfig interfaces.Config
	switch dbType {
	case shared.FILE:
		indexerConfig = fileConfig
	case shared.POSTGRES:
		dbConfig.ID = config.ID
		dbConfig.ClientName = config.ClientName
		indexerConfig = dbConfig
	case shared.DUMP:
		// map each dump destination to its matching writer
		switch dbDumpDst {
		case dump.STDERR:
			indexerConfig = dump.Config{Dump: os.Stderr}
		case dump.STDOUT:
			indexerConfig = dump.Config{Dump: os.Stdout}
		case dump.DISCARD:
			indexerConfig = dump.Config{Dump: dump.Discard}
		default:
			utils.Fatalf("unrecognized dump destination: %s", dbDumpDst)
		}
	default:
		utils.Fatalf("unrecognized database type: %s", dbType)
	}
	config.IndexerConfig = indexerConfig
}
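As a brief usage sketch (an assumption for orientation, not part of the diff): the plugin's flag set is parsed from the `-statediff.*` arguments it is handed, after which GetConfig resolves the indexer configuration from the `-statediff.db.type` selection. The argument values below are illustrative only.

	// Hypothetical illustration of the parse-then-configure flow, within this package.
	func exampleParseFlags() statediff.Config {
		args := []string{
			"-statediff", "-statediff.writing",
			"-statediff.db.nodeid", "node1",
			"-statediff.db.name", "cerc_testing",
		}
		// Flags is declared with flag.PanicOnError, so an unknown flag panics here.
		if err := Flags.Parse(args); err != nil {
			panic(err)
		}
		// GetConfig builds config.IndexerConfig for the selected backend.
		return GetConfig()
	}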
80  main/main.go  (new file)
@@ -0,0 +1,80 @@
package main

import (
	"strconv"

	geth_flags "github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/openrelayxyz/plugeth-utils/core"
	"github.com/openrelayxyz/plugeth-utils/restricted"

	statediff "github.com/cerc-io/plugeth-statediff"
	"github.com/cerc-io/plugeth-statediff/adapt"
	ind "github.com/cerc-io/plugeth-statediff/indexer"
	"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
	"github.com/cerc-io/plugeth-statediff/indexer/node"
	"github.com/cerc-io/plugeth-statediff/utils/log"
)

var (
	pluginLoader core.PluginLoader
	gethContext  core.Context
	service      *statediff.Service
	blockchain   statediff.BlockChain
)

func Initialize(ctx core.Context, pl core.PluginLoader, logger core.Logger) {
	log.SetDefaultLogger(logger)

	pluginLoader = pl
	gethContext = ctx

	log.Debug("Initialized statediff plugin")
}

func InitializeNode(stack core.Node, b core.Backend) {
	backend := b.(restricted.Backend)

	networkid, err := strconv.ParseUint(gethContext.String(geth_flags.NetworkIdFlag.Name), 10, 64)
	if err != nil {
		log.Error("cannot parse network ID", "error", err)
		return
	}
	serviceConfig := GetConfig()
	blockchain = statediff.NewPluginBlockChain(backend)

	var indexer interfaces.StateDiffIndexer
	if serviceConfig.IndexerConfig != nil {
		info := node.Info{
			GenesisBlock: blockchain.GetBlockByNumber(0).Hash().String(),
			NetworkID:    strconv.FormatUint(networkid, 10),
			ChainID:      backend.ChainConfig().ChainID.Uint64(),
			ID:           serviceConfig.ID,
			ClientName:   serviceConfig.ClientName,
		}
		var err error
		_, indexer, err = ind.NewStateDiffIndexer(serviceConfig.Context,
			adapt.ChainConfig(backend.ChainConfig()), info, serviceConfig.IndexerConfig)
		if err != nil {
			log.Error("failed to construct indexer", "error", err)
		}
	}
	service, err := statediff.NewService(serviceConfig, blockchain, backend, indexer)
	if err != nil {
		log.Error("failed to construct service", "error", err)
	}
	if err = service.Start(); err != nil {
		log.Error("failed to start service", "error", err)
		return
	}
}

func GetAPIs(stack core.Node, backend core.Backend) []core.API {
	return []core.API{
		{
			Namespace: statediff.APIName,
			Version:   statediff.APIVersion,
			Service:   statediff.NewPublicAPI(service),
			Public:    true,
		},
	}
}

Review thread on Initialize (resolved):
  i-norden: Is this from geth upstream or here for future use? If not, lets remove the commented out stuff.
  roysc: no, just left over from debugging logging
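For orientation, a hedged sketch of reaching the API once the plugin is loaded: plugeth registers the core.API entries returned by GetAPIs with its RPC server, so a client call would presumably use the "statediff" namespace with geth's usual namespace_method naming. The endpoint URL, method name, result type, and parameter shape below are assumptions for illustration, not taken from this PR.

	// Hypothetical client-side call against a plugeth node running this plugin.
	package main

	import (
		"context"
		"fmt"

		"github.com/ethereum/go-ethereum/rpc"
	)

	func main() {
		client, err := rpc.Dial("http://localhost:8545") // assumed HTTP RPC endpoint
		if err != nil {
			panic(err)
		}
		defer client.Close()

		// "statediff_writeStateDiffAt" is assumed to map to Service.WriteStateDiffAt;
		// the params object shape here is illustrative only.
		var result interface{}
		err = client.CallContext(context.Background(), &result, "statediff_writeStateDiffAt",
			1000000, map[string]interface{}{"includeReceipts": true})
		if err != nil {
			panic(err)
		}
		fmt.Println("write job:", result)
	}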
@@ -18,13 +18,10 @@ package statediff_test

 import (
 	"bytes"
-	"encoding/json"
-	"fmt"
 	"io"
 	"log"
 	"math/big"
 	"os"
-	"sort"
 	"testing"

 	"github.com/ethereum/go-ethereum/common"
@@ -36,12 +33,18 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/rlp"
-	"github.com/ethereum/go-ethereum/statediff"
-	ipld2 "github.com/ethereum/go-ethereum/statediff/indexer/ipld"
-	"github.com/ethereum/go-ethereum/statediff/test_helpers"
-	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
+	statediff "github.com/cerc-io/plugeth-statediff"
+	"github.com/cerc-io/plugeth-statediff/adapt"
+	"github.com/cerc-io/plugeth-statediff/indexer/ipld"
+	"github.com/cerc-io/plugeth-statediff/test_helpers"
+	sdtypes "github.com/cerc-io/plugeth-statediff/types"
 )

+func init() {
+	test_helpers.SilenceLogs()
+}
+
 var (
 	db                                           ethdb.Database
 	genesisBlock, block0, block1, block2, block3 *types.Block
@@ -421,10 +424,6 @@ var (
 )

 func init() {
-	if os.Getenv("MODE") != "statediff" {
-		fmt.Println("Skipping statediff test")
-		os.Exit(0)
-	}
-
 	db = rawdb.NewMemoryDatabase()
 	genesisBlock = core.DefaultGenesisBlock().MustCommit(db)
 	genBy, err := rlp.EncodeToBytes(genesisBlock)
@@ -480,13 +479,8 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
 		t.Error(err)
 	}
 	params := statediff.Params{}
-	builder = statediff.NewBuilder(chain.StateCache())

-	var tests = []struct {
-		name              string
-		startingArguments statediff.Args
-		expected          *sdtypes.StateObject
-	}{
+	var tests = []test_helpers.TestCase{
 		// note that block0 (genesis) has over 1000 nodes due to the pre-allocation for the crowd-sale
 		// it is not feasible to write a unit test of that size at this time
 		{
@@ -507,26 +501,26 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
 			AccountWrapper: sdtypes.AccountWrapper{
 				Account: block1CoinbaseAccount,
 				LeafKey: block1CoinbaseHash.Bytes(),
-				CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block1CoinbaseLeafNode)).String(),
+				CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block1CoinbaseLeafNode)).String(),
 			},
 			StorageDiff: emptyStorage,
 		},
 	},
 	IPLDs: []sdtypes.IPLD{
 		{
-			CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block1RootBranchNode)).String(),
+			CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block1RootBranchNode)).String(),
 			Content: block1RootBranchNode,
 		},
 		{
-			CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block1x04BranchNode)).String(),
+			CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block1x04BranchNode)).String(),
 			Content: block1x04BranchNode,
 		},
 		{
-			CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block1x040bBranchNode)).String(),
+			CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block1x040bBranchNode)).String(),
 			Content: block1x040bBranchNode,
 		},
 		{
-			CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block1CoinbaseLeafNode)).String(),
+			CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block1CoinbaseLeafNode)).String(),
 			Content: block1CoinbaseLeafNode,
 		},
 	},
@@ -552,34 +546,34 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
 			AccountWrapper: sdtypes.AccountWrapper{
 				Account: block2CoinbaseAccount,
 				LeafKey: block2CoinbaseHash.Bytes(),
-				CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block2CoinbaseLeafNode)).String(),
+				CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block2CoinbaseLeafNode)).String(),
 			},
 			StorageDiff: emptyStorage,
 		},
 	},
 	IPLDs: []sdtypes.IPLD{
 		{
-			CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block2RootBranchNode)).String(),
+			CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block2RootBranchNode)).String(),
 			Content: block2RootBranchNode,
 		},
 		{
-			CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block2x00BranchNode)).String(),
+			CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block2x00BranchNode)).String(),
 			Content: block2x00BranchNode,
 		},
 		{
-			CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block2x0008BranchNode)).String(),
+			CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block2x0008BranchNode)).String(),
 			Content: block2x0008BranchNode,
 		},
 		{
-			CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block2x00080dBranchNode)).String(),
+			CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block2x00080dBranchNode)).String(),
 			Content: block2x00080dBranchNode,
 		},
 		{
-			CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block2MovedPremineLeafNode)).String(),
+			CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block2MovedPremineLeafNode)).String(),
 			Content: block2MovedPremineLeafNode,
 		},
 		{
-			CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block2CoinbaseLeafNode)).String(),
+			CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block2CoinbaseLeafNode)).String(),
 			Content: block2CoinbaseLeafNode,
 		},
 	},
@@ -604,7 +598,7 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
 		AccountWrapper: sdtypes.AccountWrapper{
 			Account: block3MovedPremineAccount1,
 			LeafKey: common.HexToHash("ce573ced93917e658d10e2d9009470dad72b63c898d173721194a12f2ae5e190").Bytes(),
-			CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3MovedPremineLeafNode1)).String(),
+			CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block3MovedPremineLeafNode1)).String(),
 		},
 		StorageDiff: emptyStorage,
 	},
@@ -613,50 +607,50 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
 		AccountWrapper: sdtypes.AccountWrapper{
 			Account: block3CoinbaseAccount,
 			LeafKey: block3CoinbaseHash.Bytes(),
-			CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3CoinbaseLeafNode)).String(),
+			CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block3CoinbaseLeafNode)).String(),
 		},
 		StorageDiff: emptyStorage,
 	},
 },
 IPLDs: []sdtypes.IPLD{
 	{
-		CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3RootBranchNode)).String(),
+		CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block3RootBranchNode)).String(),
 		Content: block3RootBranchNode,
 	},
 	{
-		CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3x06BranchNode)).String(),
+		CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block3x06BranchNode)).String(),
 		Content: block3x06BranchNode,
 	},
 	{
-		CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3x060eBranchNode)).String(),
+		CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block3x060eBranchNode)).String(),
 		Content: block3x060eBranchNode,
 	},
 	{
-		CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3x0cBranchNode)).String(),
+		CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block3x0cBranchNode)).String(),
 		Content: block3x0cBranchNode,
 	},
 	{
-		CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3x0c0eBranchNode)).String(),
+		CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block3x0c0eBranchNode)).String(),
 		Content: block3x0c0eBranchNode,
 	},
 	{
-		CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3x0c0e05BranchNode)).String(),
+		CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block3x0c0e05BranchNode)).String(),
 		Content: block3x0c0e05BranchNode,
 	},
 	{
-		CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3x0c0e0507BranchNode)).String(),
+		CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block3x0c0e0507BranchNode)).String(),
 		Content: block3x0c0e0507BranchNode,
 	},
 	{
-		CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3MovedPremineLeafNode1)).String(),
+		CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block3MovedPremineLeafNode1)).String(),
 		Content: block3MovedPremineLeafNode1,
 	},
 	{
-		CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3MovedPremineLeafNode2)).String(),
+		CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block3MovedPremineLeafNode2)).String(),
 		Content: block3MovedPremineLeafNode2,
 	},
 	{
-		CID:     ipld2.Keccak256ToCid(ipld2.MEthStateTrie, crypto.Keccak256(block3CoinbaseLeafNode)).String(),
+		CID:     ipld.Keccak256ToCid(ipld.MEthStateTrie, crypto.Keccak256(block3CoinbaseLeafNode)).String(),
 		Content: block3CoinbaseLeafNode,
 	},
 },
@@ -664,41 +658,11 @@ func TestBuilderOnMainnetBlocks(t *testing.T) {
 		},
 	}

-	for _, test := range tests {
-		diff, err := builder.BuildStateDiffObject(test.startingArguments, params)
-		if err != nil {
-			t.Error(err)
-		}
-		receivedStateDiffRlp, err := rlp.EncodeToBytes(diff)
-		if err != nil {
-			t.Error(err)
-		}
-		expectedStateDiffRlp, err := rlp.EncodeToBytes(&test.expected)
-		if err != nil {
-			t.Error(err)
-		}
-		sort.Slice(receivedStateDiffRlp, func(i, j int) bool { return receivedStateDiffRlp[i] < receivedStateDiffRlp[j] })
-		sort.Slice(expectedStateDiffRlp, func(i, j int) bool { return expectedStateDiffRlp[i] < expectedStateDiffRlp[j] })
-		if !bytes.Equal(receivedStateDiffRlp, expectedStateDiffRlp) {
-			actual, err := json.Marshal(diff)
-			if err != nil {
-				t.Error(err)
-			}
-			expected, err := json.Marshal(test.expected)
-			if err != nil {
-				t.Error(err)
-			}
-			t.Logf("Test failed: %s", test.name)
-			t.Errorf("actual state diff: %s\r\n\r\n\r\nexpected state diff: %s", actual, expected)
-		}
-	}
-	if !bytes.Equal(crypto.Keccak256(block1RootBranchNode), block1.Root().Bytes()) {
-		t.Errorf("actual state root: %s\r\nexpected state root: %s", crypto.Keccak256(block1RootBranchNode), block1.Root().Bytes())
-	}
-	if !bytes.Equal(crypto.Keccak256(block2RootBranchNode), block2.Root().Bytes()) {
-		t.Errorf("actual state root: %s\r\nexpected state root: %s", crypto.Keccak256(block2RootBranchNode), block2.Root().Bytes())
-	}
-	if !bytes.Equal(crypto.Keccak256(block3RootBranchNode), block3.Root().Bytes()) {
-		t.Errorf("actual state root: %s\r\nexpected state root: %s", crypto.Keccak256(block3RootBranchNode), block3.Root().Bytes())
-	}
+	test_helpers.RunBuilderTests(t,
+		statediff.NewBuilder(adapt.GethStateView(chain.StateCache())),
+		tests, params, test_helpers.CheckedRoots{
+			block1: block1RootBranchNode,
+			block2: block2RootBranchNode,
+			block3: block3RootBranchNode,
+		})
 }
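The root verification that used to be inlined above (hashing each recorded root branch node and comparing it to the block's state root) now happens inside test_helpers.RunBuilderTests via CheckedRoots. A minimal sketch of that check, assuming CheckedRoots maps blocks to their expected root branch node RLP and assuming go-ethereum's crypto and core/types packages are in scope; the real helper lives in test_helpers and is not shown in this diff.

	// Hypothetical sketch of the check RunBuilderTests is expected to perform.
	func checkRoots(t *testing.T, roots map[*types.Block][]byte) {
		for block, rootBranchNode := range roots {
			// the Keccak-256 of the root branch node must equal the block's state root
			if !bytes.Equal(crypto.Keccak256(rootBranchNode), block.Root().Bytes()) {
				t.Errorf("state root mismatch for block %d", block.NumberU64())
			}
		}
	}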
@@ -17,45 +17,45 @@
 package statediff

 import (
-	"fmt"
 	"time"

+	"github.com/cerc-io/plugeth-statediff/utils/log"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/log"
 )

-func countStateDiffBegin(block *types.Block) (time.Time, log.Logger) {
+func countStateDiffBegin(block *types.Block, logger log.Logger) time.Time {
 	start := time.Now()
-	logger := log.New("hash", block.Hash().Hex(), "number", block.NumberU64())

 	defaultStatediffMetrics.underway.Inc(1)
-	logger.Debug(fmt.Sprintf("writeStateDiff BEGIN [underway=%d, succeeded=%d, failed=%d, total_time=%dms]",
-		defaultStatediffMetrics.underway.Count(),
-		defaultStatediffMetrics.succeeded.Count(),
-		defaultStatediffMetrics.failed.Count(),
-		defaultStatediffMetrics.totalProcessingTime.Value(),
-	))
+	logger.Debug("writeStateDiff BEGIN",
+		"underway", defaultStatediffMetrics.underway.Count(),
+		"succeeded", defaultStatediffMetrics.succeeded.Count(),
+		"failed", defaultStatediffMetrics.failed.Count(),
+		"total_time", defaultStatediffMetrics.totalProcessingTime.Value(),
+	)

-	return start, logger
+	return start
 }

-func countStateDiffEnd(start time.Time, logger log.Logger, err error) time.Duration {
+func countStateDiffEnd(start time.Time, logger log.Logger, err *error) time.Duration {
 	duration := time.Since(start)
 	defaultStatediffMetrics.underway.Dec(1)
-	if nil == err {
-		defaultStatediffMetrics.succeeded.Inc(1)
-	} else {
+	failed := nil != err && nil != *err
+	if failed {
 		defaultStatediffMetrics.failed.Inc(1)
+	} else {
+		defaultStatediffMetrics.succeeded.Inc(1)
 	}
 	defaultStatediffMetrics.totalProcessingTime.Inc(duration.Milliseconds())

-	logger.Debug(fmt.Sprintf("writeStateDiff END (duration=%dms, err=%t) [underway=%d, succeeded=%d, failed=%d, total_time=%dms]",
-		duration.Milliseconds(), nil != err,
-		defaultStatediffMetrics.underway.Count(),
-		defaultStatediffMetrics.succeeded.Count(),
-		defaultStatediffMetrics.failed.Count(),
-		defaultStatediffMetrics.totalProcessingTime.Value(),
-	))
+	logger.Debug("writeStateDiff END",
+		"duration", duration,
+		"error", failed,
+		"underway", defaultStatediffMetrics.underway.Count(),
+		"succeeded", defaultStatediffMetrics.succeeded.Count(),
+		"failed", defaultStatediffMetrics.failed.Count(),
+		"total_time", defaultStatediffMetrics.totalProcessingTime.Value(),
+	)

 	return duration
 }
@@ -67,10 +67,10 @@ func countApiRequestBegin(methodName string, blockHashOrNumber interface{}) (tim
 	defaultStatediffMetrics.apiRequests.Inc(1)
 	defaultStatediffMetrics.apiRequestsUnderway.Inc(1)

-	logger.Debug(fmt.Sprintf("statediff API BEGIN [underway=%d, requests=%d])",
-		defaultStatediffMetrics.apiRequestsUnderway.Count(),
-		defaultStatediffMetrics.apiRequests.Count(),
-	))
+	logger.Debug("statediff API BEGIN",
+		"underway", defaultStatediffMetrics.apiRequestsUnderway.Count(),
+		"requests", defaultStatediffMetrics.apiRequests.Count(),
+	)

 	return start, logger
 }
@@ -79,11 +79,12 @@ func countApiRequestEnd(start time.Time, logger log.Logger, err error) time.Dura
 	duration := time.Since(start)
 	defaultStatediffMetrics.apiRequestsUnderway.Dec(1)

-	logger.Debug(fmt.Sprintf("statediff API END (duration=%dms, err=%t) [underway=%d, requests=%d]",
-		duration.Milliseconds(), nil != err,
-		defaultStatediffMetrics.apiRequestsUnderway.Count(),
-		defaultStatediffMetrics.apiRequests.Count(),
-	))
+	logger.Debug("statediff API END",
+		"duration", duration,
+		"error", err != nil,
+		"underway", defaultStatediffMetrics.apiRequestsUnderway.Count(),
+		"requests", defaultStatediffMetrics.apiRequests.Count(),
+	)

 	return duration
 }
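countStateDiffEnd now receives a pointer to the error so it can be wired up with defer and still observe whatever error value is assigned later in the calling function. A minimal sketch of the intended call pattern follows; the caller's name and body are assumptions for illustration, only the two helper signatures come from the diff above.

	// Hypothetical caller, showing why err is passed by pointer: the deferred call
	// runs after the named return value has been assigned.
	func (sds *Service) writeStateDiffFor(block *types.Block, logger log.Logger) (err error) {
		start := countStateDiffBegin(block, logger)
		defer countStateDiffEnd(start, logger, &err)

		// ... build and index the diff; any failure is assigned to err before returning ...
		return err
	}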
27  scripts/integration-setup.sh  (new executable file)
@@ -0,0 +1,27 @@
#!/bin/bash

set -eux

cluster="${1:-test}"
laconic_so="${LACONIC_SO:-laconic-so} --stack fixturenet-plugeth-tx --verbose"

CONFIG_DIR=$(readlink -f "${CONFIG_DIR:-$(mktemp -d)}")

# By default assume we are running in the project root
export CERC_REPO_BASE_DIR="${CERC_REPO_BASE_DIR:-$(realpath $(git rev-parse --show-toplevel)/..)}"

# v5 migrations only go up to version 18
echo CERC_STATEDIFF_DB_GOOSE_MIN_VER=18 >> $CONFIG_DIR/stack.env

# Build and deploy a cluster with only what we need from the stack
$laconic_so setup-repositories \
  --exclude github.com/dboreham/foundry,github.com/cerc-io/tx-spammer,github.com/cerc-io/ipld-eth-server,git.vdb.to/cerc-io/plugeth,git.vdb.to/cerc-io/plugeth-statediff \
  --branches-file ./test/stack-refs.txt

$laconic_so build-containers \
  --exclude cerc/ipld-eth-server,cerc/keycloak,cerc/tx-spammer,cerc/foundry,cerc/plugeth,cerc/plugeth-statediff

$laconic_so deploy \
  --exclude foundry,keycloak,tx-spammer,ipld-eth-server \
  --env-file $CONFIG_DIR/stack.env \
  --cluster "$cluster" up
946  service.go
327  service_test.go
@@ -17,33 +17,36 @@
 package statediff_test

 import (
-	"bytes"
 	"context"
 	"errors"
 	"math/big"
 	"math/rand"
-	"reflect"
 	"sync"
 	"testing"
 	"time"

 	"github.com/stretchr/testify/require"

+	"github.com/ethereum/go-ethereum"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/internal/ethapi"
 	"github.com/ethereum/go-ethereum/rlp"
-	"github.com/ethereum/go-ethereum/rpc"
-	statediff "github.com/ethereum/go-ethereum/statediff"
-	"github.com/ethereum/go-ethereum/statediff/test_helpers/mocks"
-	types2 "github.com/ethereum/go-ethereum/statediff/types"
 	"github.com/ethereum/go-ethereum/trie"

+	statediff "github.com/cerc-io/plugeth-statediff"
+	"github.com/cerc-io/plugeth-statediff/test_helpers"
+	"github.com/cerc-io/plugeth-statediff/test_helpers/mocks"
+	sdtypes "github.com/cerc-io/plugeth-statediff/types"
 )

+func init() {
+	test_helpers.SilenceLogs()
+}
+
 func TestServiceLoop(t *testing.T) {
-	testErrorInChainEventLoop(t)
-	testErrorInBlockLoop(t)
+	t.Run("error in chain event loop", testErrorInChainEventLoop)
+	t.Run("error in block loop", testErrorInBlockLoop)
 }

 var (
@@ -99,17 +102,17 @@ func testErrorInChainEventLoop(t *testing.T) {
 	blockChain := mocks.BlockChain{}
 	serviceQuit := make(chan bool)
 	service := statediff.Service{
-		Mutex:             sync.Mutex{},
 		Builder:           &builder,
 		BlockChain:        &blockChain,
 		QuitChan:          serviceQuit,
-		Subscriptions:     make(map[common.Hash]map[rpc.ID]statediff.Subscription),
+		Subscriptions:     make(map[common.Hash]map[statediff.SubID]statediff.Subscription),
 		SubscriptionTypes: make(map[common.Hash]statediff.Params),
 		BlockCache:        statediff.NewBlockCache(1),
 	}
 	payloadChan := make(chan statediff.Payload, 2)
 	quitChan := make(chan bool)
-	service.Subscribe(rpc.NewID(), payloadChan, quitChan, defaultParams)
+	service.Subscribe(payloadChan, quitChan, defaultParams)
+	// FIXME why is this here?
 	testRoot2 = common.HexToHash("0xTestRoot2")
 	blockMapping := make(map[common.Hash]*types.Block)
 	blockMapping[parentBlock1.Hash()] = parentBlock1
@@ -132,12 +135,9 @@ func testErrorInChainEventLoop(t *testing.T) {
 		}
 		wg.Done()
 	}()
-	service.Loop(eventsChannel)
+	service.PublishLoop(eventsChannel)
 	wg.Wait()
-	if len(payloads) != 2 {
-		t.Error("Test failure:", t.Name())
-		t.Logf("Actual number of payloads does not equal expected.\nactual: %+v\nexpected: 3", len(payloads))
-	}
+	require.Equal(t, 2, len(payloads), "number of payloads")

 	testReceipts1Rlp, err := rlp.EncodeToBytes(&testReceipts1)
 	if err != nil {
@@ -149,34 +149,16 @@ func testErrorInChainEventLoop(t *testing.T) {
 	}
 	expectedReceiptsRlp := [][]byte{testReceipts1Rlp, testReceipts2Rlp, nil}
 	for i, payload := range payloads {
-		if !bytes.Equal(payload.ReceiptsRlp, expectedReceiptsRlp[i]) {
-			t.Error("Test failure:", t.Name())
-			t.Logf("Actual receipt rlp for payload %d does not equal expected.\nactual: %+v\nexpected: %+v", i, payload.ReceiptsRlp, expectedReceiptsRlp[i])
-		}
+		require.Equal(t, expectedReceiptsRlp[i], payload.ReceiptsRlp, "payload %d", i)
 	}

-	if !reflect.DeepEqual(builder.Params, defaultParams) {
-		t.Error("Test failure:", t.Name())
-		t.Logf("Actual params does not equal expected.\nactual:%+v\nexpected: %+v", builder.Params, defaultParams)
-	}
-	if !bytes.Equal(builder.Args.BlockHash.Bytes(), testBlock2.Hash().Bytes()) {
-		t.Error("Test failure:", t.Name())
-		t.Logf("Actual blockhash does not equal expected.\nactual:%x\nexpected: %x", builder.Args.BlockHash.Bytes(), testBlock2.Hash().Bytes())
-	}
-	if !bytes.Equal(builder.Args.OldStateRoot.Bytes(), parentBlock2.Root().Bytes()) {
-		t.Error("Test failure:", t.Name())
-		t.Logf("Actual root does not equal expected.\nactual:%x\nexpected: %x", builder.Args.OldStateRoot.Bytes(), parentBlock2.Root().Bytes())
-	}
-	if !bytes.Equal(builder.Args.NewStateRoot.Bytes(), testBlock2.Root().Bytes()) {
-		t.Error("Test failure:", t.Name())
-		t.Logf("Actual root does not equal expected.\nactual:%x\nexpected: %x", builder.Args.NewStateRoot.Bytes(), testBlock2.Root().Bytes())
-	}
+	require.Equal(t, builder.Params, defaultParams)
+	require.Equal(t, testBlock2.Hash(), builder.Args.BlockHash)
+	require.Equal(t, parentBlock2.Root(), builder.Args.OldStateRoot)
+	require.Equal(t, testBlock2.Root(), builder.Args.NewStateRoot)
 	//look up the parent block from its hash
 	expectedHashes := []common.Hash{testBlock1.ParentHash(), testBlock2.ParentHash()}
-	if !reflect.DeepEqual(blockChain.HashesLookedUp, expectedHashes) {
-		t.Error("Test failure:", t.Name())
-		t.Logf("Actual looked up parent hashes does not equal expected.\nactual:%+v\nexpected: %+v", blockChain.HashesLookedUp, expectedHashes)
-	}
+	require.Equal(t, expectedHashes, blockChain.HashesLookedUp)
 }

 func testErrorInBlockLoop(t *testing.T) {
@@ -187,13 +169,13 @@ func testErrorInBlockLoop(t *testing.T) {
 		Builder:           &builder,
 		BlockChain:        &blockChain,
 		QuitChan:          make(chan bool),
-		Subscriptions:     make(map[common.Hash]map[rpc.ID]statediff.Subscription),
+		Subscriptions:     make(map[common.Hash]map[statediff.SubID]statediff.Subscription),
 		SubscriptionTypes: make(map[common.Hash]statediff.Params),
 		BlockCache:        statediff.NewBlockCache(1),
 	}
 	payloadChan := make(chan statediff.Payload)
 	quitChan := make(chan bool)
-	service.Subscribe(rpc.NewID(), payloadChan, quitChan, defaultParams)
+	service.Subscribe(payloadChan, quitChan, defaultParams)
 	blockMapping := make(map[common.Hash]*types.Block)
 	blockMapping[parentBlock1.Hash()] = parentBlock1
 	blockChain.SetBlocksForHashes(blockMapping)
@@ -205,28 +187,16 @@ func testErrorInBlockLoop(t *testing.T) {
 		case <-quitChan:
 		}
 	}()
-	service.Loop(eventsChannel)
+	service.PublishLoop(eventsChannel)

-	if !reflect.DeepEqual(builder.Params, defaultParams) {
-		t.Error("Test failure:", t.Name())
-		t.Logf("Actual params does not equal expected.\nactual:%+v\nexpected: %+v", builder.Params, defaultParams)
-	}
-	if !bytes.Equal(builder.Args.BlockHash.Bytes(), testBlock1.Hash().Bytes()) {
-		t.Error("Test failure:", t.Name())
-		t.Logf("Actual blockhash does not equal expected.\nactual:%+v\nexpected: %x", builder.Args.BlockHash.Bytes(), testBlock1.Hash().Bytes())
-	}
-	if !bytes.Equal(builder.Args.OldStateRoot.Bytes(), parentBlock1.Root().Bytes()) {
-		t.Error("Test failure:", t.Name())
-		t.Logf("Actual old state root does not equal expected.\nactual:%+v\nexpected: %x", builder.Args.OldStateRoot.Bytes(), parentBlock1.Root().Bytes())
-	}
-	if !bytes.Equal(builder.Args.NewStateRoot.Bytes(), testBlock1.Root().Bytes()) {
-		t.Error("Test failure:", t.Name())
-		t.Logf("Actual new state root does not equal expected.\nactual:%+v\nexpected: %x", builder.Args.NewStateRoot.Bytes(), testBlock1.Root().Bytes())
-	}
+	require.Equal(t, defaultParams, builder.Params)
+	require.Equal(t, testBlock1.Hash(), builder.Args.BlockHash)
+	require.Equal(t, parentBlock1.Root(), builder.Args.OldStateRoot)
+	require.Equal(t, testBlock1.Root(), builder.Args.NewStateRoot)
 }

 func TestGetStateDiffAt(t *testing.T) {
-	mockStateDiff := types2.StateObject{
+	mockStateDiff := sdtypes.StateObject{
 		BlockNumber: testBlock1.Number(),
 		BlockHash:   testBlock1.Hash(),
 	}
@@ -260,11 +230,10 @@ func TestGetStateDiffAt(t *testing.T) {
 	blockChain.SetBlockForNumber(testBlock1, testBlock1.NumberU64())
 	blockChain.SetReceiptsForHash(testBlock1.Hash(), testReceipts1)
 	service := statediff.Service{
-		Mutex:             sync.Mutex{},
 		Builder:           &builder,
 		BlockChain:        &blockChain,
 		QuitChan:          make(chan bool),
-		Subscriptions:     make(map[common.Hash]map[rpc.ID]statediff.Subscription),
+		Subscriptions:     make(map[common.Hash]map[statediff.SubID]statediff.Subscription),
 		SubscriptionTypes: make(map[common.Hash]statediff.Params),
 		BlockCache:        statediff.NewBlockCache(1),
 	}
@@ -277,63 +246,37 @@ func TestGetStateDiffAt(t *testing.T) {
 		t.Error(err)
 	}

-	if !reflect.DeepEqual(builder.Params, defaultParams) {
-		t.Error("Test failure:", t.Name())
-		t.Logf("Actual params does not equal expected.\nactual:%+v\nexpected: %+v", builder.Params, defaultParams)
-	}
-	if !bytes.Equal(builder.Args.BlockHash.Bytes(), testBlock1.Hash().Bytes()) {
-		t.Error("Test failure:", t.Name())
-		t.Logf("Actual blockhash does not equal expected.\nactual:%+v\nexpected: %x", builder.Args.BlockHash.Bytes(), testBlock1.Hash().Bytes())
-	}
-	if !bytes.Equal(builder.Args.OldStateRoot.Bytes(), parentBlock1.Root().Bytes()) {
-		t.Error("Test failure:", t.Name())
-		t.Logf("Actual old state root does not equal expected.\nactual:%+v\nexpected: %x", builder.Args.OldStateRoot.Bytes(), parentBlock1.Root().Bytes())
-	}
-	if !bytes.Equal(builder.Args.NewStateRoot.Bytes(), testBlock1.Root().Bytes()) {
-		t.Error("Test failure:", t.Name())
-		t.Logf("Actual new state root does not equal expected.\nactual:%+v\nexpected: %x", builder.Args.NewStateRoot.Bytes(), testBlock1.Root().Bytes())
-	}
-	if !bytes.Equal(expectedStateDiffPayloadRlp, stateDiffPayloadRlp) {
-		t.Error("Test failure:", t.Name())
-		t.Logf("Actual state diff payload does not equal expected.\nactual:%+v\nexpected: %+v", expectedStateDiffPayload, stateDiffPayload)
-	}
+	require.Equal(t, defaultParams, builder.Params)
+	require.Equal(t, testBlock1.Hash(), builder.Args.BlockHash)
+	require.Equal(t, parentBlock1.Root(), builder.Args.OldStateRoot)
+	require.Equal(t, testBlock1.Root(), builder.Args.NewStateRoot)
+	require.Equal(t, stateDiffPayloadRlp, expectedStateDiffPayloadRlp)
 }

 type writeSub struct {
-	sub        *rpc.ClientSubscription
-	statusChan <-chan statediff.JobStatus
+	ch          <-chan statediff.JobStatus
+	unsubscribe func()
 }

-func makeClient(svc *statediff.Service) *rpc.Client {
-	server := rpc.NewServer()
-	api := statediff.NewPublicStateDiffAPI(svc)
-	err := server.RegisterName("statediff", api)
-	if err != nil {
-		panic(err)
-	}
-	return rpc.DialInProc(server)
+func subscribeWritesService(t *testing.T, api *statediff.PublicAPI) writeSub {
+	ctx, cancel := context.WithCancel(context.Background())
+	sub, err := api.StreamWrites(ctx)
+	require.NoError(t, err)
+	return writeSub{sub, cancel}
 }

-func subscribeWrites(client *rpc.Client) (writeSub, error) {
-	statusChan := make(chan statediff.JobStatus)
-	sub, err := client.Subscribe(context.Background(), "statediff", statusChan, "streamWrites")
-	return writeSub{sub, statusChan}, err
-}
-
-func (ws writeSub) await(job statediff.JobID, timeout time.Duration) (bool, error) {
+// awaitStatus awaits status update for writeStateDiffAt job
+func (ws writeSub) awaitStatus(job statediff.JobID, timeout time.Duration) (bool, error) {
+	deadline := time.After(timeout)
 	for {
 		select {
-		case err := <-ws.sub.Err():
-			return false, err
-		case status := <-ws.statusChan:
+		case status := <-ws.ch:
 			if status.Err != nil {
 				return false, status.Err
 			}
 			if status.ID == job {
 				return true, nil
 			}
-		case <-time.After(timeout):
+		case <-deadline:
 			return false, errors.New("timeout")
 		}
 	}
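Before the TestWriteStateDiffAt hunk below, a hedged sketch of the new channel-based subscription this helper wraps: StreamWrites is assumed to return a receive-only channel of statediff.JobStatus that closes when its context is cancelled (which is all the call sites in this diff rely on). Names and the surrounding function are illustrative.

	// Hypothetical helper showing the intended use of the API-level subscription.
	func waitForWrite(api *statediff.PublicAPI, svc *statediff.Service, height uint64, params statediff.Params) error {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel() // cancelling the context ends the stream

		statusCh, err := api.StreamWrites(ctx) // assumed: <-chan statediff.JobStatus, error
		if err != nil {
			return err
		}
		jobID := svc.WriteStateDiffAt(height, params)
		for status := range statusCh {
			if status.Err != nil {
				return status.Err
			}
			if status.ID == jobID {
				return nil // job completed
			}
		}
		return errors.New("stream closed before job completed")
	}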
@@ -349,21 +292,20 @@ func TestWriteStateDiffAt(t *testing.T) {
 	blockChain.SetBlockForNumber(testBlock1, testBlock1.NumberU64())
 	blockChain.SetReceiptsForHash(testBlock1.Hash(), testReceipts1)

-	service := statediff.NewService(&blockChain, statediff.Config{}, &mocks.Backend{}, &indexer)
-	service.Builder = &builder
-
-	// delay to avoid subscription request being sent after statediff is written,
-	// and timeout to prevent hanging just in case it still happens
-	writeDelay := 100 * time.Millisecond
-	jobTimeout := 200 * time.Millisecond
-	client := makeClient(service)
-	defer client.Close()
-
-	ws, err := subscribeWrites(client)
+	service, err := statediff.NewService(statediff.Config{}, &blockChain, &mocks.Backend{}, &indexer)
 	require.NoError(t, err)
+	service.Builder = &builder
+	api := statediff.NewPublicAPI(service)
+
+	// delay to avoid subscription request being sent after statediff is written
+	writeDelay := 200 * time.Millisecond
+	// timeout to prevent hanging just in case it still happens
+	jobTimeout := 2 * time.Second
+
+	ws := subscribeWritesService(t, api)
 	time.Sleep(writeDelay)
 	job := service.WriteStateDiffAt(testBlock1.NumberU64(), defaultParams)
-	ok, err := ws.await(job, jobTimeout)
+	ok, err := ws.awaitStatus(job, jobTimeout)
 	require.NoError(t, err)
 	require.True(t, ok)

@@ -372,40 +314,28 @@ func TestWriteStateDiffAt(t *testing.T) {
 	require.Equal(t, parentBlock1.Root(), builder.Args.OldStateRoot)
 	require.Equal(t, testBlock1.Root(), builder.Args.NewStateRoot)

-	// unsubscribe and verify we get nothing
-	// TODO - StreamWrites receives EOF error after unsubscribing. Doesn't seem to impact
-	// anything but would be good to know why.
-	ws.sub.Unsubscribe()
-	time.Sleep(writeDelay)
+	// verify we get nothing after unsubscribing
+	ws.unsubscribe()
 	job = service.WriteStateDiffAt(testBlock1.NumberU64(), defaultParams)
-	ok, _ = ws.await(job, jobTimeout)
+	ok, _ = ws.awaitStatus(job, jobTimeout)
 	require.False(t, ok)

-	client.Close()
-	client = makeClient(service)
-
 	// re-subscribe and test again
-	ws, err = subscribeWrites(client)
-	require.NoError(t, err)
+	ws = subscribeWritesService(t, api)
 	time.Sleep(writeDelay)
 	job = service.WriteStateDiffAt(testBlock1.NumberU64(), defaultParams)
-	ok, err = ws.await(job, jobTimeout)
+	ok, err = ws.awaitStatus(job, jobTimeout)
 	require.NoError(t, err)
 	require.True(t, ok)
 }

-func TestWaitForSync(t *testing.T) {
-	testWaitForSync(t)
-	testGetSyncStatus(t)
-}
-
 // This function will create a backend and service object which includes a generic Backend
-func createServiceWithMockBackend(curBlock uint64, highestBlock uint64) (*mocks.Backend, *statediff.Service) {
+func createServiceWithMockBackend(t *testing.T, curBlock uint64, highestBlock uint64) (*mocks.Backend, *statediff.Service) {
 	builder := mocks.Builder{}
 	blockChain := mocks.BlockChain{}
|
blockChain := mocks.BlockChain{}
|
||||||
backend := mocks.Backend{
|
backend := mocks.NewBackend(t, ethereum.SyncProgress{
|
||||||
StartingBlock: 1,
|
StartingBlock: 1,
|
||||||
CurrBlock: curBlock,
|
CurrentBlock: curBlock,
|
||||||
HighestBlock: highestBlock,
|
HighestBlock: highestBlock,
|
||||||
SyncedAccounts: 5,
|
SyncedAccounts: 5,
|
||||||
SyncedAccountBytes: 5,
|
SyncedAccountBytes: 5,
|
||||||
@ -419,117 +349,54 @@ func createServiceWithMockBackend(curBlock uint64, highestBlock uint64) (*mocks.
|
|||||||
HealedBytecodeBytes: 5,
|
HealedBytecodeBytes: 5,
|
||||||
HealingTrienodes: 5,
|
HealingTrienodes: 5,
|
||||||
HealingBytecode: 5,
|
HealingBytecode: 5,
|
||||||
}
|
})
|
||||||
|
|
||||||
service := &statediff.Service{
|
service := &statediff.Service{
|
||||||
Mutex: sync.Mutex{},
|
|
||||||
Builder: &builder,
|
Builder: &builder,
|
||||||
BlockChain: &blockChain,
|
BlockChain: &blockChain,
|
||||||
QuitChan: make(chan bool),
|
QuitChan: make(chan bool),
|
||||||
Subscriptions: make(map[common.Hash]map[rpc.ID]statediff.Subscription),
|
Subscriptions: make(map[common.Hash]map[statediff.SubID]statediff.Subscription),
|
||||||
SubscriptionTypes: make(map[common.Hash]statediff.Params),
|
SubscriptionTypes: make(map[common.Hash]statediff.Params),
|
||||||
BlockCache: statediff.NewBlockCache(1),
|
BlockCache: statediff.NewBlockCache(1),
|
||||||
BackendAPI: &backend,
|
BackendAPI: backend,
|
||||||
WaitForSync: true,
|
ShouldWaitForSync: true,
|
||||||
}
|
}
|
||||||
return &backend, service
|
return backend, service
|
||||||
}
|
}
|
||||||
|
|
||||||
// This function will test to make sure that the state diff waits
|
// TestWaitForSync ensures that the service waits until the blockchain has caught up to head
|
||||||
// until the blockchain has caught up to head!
|
func TestWaitForSync(t *testing.T) {
|
||||||
func testWaitForSync(t *testing.T) {
|
// Trivial case
|
||||||
t.Log("Starting Sync")
|
_, service := createServiceWithMockBackend(t, 10, 10)
|
||||||
_, service := createServiceWithMockBackend(10, 10)
|
service.WaitForSync()
|
||||||
err := service.WaitingForSync()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("Sync Failed")
|
|
||||||
}
|
|
||||||
t.Log("Sync Complete")
|
|
||||||
}
|
|
||||||
|
|
||||||
// This test will run the WaitForSync() at the start of the execusion
|
// Catching-up case
|
||||||
// It will then incrementally increase the currentBlock to match the highestBlock
|
|
||||||
// At each interval it will run the GetSyncStatus to ensure that the return value is not false.
|
|
||||||
// It will also check to make sure that the WaitForSync() function has not completed!
|
|
||||||
func testGetSyncStatus(t *testing.T) {
|
|
||||||
t.Log("Starting Get Sync Status Test")
|
|
||||||
var highestBlock uint64 = 5
|
var highestBlock uint64 = 5
|
||||||
// Create a backend and a service
|
// Create a service and a backend that is lagging behind the sync.
|
||||||
// the backend is lagging behind the sync.
|
backend, service := createServiceWithMockBackend(t, 0, highestBlock)
|
||||||
backend, service := createServiceWithMockBackend(0, highestBlock)
|
syncComplete := make(chan int, 1)
|
||||||
|
|
||||||
checkSyncComplete := make(chan int, 1)
|
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
// Start the sync function which will wait for the sync
|
service.WaitForSync()
|
||||||
// Once the sync is complete add a value to the checkSyncComplet channel
|
syncComplete <- 0
|
||||||
t.Log("Starting Sync")
|
|
||||||
err := service.WaitingForSync()
|
|
||||||
if err != nil {
|
|
||||||
t.Error("Sync Failed")
|
|
||||||
checkSyncComplete <- 1
|
|
||||||
}
|
|
||||||
t.Log("We have finally synced!")
|
|
||||||
checkSyncComplete <- 0
|
|
||||||
}()
|
}()
|
||||||
|
|
||||||
tables := []struct {
|
// Iterate blocks, updating the current synced block
|
||||||
currentBlock uint64
|
for currentBlock := uint64(0); currentBlock <= highestBlock; currentBlock++ {
|
||||||
highestBlock uint64
|
backend.SetCurrentBlock(currentBlock)
|
||||||
}{
|
if currentBlock < highestBlock {
|
||||||
{1, highestBlock},
|
// Ensure we are still waiting if we haven't actually reached head
|
||||||
{2, highestBlock},
|
require.Equal(t, len(syncComplete), 0)
|
||||||
{3, highestBlock},
|
}
|
||||||
{4, highestBlock},
|
|
||||||
{5, highestBlock},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
time.Sleep(2 * time.Second)
|
timeout := time.After(time.Second)
|
||||||
for _, table := range tables {
|
for {
|
||||||
// Iterate over each block
|
select {
|
||||||
// Once the highest block reaches the current block the sync should complete
|
case <-syncComplete:
|
||||||
|
return
|
||||||
// Update the backend current block value
|
case <-timeout:
|
||||||
t.Log("Updating Current Block to: ", table.currentBlock)
|
t.Fatal("timed out waiting for sync to complete")
|
||||||
backend.CurrBlock = table.currentBlock
|
|
||||||
pubEthAPI := ethapi.NewEthereumAPI(service.BackendAPI)
|
|
||||||
syncStatus, err := service.GetSyncStatus(pubEthAPI)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("Sync Failed")
|
|
||||||
}
|
|
||||||
|
|
||||||
time.Sleep(2 * time.Second)
|
|
||||||
|
|
||||||
// Make sure if syncStatus is false that WaitForSync has completed!
|
|
||||||
if !syncStatus && len(checkSyncComplete) == 0 {
|
|
||||||
t.Error("Sync is complete but WaitForSync is not")
|
|
||||||
}
|
|
||||||
|
|
||||||
if syncStatus && len(checkSyncComplete) == 1 {
|
|
||||||
t.Error("Sync is not complete but WaitForSync is")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure sync hasn't completed and that the checkSyncComplete channel is empty
|
|
||||||
if syncStatus && len(checkSyncComplete) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// This code will only be run if the sync is complete and the WaitForSync function is complete
|
|
||||||
|
|
||||||
// If syncstatus is complete, make sure that the blocks match
|
|
||||||
if !syncStatus && table.currentBlock != table.highestBlock {
|
|
||||||
t.Errorf("syncStatus indicated sync was complete even when current block, %d, and highest block %d aren't equal",
|
|
||||||
table.currentBlock, table.highestBlock)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure that WaitForSync completed once the current block caught up to head!
|
|
||||||
checkSyncCompleteVal := <-checkSyncComplete
|
|
||||||
if checkSyncCompleteVal != 0 {
|
|
||||||
t.Errorf("syncStatus indicated sync was complete but the checkSyncComplete has a value of %d",
|
|
||||||
checkSyncCompleteVal)
|
|
||||||
} else {
|
|
||||||
t.Log("Test Passed!")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
24
test/compose.yml
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
services:
|
||||||
|
migrations:
|
||||||
|
restart: on-failure
|
||||||
|
depends_on:
|
||||||
|
- ipld-eth-db
|
||||||
|
image: git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:v5.0.2-alpha
|
||||||
|
environment:
|
||||||
|
DATABASE_USER: "vdbm"
|
||||||
|
DATABASE_NAME: "cerc_testing"
|
||||||
|
DATABASE_PASSWORD: "password"
|
||||||
|
DATABASE_HOSTNAME: "ipld-eth-db"
|
||||||
|
DATABASE_PORT: 5432
|
||||||
|
|
||||||
|
ipld-eth-db:
|
||||||
|
image: timescale/timescaledb:latest-pg14
|
||||||
|
restart: always
|
||||||
|
environment:
|
||||||
|
POSTGRES_USER: "vdbm"
|
||||||
|
POSTGRES_DB: "cerc_testing"
|
||||||
|
POSTGRES_PASSWORD: "password"
|
||||||
|
ports:
|
||||||
|
- 127.0.0.1:8077:5432
|
||||||
|
volumes:
|
||||||
|
- ../indexer/database/file:/file_indexer
|
1
test/stack-refs.txt
Normal file
@ -0,0 +1 @@
|
|||||||
|
github.com/cerc-io/ipld-eth-db v5.0.2-alpha
|
71
test_helpers/builder.go
Normal file
@ -0,0 +1,71 @@
|
|||||||
|
package test_helpers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"sort"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/cerc-io/plugeth-statediff"
|
||||||
|
sdtypes "github.com/cerc-io/plugeth-statediff/types"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
type TestCase struct {
|
||||||
|
Name string
|
||||||
|
Args statediff.Args
|
||||||
|
Expected *sdtypes.StateObject
|
||||||
|
}
|
||||||
|
|
||||||
|
type CheckedRoots = map[*types.Block][]byte
|
||||||
|
|
||||||
|
func RunBuilderTests(
|
||||||
|
t *testing.T,
|
||||||
|
builder statediff.Builder,
|
||||||
|
tests []TestCase,
|
||||||
|
params statediff.Params,
|
||||||
|
roots CheckedRoots,
|
||||||
|
) {
|
||||||
|
for _, test := range tests {
|
||||||
|
diff, err := builder.BuildStateDiffObject(test.Args, params)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
receivedStateDiffRlp, err := rlp.EncodeToBytes(&diff)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
expectedStateDiffRlp, err := rlp.EncodeToBytes(test.Expected)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
sort.Slice(receivedStateDiffRlp, func(i, j int) bool {
|
||||||
|
return receivedStateDiffRlp[i] < receivedStateDiffRlp[j]
|
||||||
|
})
|
||||||
|
sort.Slice(expectedStateDiffRlp, func(i, j int) bool {
|
||||||
|
return expectedStateDiffRlp[i] < expectedStateDiffRlp[j]
|
||||||
|
})
|
||||||
|
if !bytes.Equal(receivedStateDiffRlp, expectedStateDiffRlp) {
|
||||||
|
actualb, err := json.Marshal(diff)
|
||||||
|
require.NoError(t, err)
|
||||||
|
expectedb, err := json.Marshal(test.Expected)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
var expected, actual interface{}
|
||||||
|
err = json.Unmarshal(expectedb, &expected)
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = json.Unmarshal(actualb, &actual)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, expected, actual, test.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Let's also confirm that our root state nodes form the state root hash in the headers
|
||||||
|
for block, node := range roots {
|
||||||
|
require.Equal(t, block.Root(), crypto.Keccak256Hash(node),
|
||||||
|
"expected root does not match actual root", block.Number())
|
||||||
|
}
|
||||||
|
}
|
@ -27,6 +27,8 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
|
|
||||||
|
"github.com/cerc-io/plugeth-statediff/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance, baseFee *big.Int, initialGasLimit uint64) *types.Block {
|
func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance, baseFee *big.Int, initialGasLimit uint64) *types.Block {
|
||||||
@ -65,7 +67,7 @@ func TestSelfDestructChainGen(i int, block *core.BlockGen) {
|
|||||||
// Block 2 is mined by TestBankAddress
|
// Block 2 is mined by TestBankAddress
|
||||||
// TestBankAddress self-destructs the contract
|
// TestBankAddress self-destructs the contract
|
||||||
block.SetCoinbase(TestBankAddress)
|
block.SetCoinbase(TestBankAddress)
|
||||||
data := common.Hex2Bytes("43D726D6")
|
data := utils.Hex2Bytes("43D726D6")
|
||||||
tx, _ := types.SignTx(types.NewTransaction(1, ContractAddr, big.NewInt(0), 100000, big.NewInt(params.GWei), data), signer, TestBankKey)
|
tx, _ := types.SignTx(types.NewTransaction(1, ContractAddr, big.NewInt(0), 100000, big.NewInt(params.GWei), data), signer, TestBankKey)
|
||||||
block.AddTx(tx)
|
block.AddTx(tx)
|
||||||
}
|
}
|
||||||
@ -97,7 +99,7 @@ func TestChainGen(i int, block *core.BlockGen) {
|
|||||||
block.SetCoinbase(Account2Addr)
|
block.SetCoinbase(Account2Addr)
|
||||||
//put function: c16431b9
|
//put function: c16431b9
|
||||||
//close function: 43d726d6
|
//close function: 43d726d6
|
||||||
data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003")
|
data := utils.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003")
|
||||||
tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(TestBankAddress), ContractAddr, big.NewInt(0), params.TxGasContractCreation, big.NewInt(params.GWei), data), signer, TestBankKey)
|
tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(TestBankAddress), ContractAddr, big.NewInt(0), params.TxGasContractCreation, big.NewInt(params.GWei), data), signer, TestBankKey)
|
||||||
block.AddTx(tx)
|
block.AddTx(tx)
|
||||||
case 3:
|
case 3:
|
||||||
@ -105,9 +107,9 @@ func TestChainGen(i int, block *core.BlockGen) {
|
|||||||
// Two set the two original slot positions to 0 and one sets another position to a new value
|
// Two set the two original slot positions to 0 and one sets another position to a new value
|
||||||
// Block 4 is mined by Account2Addr
|
// Block 4 is mined by Account2Addr
|
||||||
block.SetCoinbase(Account2Addr)
|
block.SetCoinbase(Account2Addr)
|
||||||
data1 := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
|
data1 := utils.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
|
||||||
data2 := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000")
|
data2 := utils.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000")
|
||||||
data3 := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000009")
|
data3 := utils.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000009")
|
||||||
|
|
||||||
nonce := block.TxNonce(TestBankAddress)
|
nonce := block.TxNonce(TestBankAddress)
|
||||||
tx1, _ := types.SignTx(types.NewTransaction(nonce, ContractAddr, big.NewInt(0), 100000, big.NewInt(params.InitialBaseFee), data1), signer, TestBankKey)
|
tx1, _ := types.SignTx(types.NewTransaction(nonce, ContractAddr, big.NewInt(0), 100000, big.NewInt(params.InitialBaseFee), data1), signer, TestBankKey)
|
||||||
@ -123,8 +125,8 @@ func TestChainGen(i int, block *core.BlockGen) {
|
|||||||
// It sets the one storage value to zero and the other to new value.
|
// It sets the one storage value to zero and the other to new value.
|
||||||
// Block 5 is mined by Account1Addr
|
// Block 5 is mined by Account1Addr
|
||||||
block.SetCoinbase(Account1Addr)
|
block.SetCoinbase(Account1Addr)
|
||||||
data1 := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000")
|
data1 := utils.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000")
|
||||||
data2 := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003")
|
data2 := utils.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003")
|
||||||
nonce := block.TxNonce(TestBankAddress)
|
nonce := block.TxNonce(TestBankAddress)
|
||||||
tx1, _ := types.SignTx(types.NewTransaction(nonce, ContractAddr, big.NewInt(0), 100000, big.NewInt(params.InitialBaseFee), data1), signer, TestBankKey)
|
tx1, _ := types.SignTx(types.NewTransaction(nonce, ContractAddr, big.NewInt(0), 100000, big.NewInt(params.InitialBaseFee), data1), signer, TestBankKey)
|
||||||
nonce++
|
nonce++
|
||||||
@ -135,7 +137,7 @@ func TestChainGen(i int, block *core.BlockGen) {
|
|||||||
// Block 6 has a tx from Account1Key which self-destructs the contract, it transfers no value
|
// Block 6 has a tx from Account1Key which self-destructs the contract, it transfers no value
|
||||||
// Block 6 is mined by Account2Addr
|
// Block 6 is mined by Account2Addr
|
||||||
block.SetCoinbase(Account2Addr)
|
block.SetCoinbase(Account2Addr)
|
||||||
data := common.Hex2Bytes("43D726D6")
|
data := utils.Hex2Bytes("43D726D6")
|
||||||
tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(Account1Addr), ContractAddr, big.NewInt(0), 100000, big.NewInt(params.InitialBaseFee), data), signer, Account1Key)
|
tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(Account1Addr), ContractAddr, big.NewInt(0), 100000, big.NewInt(params.InitialBaseFee), data), signer, Account1Key)
|
||||||
block.AddTx(tx)
|
block.AddTx(tx)
|
||||||
}
|
}
|
||||||
@ -158,8 +160,8 @@ func TestChainGenWithInternalLeafNode(i int, block *core.BlockGen) {
|
|||||||
// Block 3 has two transactions which set slots 223 and 648 with small values
|
// Block 3 has two transactions which set slots 223 and 648 with small values
|
||||||
// The goal here is to induce a branch node with an internalized leaf node
|
// The goal here is to induce a branch node with an internalized leaf node
|
||||||
block.SetCoinbase(TestBankAddress)
|
block.SetCoinbase(TestBankAddress)
|
||||||
data1 := common.Hex2Bytes("C16431B90000000000000000000000000000000000000000000000000000000000009dab0000000000000000000000000000000000000000000000000000000000000001")
|
data1 := utils.Hex2Bytes("C16431B90000000000000000000000000000000000000000000000000000000000009dab0000000000000000000000000000000000000000000000000000000000000001")
|
||||||
data2 := common.Hex2Bytes("C16431B90000000000000000000000000000000000000000000000000000000000019c5d0000000000000000000000000000000000000000000000000000000000000002")
|
data2 := utils.Hex2Bytes("C16431B90000000000000000000000000000000000000000000000000000000000019c5d0000000000000000000000000000000000000000000000000000000000000002")
|
||||||
|
|
||||||
nonce := block.TxNonce(TestBankAddress)
|
nonce := block.TxNonce(TestBankAddress)
|
||||||
tx1, _ := types.SignTx(types.NewTransaction(nonce, ContractAddr, big.NewInt(0), 100000, big.NewInt(params.InitialBaseFee), data1), signer, TestBankKey)
|
tx1, _ := types.SignTx(types.NewTransaction(nonce, ContractAddr, big.NewInt(0), 100000, big.NewInt(params.InitialBaseFee), data1), signer, TestBankKey)
|
||||||
|
@ -1,265 +1,74 @@
|
|||||||
// Copyright 2019 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package mocks
|
package mocks
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"testing"
|
||||||
"math/big"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum"
|
"github.com/ethereum/go-ethereum"
|
||||||
"github.com/ethereum/go-ethereum/accounts"
|
"github.com/golang/mock/gomock"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
"github.com/ethereum/go-ethereum/consensus"
|
plugeth "github.com/openrelayxyz/plugeth-utils/core"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
|
||||||
"github.com/ethereum/go-ethereum/core/bloombits"
|
|
||||||
"github.com/ethereum/go-ethereum/core/state"
|
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
|
||||||
"github.com/ethereum/go-ethereum/event"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ ethapi.Backend = &Backend{}
|
|
||||||
|
|
||||||
// Builder is a mock state diff builder
|
|
||||||
type Backend struct {
|
type Backend struct {
|
||||||
StartingBlock uint64
|
*MockBackend
|
||||||
CurrBlock uint64
|
downloader Downloader
|
||||||
HighestBlock uint64
|
|
||||||
SyncedAccounts uint64
|
|
||||||
SyncedAccountBytes uint64
|
|
||||||
SyncedBytecodes uint64
|
|
||||||
SyncedBytecodeBytes uint64
|
|
||||||
SyncedStorage uint64
|
|
||||||
SyncedStorageBytes uint64
|
|
||||||
HealedTrienodes uint64
|
|
||||||
HealedTrienodeBytes uint64
|
|
||||||
HealedBytecodes uint64
|
|
||||||
HealedBytecodeBytes uint64
|
|
||||||
HealingTrienodes uint64
|
|
||||||
HealingBytecode uint64
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// General Ethereum API
|
type Downloader struct {
|
||||||
func (backend *Backend) SyncProgress() ethereum.SyncProgress {
|
ethereum.SyncProgress
|
||||||
l := ethereum.SyncProgress{
|
}
|
||||||
StartingBlock: backend.StartingBlock,
|
|
||||||
CurrentBlock: backend.CurrBlock,
|
var _ plugeth.Backend = &Backend{}
|
||||||
HighestBlock: backend.HighestBlock,
|
var _ plugeth.Downloader = &Downloader{}
|
||||||
SyncedAccounts: backend.SyncedAccounts,
|
|
||||||
SyncedAccountBytes: backend.SyncedAccountBytes,
|
func NewBackend(t *testing.T, progress ethereum.SyncProgress) *Backend {
|
||||||
SyncedBytecodes: backend.SyncedBytecodes,
|
ctl := gomock.NewController(t)
|
||||||
SyncedBytecodeBytes: backend.SyncedBytecodeBytes,
|
dler := Downloader{progress}
|
||||||
SyncedStorage: backend.SyncedStorage,
|
ret := &Backend{
|
||||||
SyncedStorageBytes: backend.SyncedStorageBytes,
|
MockBackend: NewMockBackend(ctl),
|
||||||
HealedTrienodes: backend.HealedTrienodes,
|
downloader: dler,
|
||||||
HealedTrienodeBytes: backend.HealedTrienodeBytes,
|
|
||||||
HealedBytecodes: backend.HealedBytecodes,
|
|
||||||
HealedBytecodeBytes: backend.HealedBytecodeBytes,
|
|
||||||
HealingTrienodes: backend.HealingTrienodes,
|
|
||||||
HealingBytecode: backend.HealingBytecode,
|
|
||||||
}
|
}
|
||||||
return l
|
ret.EXPECT().Downloader().Return(&ret.downloader).AnyTimes()
|
||||||
|
return ret
|
||||||
}
|
}
|
||||||
|
|
||||||
func (backend *Backend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
|
func (b *Backend) SetCurrentBlock(block uint64) {
|
||||||
panic("not implemented") // TODO: Implement
|
b.downloader.SyncProgress.CurrentBlock = block
|
||||||
}
|
}
|
||||||
|
|
||||||
func (backend *Backend) FeeHistory(ctx context.Context, blockCount uint64, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) {
|
func (d Downloader) Progress() plugeth.Progress {
|
||||||
panic("implement me")
|
return d
|
||||||
}
|
}
|
||||||
|
|
||||||
func (backend *Backend) ChainDb() ethdb.Database {
|
func (d Downloader) StartingBlock() uint64 { return d.SyncProgress.StartingBlock }
|
||||||
panic("not implemented") // TODO: Implement
|
func (d Downloader) CurrentBlock() uint64 { return d.SyncProgress.CurrentBlock }
|
||||||
}
|
func (d Downloader) HighestBlock() uint64 { return d.SyncProgress.HighestBlock }
|
||||||
|
func (d Downloader) PulledStates() uint64 { return d.SyncProgress.PulledStates }
|
||||||
|
func (d Downloader) KnownStates() uint64 { return d.SyncProgress.KnownStates }
|
||||||
|
func (d Downloader) SyncedAccounts() uint64 { return d.SyncProgress.SyncedAccounts }
|
||||||
|
func (d Downloader) SyncedAccountBytes() uint64 { return d.SyncProgress.SyncedAccountBytes }
|
||||||
|
func (d Downloader) SyncedBytecodes() uint64 { return d.SyncProgress.SyncedBytecodes }
|
||||||
|
func (d Downloader) SyncedBytecodeBytes() uint64 { return d.SyncProgress.SyncedBytecodeBytes }
|
||||||
|
func (d Downloader) SyncedStorage() uint64 { return d.SyncProgress.SyncedStorage }
|
||||||
|
func (d Downloader) SyncedStorageBytes() uint64 { return d.SyncProgress.SyncedStorageBytes }
|
||||||
|
func (d Downloader) HealedTrienodes() uint64 { return d.SyncProgress.HealedTrienodes }
|
||||||
|
func (d Downloader) HealedTrienodeBytes() uint64 { return d.SyncProgress.HealedTrienodeBytes }
|
||||||
|
func (d Downloader) HealedBytecodes() uint64 { return d.SyncProgress.HealedBytecodes }
|
||||||
|
func (d Downloader) HealedBytecodeBytes() uint64 { return d.SyncProgress.HealedBytecodeBytes }
|
||||||
|
func (d Downloader) HealingTrienodes() uint64 { return d.SyncProgress.HealingTrienodes }
|
||||||
|
func (d Downloader) HealingBytecode() uint64 { return d.SyncProgress.HealingBytecode }
|
||||||
|
|
||||||
func (backend *Backend) AccountManager() *accounts.Manager {
|
func TestBackend(t *testing.T) {
|
||||||
panic("not implemented") // TODO: Implement
|
b := NewBackend(t, ethereum.SyncProgress{StartingBlock: 42})
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) ExtRPCEnabled() bool {
|
block := b.Downloader().Progress().StartingBlock()
|
||||||
panic("not implemented") // TODO: Implement
|
if 42 != block {
|
||||||
}
|
t.Fatalf("wrong StartingBlock; expected %d, got %d", 42, block)
|
||||||
|
}
|
||||||
|
|
||||||
func (backend *Backend) RPCGasCap() uint64 {
|
b.SetCurrentBlock(420)
|
||||||
panic("not implemented") // TODO: Implement
|
block = b.Downloader().Progress().CurrentBlock()
|
||||||
}
|
if 420 != block {
|
||||||
|
t.Fatalf("wrong CurrentBlock; expected %d, got %d", 420, block)
|
||||||
func (backend *Backend) RPCEVMTimeout() time.Duration {
|
}
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) RPCTxFeeCap() float64 {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) UnprotectedAllowed() bool {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
// Blockchain API
|
|
||||||
func (backend *Backend) SetHead(number uint64) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) CurrentHeader() *types.Header {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) CurrentBlock() *types.Header {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) GetTd(ctx context.Context, hash common.Hash) *big.Int {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
// Transaction pool API
|
|
||||||
func (backend *Backend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) GetPoolTransactions() (types.Transactions, error) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) GetPoolTransaction(txHash common.Hash) *types.Transaction {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) Stats() (pending int, queued int) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) SubscribeNewTxsEvent(_ chan<- core.NewTxsEvent) event.Subscription {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filter API
|
|
||||||
func (backend *Backend) BloomStatus() (uint64, uint64) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) {
|
|
||||||
panic("not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) ChainConfig() *params.ChainConfig {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) Engine() consensus.Engine {
|
|
||||||
panic("not implemented") // TODO: Implement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (backend *Backend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
}
|
||||||
|
17
test_helpers/mocks/backend_test.go
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
package mocks_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/cerc-io/plugeth-statediff/test_helpers/mocks"
|
||||||
|
"github.com/ethereum/go-ethereum"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestBackend(t *testing.T) {
|
||||||
|
startingblock := uint64(42)
|
||||||
|
b := mocks.NewBackend(t, ethereum.SyncProgress{StartingBlock: startingblock})
|
||||||
|
block := b.Downloader().Progress().StartingBlock()
|
||||||
|
if startingblock != block {
|
||||||
|
t.Fatalf("wrong StartingBlock; expected %d, got %d", startingblock, block)
|
||||||
|
}
|
||||||
|
}
|
@ -21,8 +21,7 @@ import (
|
|||||||
"math/big"
|
"math/big"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/cerc-io/plugeth-statediff/adapt"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
@ -68,7 +67,7 @@ func (bc *BlockChain) SetChainEvents(chainEvents []core.ChainEvent) {
|
|||||||
|
|
||||||
// SubscribeChainEvent mock method
|
// SubscribeChainEvent mock method
|
||||||
func (bc *BlockChain) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
|
func (bc *BlockChain) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
|
||||||
subErr := errors.New("subscription error")
|
subErr := errors.New("mock subscription error")
|
||||||
|
|
||||||
var eventCounter int
|
var eventCounter int
|
||||||
subscription := event.NewSubscription(func(quit <-chan struct{}) error {
|
subscription := event.NewSubscription(func(quit <-chan struct{}) error {
|
||||||
@ -150,8 +149,7 @@ func (bc *BlockChain) SetTd(hash common.Hash, blockNum uint64, td *big.Int) {
|
|||||||
bc.TDByNum[blockNum] = td
|
bc.TDByNum[blockNum] = td
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bc *BlockChain) UnlockTrie(root common.Hash) {}
|
// TODO
|
||||||
|
func (bc *BlockChain) StateCache() adapt.StateView {
|
||||||
func (bc *BlockChain) StateCache() state.Database {
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -17,9 +17,8 @@
|
|||||||
package mocks
|
package mocks
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
statediff "github.com/cerc-io/plugeth-statediff"
|
||||||
"github.com/ethereum/go-ethereum/statediff"
|
sdtypes "github.com/cerc-io/plugeth-statediff/types"
|
||||||
sdtypes "github.com/ethereum/go-ethereum/statediff/types"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ statediff.Builder = &Builder{}
|
var _ statediff.Builder = &Builder{}
|
||||||
@ -29,8 +28,6 @@ type Builder struct {
|
|||||||
Args statediff.Args
|
Args statediff.Args
|
||||||
Params statediff.Params
|
Params statediff.Params
|
||||||
stateDiff sdtypes.StateObject
|
stateDiff sdtypes.StateObject
|
||||||
block *types.Block
|
|
||||||
stateTrie sdtypes.StateObject
|
|
||||||
builderError error
|
builderError error
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -50,19 +47,12 @@ func (builder *Builder) WriteStateDiffObject(args statediff.Args, params statedi
|
|||||||
return builder.builderError
|
return builder.builderError
|
||||||
}
|
}
|
||||||
|
|
||||||
// BuildStateTrieObject mock method
|
|
||||||
func (builder *Builder) BuildStateTrieObject(block *types.Block) (sdtypes.StateObject, error) {
|
|
||||||
builder.block = block
|
|
||||||
|
|
||||||
return builder.stateTrie, builder.builderError
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetStateDiffToBuild mock method
|
// SetStateDiffToBuild mock method
|
||||||
func (builder *Builder) SetStateDiffToBuild(stateDiff sdtypes.StateObject) {
|
func (builder *Builder) SetStateDiffToBuild(stateDiff sdtypes.StateObject) {
|
||||||
builder.stateDiff = stateDiff
|
builder.stateDiff = stateDiff
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetBuilderError mock method
|
// SetBuilderError mock method
|
||||||
func (builder *Builder) SetBuilderError(err error) {
|
func (builder *Builder) SetError(err error) {
|
||||||
builder.builderError = err
|
builder.builderError = err
|
||||||
}
|
}
|
||||||
|
567
test_helpers/mocks/gen_backend.go
Normal file
@ -0,0 +1,567 @@
|
|||||||
|
// Code generated by MockGen. DO NOT EDIT.
|
||||||
|
// Source: github.com/openrelayxyz/plugeth-utils/core (interfaces: Backend,Downloader)
|
||||||
|
|
||||||
|
// Package mocks is a generated GoMock package.
|
||||||
|
package mocks
|
||||||
|
|
||||||
|
import (
|
||||||
|
context "context"
|
||||||
|
big "math/big"
|
||||||
|
reflect "reflect"
|
||||||
|
|
||||||
|
gomock "github.com/golang/mock/gomock"
|
||||||
|
core "github.com/openrelayxyz/plugeth-utils/core"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MockBackend is a mock of Backend interface.
|
||||||
|
type MockBackend struct {
|
||||||
|
ctrl *gomock.Controller
|
||||||
|
recorder *MockBackendMockRecorder
|
||||||
|
}
|
||||||
|
|
||||||
|
// MockBackendMockRecorder is the mock recorder for MockBackend.
|
||||||
|
type MockBackendMockRecorder struct {
|
||||||
|
mock *MockBackend
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMockBackend creates a new mock instance.
|
||||||
|
func NewMockBackend(ctrl *gomock.Controller) *MockBackend {
|
||||||
|
mock := &MockBackend{ctrl: ctrl}
|
||||||
|
mock.recorder = &MockBackendMockRecorder{mock}
|
||||||
|
return mock
|
||||||
|
}
|
||||||
|
|
||||||
|
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||||
|
func (m *MockBackend) EXPECT() *MockBackendMockRecorder {
|
||||||
|
return m.recorder
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlockByHash mocks base method.
|
||||||
|
func (m *MockBackend) BlockByHash(arg0 context.Context, arg1 core.Hash) ([]byte, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "BlockByHash", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].([]byte)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlockByHash indicates an expected call of BlockByHash.
|
||||||
|
func (mr *MockBackendMockRecorder) BlockByHash(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockByHash", reflect.TypeOf((*MockBackend)(nil).BlockByHash), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlockByNumber mocks base method.
|
||||||
|
func (m *MockBackend) BlockByNumber(arg0 context.Context, arg1 int64) ([]byte, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "BlockByNumber", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].([]byte)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlockByNumber indicates an expected call of BlockByNumber.
|
||||||
|
func (mr *MockBackendMockRecorder) BlockByNumber(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BlockByNumber", reflect.TypeOf((*MockBackend)(nil).BlockByNumber), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BloomStatus mocks base method.
|
||||||
|
func (m *MockBackend) BloomStatus() (uint64, uint64) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "BloomStatus")
|
||||||
|
ret0, _ := ret[0].(uint64)
|
||||||
|
ret1, _ := ret[1].(uint64)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// BloomStatus indicates an expected call of BloomStatus.
|
||||||
|
func (mr *MockBackendMockRecorder) BloomStatus() *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BloomStatus", reflect.TypeOf((*MockBackend)(nil).BloomStatus))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CurrentBlock mocks base method.
|
||||||
|
func (m *MockBackend) CurrentBlock() []byte {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "CurrentBlock")
|
||||||
|
ret0, _ := ret[0].([]byte)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// CurrentBlock indicates an expected call of CurrentBlock.
|
||||||
|
func (mr *MockBackendMockRecorder) CurrentBlock() *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentBlock", reflect.TypeOf((*MockBackend)(nil).CurrentBlock))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CurrentHeader mocks base method.
|
||||||
|
func (m *MockBackend) CurrentHeader() []byte {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "CurrentHeader")
|
||||||
|
ret0, _ := ret[0].([]byte)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// CurrentHeader indicates an expected call of CurrentHeader.
|
||||||
|
func (mr *MockBackendMockRecorder) CurrentHeader() *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentHeader", reflect.TypeOf((*MockBackend)(nil).CurrentHeader))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Downloader mocks base method.
|
||||||
|
func (m *MockBackend) Downloader() core.Downloader {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "Downloader")
|
||||||
|
ret0, _ := ret[0].(core.Downloader)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Downloader indicates an expected call of Downloader.
|
||||||
|
func (mr *MockBackendMockRecorder) Downloader() *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Downloader", reflect.TypeOf((*MockBackend)(nil).Downloader))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExtRPCEnabled mocks base method.
|
||||||
|
func (m *MockBackend) ExtRPCEnabled() bool {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ExtRPCEnabled")
|
||||||
|
ret0, _ := ret[0].(bool)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExtRPCEnabled indicates an expected call of ExtRPCEnabled.
|
||||||
|
func (mr *MockBackendMockRecorder) ExtRPCEnabled() *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExtRPCEnabled", reflect.TypeOf((*MockBackend)(nil).ExtRPCEnabled))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAccountTrie mocks base method.
|
||||||
|
func (m *MockBackend) GetAccountTrie(arg0 core.Hash, arg1 core.Address) (core.Trie, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "GetAccountTrie", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(core.Trie)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAccountTrie indicates an expected call of GetAccountTrie.
|
||||||
|
func (mr *MockBackendMockRecorder) GetAccountTrie(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccountTrie", reflect.TypeOf((*MockBackend)(nil).GetAccountTrie), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetContractCode mocks base method.
|
||||||
|
func (m *MockBackend) GetContractCode(arg0 core.Hash) ([]byte, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "GetContractCode", arg0)
|
||||||
|
ret0, _ := ret[0].([]byte)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetContractCode indicates an expected call of GetContractCode.
|
||||||
|
func (mr *MockBackendMockRecorder) GetContractCode(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContractCode", reflect.TypeOf((*MockBackend)(nil).GetContractCode), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetLogs mocks base method.
|
||||||
|
func (m *MockBackend) GetLogs(arg0 context.Context, arg1 core.Hash) ([][]byte, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "GetLogs", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].([][]byte)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetLogs indicates an expected call of GetLogs.
|
||||||
|
func (mr *MockBackendMockRecorder) GetLogs(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogs", reflect.TypeOf((*MockBackend)(nil).GetLogs), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPoolNonce mocks base method.
|
||||||
|
func (m *MockBackend) GetPoolNonce(arg0 context.Context, arg1 core.Address) (uint64, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "GetPoolNonce", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(uint64)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPoolNonce indicates an expected call of GetPoolNonce.
|
||||||
|
func (mr *MockBackendMockRecorder) GetPoolNonce(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPoolNonce", reflect.TypeOf((*MockBackend)(nil).GetPoolNonce), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPoolTransaction mocks base method.
|
||||||
|
func (m *MockBackend) GetPoolTransaction(arg0 core.Hash) []byte {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "GetPoolTransaction", arg0)
|
||||||
|
ret0, _ := ret[0].([]byte)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPoolTransaction indicates an expected call of GetPoolTransaction.
|
||||||
|
func (mr *MockBackendMockRecorder) GetPoolTransaction(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPoolTransaction", reflect.TypeOf((*MockBackend)(nil).GetPoolTransaction), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPoolTransactions mocks base method.
|
||||||
|
func (m *MockBackend) GetPoolTransactions() ([][]byte, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "GetPoolTransactions")
|
||||||
|
ret0, _ := ret[0].([][]byte)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPoolTransactions indicates an expected call of GetPoolTransactions.
|
||||||
|
func (mr *MockBackendMockRecorder) GetPoolTransactions() *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPoolTransactions", reflect.TypeOf((*MockBackend)(nil).GetPoolTransactions))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetReceipts mocks base method.
|
||||||
|
func (m *MockBackend) GetReceipts(arg0 context.Context, arg1 core.Hash) ([]byte, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "GetReceipts", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].([]byte)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetReceipts indicates an expected call of GetReceipts.
|
||||||
|
func (mr *MockBackendMockRecorder) GetReceipts(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReceipts", reflect.TypeOf((*MockBackend)(nil).GetReceipts), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTd mocks base method.
|
||||||
|
func (m *MockBackend) GetTd(arg0 context.Context, arg1 core.Hash) *big.Int {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "GetTd", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(*big.Int)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTd indicates an expected call of GetTd.
|
||||||
|
func (mr *MockBackendMockRecorder) GetTd(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTd", reflect.TypeOf((*MockBackend)(nil).GetTd), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTransaction mocks base method.
|
||||||
|
func (m *MockBackend) GetTransaction(arg0 context.Context, arg1 core.Hash) ([]byte, core.Hash, uint64, uint64, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "GetTransaction", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].([]byte)
|
||||||
|
ret1, _ := ret[1].(core.Hash)
|
||||||
|
ret2, _ := ret[2].(uint64)
|
||||||
|
ret3, _ := ret[3].(uint64)
|
||||||
|
ret4, _ := ret[4].(error)
|
||||||
|
return ret0, ret1, ret2, ret3, ret4
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTransaction indicates an expected call of GetTransaction.
|
||||||
|
func (mr *MockBackendMockRecorder) GetTransaction(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTransaction", reflect.TypeOf((*MockBackend)(nil).GetTransaction), arg0, arg1)
}

// GetTrie mocks base method.
func (m *MockBackend) GetTrie(arg0 core.Hash) (core.Trie, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetTrie", arg0)
	ret0, _ := ret[0].(core.Trie)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetTrie indicates an expected call of GetTrie.
func (mr *MockBackendMockRecorder) GetTrie(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTrie", reflect.TypeOf((*MockBackend)(nil).GetTrie), arg0)
}

// HeaderByHash mocks base method.
func (m *MockBackend) HeaderByHash(arg0 context.Context, arg1 core.Hash) ([]byte, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "HeaderByHash", arg0, arg1)
	ret0, _ := ret[0].([]byte)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// HeaderByHash indicates an expected call of HeaderByHash.
func (mr *MockBackendMockRecorder) HeaderByHash(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeaderByHash", reflect.TypeOf((*MockBackend)(nil).HeaderByHash), arg0, arg1)
}

// HeaderByNumber mocks base method.
func (m *MockBackend) HeaderByNumber(arg0 context.Context, arg1 int64) ([]byte, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "HeaderByNumber", arg0, arg1)
	ret0, _ := ret[0].([]byte)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// HeaderByNumber indicates an expected call of HeaderByNumber.
func (mr *MockBackendMockRecorder) HeaderByNumber(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeaderByNumber", reflect.TypeOf((*MockBackend)(nil).HeaderByNumber), arg0, arg1)
}

// RPCGasCap mocks base method.
func (m *MockBackend) RPCGasCap() uint64 {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "RPCGasCap")
	ret0, _ := ret[0].(uint64)
	return ret0
}

// RPCGasCap indicates an expected call of RPCGasCap.
func (mr *MockBackendMockRecorder) RPCGasCap() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RPCGasCap", reflect.TypeOf((*MockBackend)(nil).RPCGasCap))
}

// RPCTxFeeCap mocks base method.
func (m *MockBackend) RPCTxFeeCap() float64 {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "RPCTxFeeCap")
	ret0, _ := ret[0].(float64)
	return ret0
}

// RPCTxFeeCap indicates an expected call of RPCTxFeeCap.
func (mr *MockBackendMockRecorder) RPCTxFeeCap() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RPCTxFeeCap", reflect.TypeOf((*MockBackend)(nil).RPCTxFeeCap))
}

// SendTx mocks base method.
func (m *MockBackend) SendTx(arg0 context.Context, arg1 []byte) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SendTx", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// SendTx indicates an expected call of SendTx.
func (mr *MockBackendMockRecorder) SendTx(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendTx", reflect.TypeOf((*MockBackend)(nil).SendTx), arg0, arg1)
}

// SetHead mocks base method.
func (m *MockBackend) SetHead(arg0 uint64) {
	m.ctrl.T.Helper()
	m.ctrl.Call(m, "SetHead", arg0)
}

// SetHead indicates an expected call of SetHead.
func (mr *MockBackendMockRecorder) SetHead(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHead", reflect.TypeOf((*MockBackend)(nil).SetHead), arg0)
}

// Stats mocks base method.
func (m *MockBackend) Stats() (int, int) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Stats")
	ret0, _ := ret[0].(int)
	ret1, _ := ret[1].(int)
	return ret0, ret1
}

// Stats indicates an expected call of Stats.
func (mr *MockBackendMockRecorder) Stats() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stats", reflect.TypeOf((*MockBackend)(nil).Stats))
}

// SubscribeChainEvent mocks base method.
func (m *MockBackend) SubscribeChainEvent(arg0 chan<- core.ChainEvent) core.Subscription {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SubscribeChainEvent", arg0)
	ret0, _ := ret[0].(core.Subscription)
	return ret0
}

// SubscribeChainEvent indicates an expected call of SubscribeChainEvent.
func (mr *MockBackendMockRecorder) SubscribeChainEvent(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeChainEvent", reflect.TypeOf((*MockBackend)(nil).SubscribeChainEvent), arg0)
}

// SubscribeChainHeadEvent mocks base method.
func (m *MockBackend) SubscribeChainHeadEvent(arg0 chan<- core.ChainHeadEvent) core.Subscription {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SubscribeChainHeadEvent", arg0)
	ret0, _ := ret[0].(core.Subscription)
	return ret0
}

// SubscribeChainHeadEvent indicates an expected call of SubscribeChainHeadEvent.
func (mr *MockBackendMockRecorder) SubscribeChainHeadEvent(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeChainHeadEvent", reflect.TypeOf((*MockBackend)(nil).SubscribeChainHeadEvent), arg0)
}

// SubscribeChainSideEvent mocks base method.
func (m *MockBackend) SubscribeChainSideEvent(arg0 chan<- core.ChainSideEvent) core.Subscription {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SubscribeChainSideEvent", arg0)
	ret0, _ := ret[0].(core.Subscription)
	return ret0
}

// SubscribeChainSideEvent indicates an expected call of SubscribeChainSideEvent.
func (mr *MockBackendMockRecorder) SubscribeChainSideEvent(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeChainSideEvent", reflect.TypeOf((*MockBackend)(nil).SubscribeChainSideEvent), arg0)
}

// SubscribeLogsEvent mocks base method.
func (m *MockBackend) SubscribeLogsEvent(arg0 chan<- [][]byte) core.Subscription {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SubscribeLogsEvent", arg0)
	ret0, _ := ret[0].(core.Subscription)
	return ret0
}

// SubscribeLogsEvent indicates an expected call of SubscribeLogsEvent.
func (mr *MockBackendMockRecorder) SubscribeLogsEvent(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeLogsEvent", reflect.TypeOf((*MockBackend)(nil).SubscribeLogsEvent), arg0)
}

// SubscribeNewTxsEvent mocks base method.
func (m *MockBackend) SubscribeNewTxsEvent(arg0 chan<- core.NewTxsEvent) core.Subscription {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SubscribeNewTxsEvent", arg0)
	ret0, _ := ret[0].(core.Subscription)
	return ret0
}

// SubscribeNewTxsEvent indicates an expected call of SubscribeNewTxsEvent.
func (mr *MockBackendMockRecorder) SubscribeNewTxsEvent(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeNewTxsEvent", reflect.TypeOf((*MockBackend)(nil).SubscribeNewTxsEvent), arg0)
}

// SubscribePendingLogsEvent mocks base method.
func (m *MockBackend) SubscribePendingLogsEvent(arg0 chan<- [][]byte) core.Subscription {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SubscribePendingLogsEvent", arg0)
	ret0, _ := ret[0].(core.Subscription)
	return ret0
}

// SubscribePendingLogsEvent indicates an expected call of SubscribePendingLogsEvent.
func (mr *MockBackendMockRecorder) SubscribePendingLogsEvent(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribePendingLogsEvent", reflect.TypeOf((*MockBackend)(nil).SubscribePendingLogsEvent), arg0)
}

// SubscribeRemovedLogsEvent mocks base method.
func (m *MockBackend) SubscribeRemovedLogsEvent(arg0 chan<- []byte) core.Subscription {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SubscribeRemovedLogsEvent", arg0)
	ret0, _ := ret[0].(core.Subscription)
	return ret0
}

// SubscribeRemovedLogsEvent indicates an expected call of SubscribeRemovedLogsEvent.
func (mr *MockBackendMockRecorder) SubscribeRemovedLogsEvent(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeRemovedLogsEvent", reflect.TypeOf((*MockBackend)(nil).SubscribeRemovedLogsEvent), arg0)
}

// SuggestGasTipCap mocks base method.
func (m *MockBackend) SuggestGasTipCap(arg0 context.Context) (*big.Int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SuggestGasTipCap", arg0)
	ret0, _ := ret[0].(*big.Int)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// SuggestGasTipCap indicates an expected call of SuggestGasTipCap.
func (mr *MockBackendMockRecorder) SuggestGasTipCap(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SuggestGasTipCap", reflect.TypeOf((*MockBackend)(nil).SuggestGasTipCap), arg0)
}

// TxPoolContent mocks base method.
func (m *MockBackend) TxPoolContent() (map[core.Address][][]byte, map[core.Address][][]byte) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "TxPoolContent")
	ret0, _ := ret[0].(map[core.Address][][]byte)
	ret1, _ := ret[1].(map[core.Address][][]byte)
	return ret0, ret1
}

// TxPoolContent indicates an expected call of TxPoolContent.
func (mr *MockBackendMockRecorder) TxPoolContent() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TxPoolContent", reflect.TypeOf((*MockBackend)(nil).TxPoolContent))
}

// UnprotectedAllowed mocks base method.
func (m *MockBackend) UnprotectedAllowed() bool {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "UnprotectedAllowed")
	ret0, _ := ret[0].(bool)
	return ret0
}

// UnprotectedAllowed indicates an expected call of UnprotectedAllowed.
func (mr *MockBackendMockRecorder) UnprotectedAllowed() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnprotectedAllowed", reflect.TypeOf((*MockBackend)(nil).UnprotectedAllowed))
}

// MockDownloader is a mock of Downloader interface.
type MockDownloader struct {
	ctrl     *gomock.Controller
	recorder *MockDownloaderMockRecorder
}

// MockDownloaderMockRecorder is the mock recorder for MockDownloader.
type MockDownloaderMockRecorder struct {
	mock *MockDownloader
}

// NewMockDownloader creates a new mock instance.
func NewMockDownloader(ctrl *gomock.Controller) *MockDownloader {
	mock := &MockDownloader{ctrl: ctrl}
	mock.recorder = &MockDownloaderMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDownloader) EXPECT() *MockDownloaderMockRecorder {
	return m.recorder
}

// Progress mocks base method.
func (m *MockDownloader) Progress() core.Progress {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Progress")
	ret0, _ := ret[0].(core.Progress)
	return ret0
}

// Progress indicates an expected call of Progress.
func (mr *MockDownloaderMockRecorder) Progress() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Progress", reflect.TypeOf((*MockDownloader)(nil).Progress))
}
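The block above is standard gomock-generated boilerplate. For orientation only (not part of the diff), a test in the same package might wire the mock up roughly as follows; the test name, package name, and expected value are made up, and the import assumes gomock comes from github.com/golang/mock (adjust if the module uses go.uber.org/mock):

// Hypothetical usage sketch of the generated MockBackend.
package mocks

import (
	"testing"

	"github.com/golang/mock/gomock"
)

func TestMockBackendSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	backend := NewMockBackend(ctrl)
	// Register an expected call with a canned return value.
	backend.EXPECT().RPCGasCap().Return(uint64(50000000))

	if got := backend.RPCGasCap(); got != 50000000 {
		t.Fatalf("unexpected gas cap: %d", got)
	}
}
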
@ -22,8 +22,10 @@ import (

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
-	sdtypes "github.com/ethereum/go-ethereum/statediff/types"
+
+	"github.com/cerc-io/plugeth-statediff/indexer/interfaces"
+	"github.com/cerc-io/plugeth-statediff/indexer/models"
+	sdtypes "github.com/cerc-io/plugeth-statediff/types"
)

var _ interfaces.StateDiffIndexer = &StateDiffIndexer{}

@ -32,8 +34,20 @@ var _ interfaces.Batch = &batch{}

// StateDiffIndexer is a mock state diff indexer
type StateDiffIndexer struct{}

+func (sdi *StateDiffIndexer) DetectGaps(beginBlock uint64, endBlock uint64) ([]*interfaces.BlockGap, error) {
+	return nil, nil
+}
+
+func (sdi *StateDiffIndexer) CurrentBlock() (*models.HeaderModel, error) {
+	return nil, nil
+}
+
type batch struct{}

+func (sdi *StateDiffIndexer) HasBlock(hash common.Hash, number uint64) (bool, error) {
+	return false, nil
+}
+
func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receipts, totalDifficulty *big.Int) (interfaces.Batch, error) {
	return &batch{}, nil
}

@ -48,9 +62,7 @@ func (sdi *StateDiffIndexer) PushIPLD(txi interfaces.Batch, ipld sdtypes.IPLD) e

func (sdi *StateDiffIndexer) ReportDBMetrics(delay time.Duration, quit <-chan bool) {}

-func (sdi *StateDiffIndexer) LoadWatchedAddresses() ([]common.Address, error) {
-	return nil, nil
-}
+func (sdi *StateDiffIndexer) LoadWatchedAddresses() ([]common.Address, error) { return nil, nil }

func (sdi *StateDiffIndexer) InsertWatchedAddresses(addresses []sdtypes.WatchAddressArg, currentBlock *big.Int) error {
	return nil

@ -25,6 +25,8 @@ import (
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
+
+	"github.com/cerc-io/plugeth-statediff/utils"
)

// AddressToLeafKey hashes an returns an address

@ -47,7 +49,7 @@ var (
	NullCodeHash = crypto.Keccak256Hash([]byte{})
	StoragePath = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes()
	StorageKey = common.HexToHash("0000000000000000000000000000000000000000000000000000000000000001").Bytes()
-	StorageValue = common.Hex2Bytes("0x03")
+	StorageValue = utils.Hex2Bytes("0x03")
	NullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")

	Testdb = rawdb.NewMemoryDatabase()

@ -64,11 +66,11 @@ var (
	Account2Addr = crypto.PubkeyToAddress(Account2Key.PublicKey) //0x0D3ab14BBaD3D99F4203bd7a11aCB94882050E7e
	Account1LeafKey = AddressToLeafKey(Account1Addr)
	Account2LeafKey = AddressToLeafKey(Account2Addr)
-	ContractCode = common.Hex2Bytes("608060405234801561001057600080fd5b50336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506040518060200160405280600160ff16815250600190600161007492919061007a565b506100e4565b82606481019282156100ae579160200282015b828111156100ad578251829060ff1690559160200191906001019061008d565b5b5090506100bb91906100bf565b5090565b6100e191905b808211156100dd5760008160009055506001016100c5565b5090565b90565b6101ca806100f36000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806343d726d61461003b578063c16431b914610045575b600080fd5b61004361007d565b005b61007b6004803603604081101561005b57600080fd5b81019080803590602001909291908035906020019092919050505061015c565b005b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610122576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260228152602001806101746022913960400191505060405180910390fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b806001836064811061016a57fe5b0181905550505056fe4f6e6c79206f776e65722063616e2063616c6c20746869732066756e6374696f6e2ea265627a7a72305820e3747183708fb6bff3f6f7a80fb57dcc1c19f83f9cb25457a3ed5c0424bde66864736f6c634300050a0032")
-	ByteCodeAfterDeployment = common.Hex2Bytes("608060405234801561001057600080fd5b50600436106100365760003560e01c806343d726d61461003b578063c16431b914610045575b600080fd5b61004361007d565b005b61007b6004803603604081101561005b57600080fd5b81019080803590602001909291908035906020019092919050505061015c565b005b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610122576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260228152602001806101746022913960400191505060405180910390fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b806001836064811061016a57fe5b0181905550505056fe4f6e6c79206f776e65722063616e2063616c6c20746869732066756e6374696f6e2ea265627a7a72305820e3747183708fb6bff3f6f7a80fb57dcc1c19f83f9cb25457a3ed5c0424bde66864736f6c634300050a0032")
+	ContractCode = utils.Hex2Bytes("608060405234801561001057600080fd5b50336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506040518060200160405280600160ff16815250600190600161007492919061007a565b506100e4565b82606481019282156100ae579160200282015b828111156100ad578251829060ff1690559160200191906001019061008d565b5b5090506100bb91906100bf565b5090565b6100e191905b808211156100dd5760008160009055506001016100c5565b5090565b90565b6101ca806100f36000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806343d726d61461003b578063c16431b914610045575b600080fd5b61004361007d565b005b61007b6004803603604081101561005b57600080fd5b81019080803590602001909291908035906020019092919050505061015c565b005b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610122576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260228152602001806101746022913960400191505060405180910390fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b806001836064811061016a57fe5b0181905550505056fe4f6e6c79206f776e65722063616e2063616c6c20746869732066756e6374696f6e2ea265627a7a72305820e3747183708fb6bff3f6f7a80fb57dcc1c19f83f9cb25457a3ed5c0424bde66864736f6c634300050a0032")
+	ByteCodeAfterDeployment = utils.Hex2Bytes("608060405234801561001057600080fd5b50600436106100365760003560e01c806343d726d61461003b578063c16431b914610045575b600080fd5b61004361007d565b005b61007b6004803603604081101561005b57600080fd5b81019080803590602001909291908035906020019092919050505061015c565b005b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610122576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260228152602001806101746022913960400191505060405180910390fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b806001836064811061016a57fe5b0181905550505056fe4f6e6c79206f776e65722063616e2063616c6c20746869732066756e6374696f6e2ea265627a7a72305820e3747183708fb6bff3f6f7a80fb57dcc1c19f83f9cb25457a3ed5c0424bde66864736f6c634300050a0032")
	CodeHash = common.HexToHash("0xaaea5efba4fd7b45d7ec03918ac5d8b31aa93b48986af0e6b591f0f087c80127")
-	ContractCodeForInternalLeafNode = common.Hex2Bytes("608060405234801561001057600080fd5b50336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506040518060200160405280600160ff16815250600190600161007492919061007a565b506100e6565b8262019c5e81019282156100b0579160200282015b828111156100af578251829060ff1690559160200191906001019061008f565b5b5090506100bd91906100c1565b5090565b6100e391905b808211156100df5760008160009055506001016100c7565b5090565b90565b6101cc806100f56000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806343d726d61461003b578063c16431b914610045575b600080fd5b61004361007d565b005b61007b6004803603604081101561005b57600080fd5b81019080803590602001909291908035906020019092919050505061015c565b005b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610122576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260228152602001806101766022913960400191505060405180910390fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b8060018362019c5e811061016c57fe5b0181905550505056fe4f6e6c79206f776e65722063616e2063616c6c20746869732066756e6374696f6e2ea265627a7a7231582007250e2c86ac8989891c4aa9c4737119491578200b9104c574143607ed71642b64736f6c63430005110032")
-	ByteCodeAfterDeploymentForInternalLeafNode = common.Hex2Bytes("608060405234801561001057600080fd5b50600436106100365760003560e01c806343d726d61461003b578063c16431b914610045575b600080fd5b61004361007d565b005b61007b6004803603604081101561005b57600080fd5b81019080803590602001909291908035906020019092919050505061015c565b005b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610122576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260228152602001806101766022913960400191505060405180910390fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b8060018362019c5e811061016c57fe5b0181905550505056fe4f6e6c79206f776e65722063616e2063616c6c20746869732066756e6374696f6e2ea265627a7a7231582007250e2c86ac8989891c4aa9c4737119491578200b9104c574143607ed71642b64736f6c63430005110032")
+	ContractCodeForInternalLeafNode = utils.Hex2Bytes("608060405234801561001057600080fd5b50336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506040518060200160405280600160ff16815250600190600161007492919061007a565b506100e6565b8262019c5e81019282156100b0579160200282015b828111156100af578251829060ff1690559160200191906001019061008f565b5b5090506100bd91906100c1565b5090565b6100e391905b808211156100df5760008160009055506001016100c7565b5090565b90565b6101cc806100f56000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806343d726d61461003b578063c16431b914610045575b600080fd5b61004361007d565b005b61007b6004803603604081101561005b57600080fd5b81019080803590602001909291908035906020019092919050505061015c565b005b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610122576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260228152602001806101766022913960400191505060405180910390fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b8060018362019c5e811061016c57fe5b0181905550505056fe4f6e6c79206f776e65722063616e2063616c6c20746869732066756e6374696f6e2ea265627a7a7231582007250e2c86ac8989891c4aa9c4737119491578200b9104c574143607ed71642b64736f6c63430005110032")
+	ByteCodeAfterDeploymentForInternalLeafNode = utils.Hex2Bytes("608060405234801561001057600080fd5b50600436106100365760003560e01c806343d726d61461003b578063c16431b914610045575b600080fd5b61004361007d565b005b61007b6004803603604081101561005b57600080fd5b81019080803590602001909291908035906020019092919050505061015c565b005b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610122576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260228152602001806101766022913960400191505060405180910390fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b8060018362019c5e811061016c57fe5b0181905550505056fe4f6e6c79206f776e65722063616e2063616c6c20746869732066756e6374696f6e2ea265627a7a7231582007250e2c86ac8989891c4aa9c4737119491578200b9104c574143607ed71642b64736f6c63430005110032")
	CodeHashForInternalizedLeafNode = common.HexToHash("8327d45b7e6ffe26fc9728db4cd3c1c8177f7af2de0d31dfe5435e83101db04f")
	ContractAddr common.Address

13
test_helpers/util.go
Normal file
@ -0,0 +1,13 @@
package test_helpers

import (
	geth_log "github.com/ethereum/go-ethereum/log"

	"github.com/cerc-io/plugeth-statediff/utils/log"
)

// The geth sync logs are noisy, it can be useful to silence them
func SilenceLogs() {
	geth_log.Root().SetHandler(geth_log.DiscardHandler())
	log.TestLogger.SetLevel(2)
}

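As a usage note (illustrative, not part of the diff), a test package can call this helper once from TestMain so the whole run stays quiet; the package name here is hypothetical:

package statediff_test // hypothetical test package

import (
	"os"
	"testing"

	"github.com/cerc-io/plugeth-statediff/test_helpers"
)

func TestMain(m *testing.M) {
	// Silence geth/plugeth log output for every test in the package.
	test_helpers.SilenceLogs()
	os.Exit(m.Run())
}
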
@ -24,9 +24,9 @@ import (
	"strings"
	"time"

-	metrics2 "github.com/ethereum/go-ethereum/statediff/indexer/database/metrics"
+	metrics2 "github.com/cerc-io/plugeth-statediff/indexer/database/metrics"

-	"github.com/ethereum/go-ethereum/statediff/types"
+	"github.com/cerc-io/plugeth-statediff/types"
)

// SortKeys sorts the keys in the account map

@ -67,12 +67,6 @@ type IPLD struct {
	Content []byte
}

-// CodeAndCodeHash struct to hold codehash => code mappings
-type CodeAndCodeHash struct {
-	Hash common.Hash
-	Code []byte
-}
-
type StateNodeSink func(node StateLeafNode) error
type StorageNodeSink func(node StorageLeafNode) error
type IPLDSink func(IPLD) error

26
utils/bytes.go
Normal file
@ -0,0 +1,26 @@
package utils

import "encoding/hex"

// FromHex returns the bytes represented by the hexadecimal string s.
// s may be prefixed with "0x".
func FromHex(s string) []byte {
	if has0xPrefix(s) {
		s = s[2:]
	}
	if len(s)%2 == 1 {
		s = "0" + s
	}
	return Hex2Bytes(s)
}

// has0xPrefix validates str begins with '0x' or '0X'.
func has0xPrefix(str string) bool {
	return len(str) >= 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X')
}

// Hex2Bytes returns the bytes represented by the hexadecimal string str.
func Hex2Bytes(str string) []byte {
	h, _ := hex.DecodeString(str)
	return h
}

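These helpers closely follow geth's common hex utilities: only FromHex tolerates a "0x" prefix and odd-length input, while Hex2Bytes silently drops anything it cannot decode. A small illustrative snippet (not part of the diff):

package main // hypothetical example

import (
	"fmt"

	"github.com/cerc-io/plugeth-statediff/utils"
)

func main() {
	fmt.Printf("%x\n", utils.FromHex("0x3"))    // 03: prefix stripped, odd length zero-padded
	fmt.Printf("%x\n", utils.Hex2Bytes("0103")) // 0103
	fmt.Printf("%x\n", utils.Hex2Bytes("0x03")) // empty: the prefix is not stripped here
}
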
64
utils/encoding.go
Normal file
@ -0,0 +1,64 @@
package utils

// HexToCompact converts a hex path to the compact encoded format
func HexToCompact(hex []byte) []byte {
	return hexToCompact(hex)
}

func hexToCompact(hex []byte) []byte {
	terminator := byte(0)
	if hasTerm(hex) {
		terminator = 1
		hex = hex[:len(hex)-1]
	}
	buf := make([]byte, len(hex)/2+1)
	buf[0] = terminator << 5 // the flag byte
	if len(hex)&1 == 1 {
		buf[0] |= 1 << 4 // odd flag
		buf[0] |= hex[0] // first nibble is contained in the first byte
		hex = hex[1:]
	}
	decodeNibbles(hex, buf[1:])
	return buf
}

// CompactToHex converts a compact encoded path to hex format
func CompactToHex(compact []byte) []byte {
	return compactToHex(compact)
}

func compactToHex(compact []byte) []byte {
	if len(compact) == 0 {
		return compact
	}
	base := KeybytesToHex(compact)
	// delete terminator flag
	if base[0] < 2 {
		base = base[:len(base)-1]
	}
	// apply odd flag
	chop := 2 - base[0]&1
	return base[chop:]
}

func KeybytesToHex(str []byte) []byte {
	l := len(str)*2 + 1
	var nibbles = make([]byte, l)
	for i, b := range str {
		nibbles[i*2] = b / 16
		nibbles[i*2+1] = b % 16
	}
	nibbles[l-1] = 16
	return nibbles
}

func decodeNibbles(nibbles []byte, bytes []byte) {
	for bi, ni := 0, 0; ni < len(nibbles); bi, ni = bi+1, ni+2 {
		bytes[bi] = nibbles[ni]<<4 | nibbles[ni+1]
	}
}

// hasTerm returns whether a hex key has the terminator flag.
func hasTerm(s []byte) bool {
	return len(s) > 0 && s[len(s)-1] == 16
}

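These functions reproduce the hex-nibble/compact ("hex prefix") path encoding used by the Merkle-Patricia trie: the first byte packs an odd-length flag and a leaf-terminator flag, and the remaining nibbles are packed two per byte. A round-trip sketch (illustrative, not part of the diff):

package main // hypothetical example

import (
	"fmt"

	"github.com/cerc-io/plugeth-statediff/utils"
)

func main() {
	// A leaf path of three nibbles, ending in the 0x10 terminator.
	hexPath := []byte{0x1, 0x2, 0x3, 0x10}

	compact := utils.HexToCompact(hexPath)
	fmt.Printf("compact: %x\n", compact)                      // 3123: flag nibble 3 = odd length + terminator
	fmt.Printf("hex:     %x\n", utils.CompactToHex(compact)) // 01020310: original nibbles restored
}
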
1
utils/iterator.go
Normal file
@ -0,0 +1 @@
package utils

68
utils/log/log.go
Normal file
@ -0,0 +1,68 @@
package log

import (
	"github.com/inconshreveable/log15"
	"github.com/openrelayxyz/plugeth-utils/core"
)

type Logger = core.Logger

var (
	DefaultLogger core.Logger

	TestLogger = Log15Logger()
)

func init() {
	// The plugeth logger is only initialized with the geth runtime,
	// but tests expect to have a logger available, so default to this.
	DefaultLogger = TestLogger
}

func Trace(m string, a ...interface{}) { DefaultLogger.Trace(m, a...) }
func Debug(m string, a ...interface{}) { DefaultLogger.Debug(m, a...) }
func Info(m string, a ...interface{})  { DefaultLogger.Info(m, a...) }
func Warn(m string, a ...interface{})  { DefaultLogger.Warn(m, a...) }
func Crit(m string, a ...interface{})  { DefaultLogger.Crit(m, a...) }
func Error(m string, a ...interface{}) { DefaultLogger.Error(m, a...) }

func SetDefaultLogger(l core.Logger) {
	DefaultLogger = l
}

// Log15Logger returns a logger satisfying the same interface as geth's
func Log15Logger(ctx ...interface{}) wrapLog15 {
	return wrapLog15{log15.New(ctx...)}
}

type wrapLog15 struct{ log15.Logger }

func (l wrapLog15) New(ctx ...interface{}) Logger {
	return wrapLog15{l.Logger.New(ctx...)}
}

func (l wrapLog15) Trace(m string, a ...interface{}) {
	l.Logger.Debug(m, a...)
}

func (l wrapLog15) SetLevel(lvl int) {
	l.SetHandler(log15.LvlFilterHandler(log15.Lvl(lvl), l.GetHandler()))
}

// New returns a Logger that includes the contextual args in all output
// (workaround for missing method in plugeth)
func New(ctx ...interface{}) Logger {
	return ctxLogger{DefaultLogger, ctx}
}

type ctxLogger struct {
	base Logger
	ctx  []interface{}
}

func (l ctxLogger) Trace(m string, a ...interface{}) { l.base.Trace(m, append(l.ctx, a...)...) }
func (l ctxLogger) Debug(m string, a ...interface{}) { l.base.Debug(m, append(l.ctx, a...)...) }
func (l ctxLogger) Info(m string, a ...interface{})  { l.base.Info(m, append(l.ctx, a...)...) }
func (l ctxLogger) Warn(m string, a ...interface{})  { l.base.Warn(m, append(l.ctx, a...)...) }
func (l ctxLogger) Crit(m string, a ...interface{})  { l.base.Crit(m, append(l.ctx, a...)...) }
func (l ctxLogger) Error(m string, a ...interface{}) { l.base.Error(m, append(l.ctx, a...)...) }

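This package wraps the plugeth core.Logger so library code can log before the host logger exists, and New layers contextual key/value pairs on top of whatever DefaultLogger currently is. A wiring sketch (illustrative; setupLogging is hypothetical, not a plugeth hook):

package main

import (
	"github.com/cerc-io/plugeth-statediff/utils/log"
)

// setupLogging hands the host-provided logger to the package and derives a
// contextual logger for one subsystem.
func setupLogging(hostLogger log.Logger) {
	log.SetDefaultLogger(hostLogger)

	serviceLog := log.New("module", "statediff")
	serviceLog.Info("logging configured")
}

func main() {
	// With no host logger available (e.g. in tests), fall back to the
	// log15-backed TestLogger that the package installs in init().
	setupLogging(log.DefaultLogger)
}
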
23
utils/utils.go
Normal file
@ -0,0 +1,23 @@
package utils

import (
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/rlp"
)

// Fatalf formats a message to standard error and exits the program.
func Fatalf(format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}

func MustDecode[T any](buf []byte) *T {
	var ret T
	err := rlp.DecodeBytes(buf, &ret)
	if err != nil {
		panic(fmt.Errorf("error decoding RLP %T: %w", ret, err))
	}
	return &ret
}

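MustDecode is a small generic convenience over rlp.DecodeBytes that panics on malformed input. A usage sketch with a throwaway type (illustrative, not part of the diff):

package main // hypothetical example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"

	"github.com/cerc-io/plugeth-statediff/utils"
)

type account struct {
	Nonce   uint64
	Balance uint64
}

func main() {
	enc, _ := rlp.EncodeToBytes(account{Nonce: 1, Balance: 100})
	// The type parameter selects the decode target; bad input would panic.
	dec := utils.MustDecode[account](enc)
	fmt.Println(dec.Nonce, dec.Balance) // 1 100
}
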
nitpick: in v5, `key` is a full CID, not a blockstore-prefixed multihash
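For illustration of the distinction the reviewer draws (not part of this change): a full CID key carries the codec alongside the multihash, whereas the older blockstore-prefixed keys were built from the multihash alone. A sketch using go-cid; the codec and data below are arbitrary examples:

package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

func main() {
	data := []byte("some IPLD block")
	hash, _ := mh.Sum(data, mh.KECCAK_256, -1)

	c := cid.NewCidV1(cid.EthStateTrie, hash)
	fmt.Println("full CID key:  ", c.String())       // v5-style key: codec + multihash
	fmt.Println("multihash only:", hash.B58String()) // what a blockstore-prefixed key is derived from
}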