Compare commits: main...roysc/conc
2 commits: 1d84d12f75, 6ebf8471fb
@@ -1,8 +1,3 @@
Dockerfile
.git
Makefile
scripts
**/*_test.go
**/*.so
test
build
*.so
@@ -8,10 +8,10 @@ on:
- main
- ci-test

# Needed until we can incorporate docker startup into the executor container
env:
SO_VERSION: v1.1.0-36d4969-202407091537
FIXTURENET_ETH_STACKS_REF: main
SYSTEM_TESTS_REF: main
DOCKER_HOST: unix:///var/run/dind.sock
SO_VERSION: v1.1.0-c30c779-202309082138

jobs:
unit-tests:

@@ -23,9 +23,18 @@ jobs:
with:
go-version-file: 'go.mod'
check-latest: true
- name: Run dockerd
run: |
dockerd -H $DOCKER_HOST --userland-proxy=false &
sleep 5
- name: Run DB container
run: |
docker compose -f test/compose.yml up --wait
- name: Set up Gitea access token
env:
TOKEN: ${{ secrets.CICD_REPO_TOKEN }}
run: |
git config --global url."https://$TOKEN:@git.vdb.to/".insteadOf https://git.vdb.to/
- name: Run tests
run: go test -p 1 -v ./...

@@ -33,49 +42,47 @@ jobs:
name: Run integration tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
path: ./plugeth-statediff
progress: false
- uses: actions/checkout@v3
with:
repository: cerc-io/plugeth
ref: statediff
path: ./plugeth
- name: Run dockerd
run: dockerd -H $DOCKER_HOST --userland-proxy=false &
# These images need access tokens configured
- name: Build docker image
env:
TOKEN: ${{ secrets.CICD_REPO_TOKEN }}
run: |
docker build ./plugeth-statediff -t cerc/plugeth-statediff:local
[[ -n "$TOKEN" ]]
docker build ./plugeth-statediff -t cerc/plugeth-statediff:local \
--build-arg GIT_VDBTO_TOKEN="$TOKEN"
docker build ./plugeth -t cerc/plugeth:local \
--build-arg GIT_VDBTO_TOKEN="$TOKEN"

- name: "Install Python for ARM on Linux"
if: ${{ runner.arch == 'arm64' && runner.os == 'Linux' }}
uses: deadsnakes/action@v3.0.1
with:
python-version: 3.11
- name: "Install Python cases other than ARM on Linux"
if: ${{ ! (runner.arch == 'arm64' && runner.os == 'Linux') }}
uses: actions/setup-python@v4
with:
python-version: 3.11
- name: "Print Python version"
run: python3 --version
- name: Install stack-orchestrator
run: |
curl -L -O https://github.com/cerc-io/stack-orchestrator/releases/download/$SO_VERSION/laconic-so
chmod +x laconic-so
echo PATH="$PATH:$(pwd)" >> $GITHUB_ENV
- name: Clone system-tests
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
repository: cerc-io/system-tests
ref: ${{ env.SYSTEM_TESTS_REF }}
ref: plugeth-compat
path: ./system-tests
token: ${{ secrets.CICD_REPO_TOKEN }}
progress: false
- name: Clone fixturenet stack repo
uses: actions/checkout@v4
with:
repository: cerc-io/fixturenet-eth-stacks
ref: ${{ env.FIXTURENET_ETH_STACKS_REF }}
path: ./fixturenet-eth-stacks
progress: false
- name: Run testnet stack
working-directory: ./plugeth-statediff
run: ./scripts/run-test-stack.sh
env:
LACONIC_SO: ../laconic-so
run: ./scripts/integration-setup.sh
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: 3.10
- name: Run tests
working-directory: ./system-tests
run: |

@@ -87,14 +94,13 @@ jobs:
name: Run compliance tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
path: ./plugeth-statediff
- name: Check out compliance tests
uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
repository: cerc-io/eth-statediff-compliance
ref: v0.3.0
ref: v0.1.0
path: ./eth-statediff-compliance
token: ${{ secrets.CICD_REPO_TOKEN }}
- uses: actions/setup-go@v4

@@ -103,17 +109,24 @@ jobs:
check-latest: true
- name: Install jq
run: apt-get update && apt-get install -yq jq

- name: Update go.mod in nested modules
working-directory: ./eth-statediff-compliance/
- name: Set up Gitea access token
env:
TOKEN: ${{ secrets.CICD_REPO_TOKEN }}
run: |
set -x
./scripts/update-mod.sh ../plugeth-statediff dumpdiff-plugeth/
./scripts/update-mod.sh ../plugeth-statediff dumpdiff-plugeth-parallel/
git config --global url."https://$TOKEN:@git.vdb.to/".insteadOf https://git.vdb.to/

- name: Update go.mod for dumpdiff-geth
working-directory: ./eth-statediff-compliance/
run: ./scripts/update-mod.sh ../plugeth-statediff dumpdiff-geth/
- name: Update go.mod for dumpdiff-plugeth
working-directory: ./eth-statediff-compliance/
run: ./scripts/update-mod.sh ../plugeth-statediff dumpdiff-plugeth/
- name: Update go.mod for dumpdiff-plugeth-parallel
working-directory: ./eth-statediff-compliance/
run: ./scripts/update-mod.sh ../plugeth-statediff dumpdiff-plugeth-parallel/
- name: Build tools
working-directory: ./eth-statediff-compliance/
run: make all

- name: Compare output of geth and plugeth
working-directory: ./eth-statediff-compliance/
run: ./scripts/compare-diffs.sh geth plugeth
@@ -1,6 +1,5 @@
# Using image with same alpine as plugeth,
# but go 1.21 to evade https://github.com/Consensys/gnark-crypto/issues/468
FROM golang:1.21-alpine as builder
# Using the same base golang image as plugeth
FROM golang:1.20-alpine3.18 as builder

RUN apk add --no-cache gcc musl-dev binutils-gold linux-headers git

@@ -8,7 +7,7 @@ RUN apk add --no-cache gcc musl-dev binutils-gold linux-headers git
ARG GIT_VDBTO_TOKEN

# Get and cache deps
WORKDIR /plugeth-statediff
WORKDIR /plugeth-statediff/
COPY go.mod go.sum ./
RUN if [ -n "$GIT_VDBTO_TOKEN" ]; then git config --global url."https://$GIT_VDBTO_TOKEN:@git.vdb.to/".insteadOf "https://git.vdb.to/"; fi && \
go mod download && \
Makefile (5 changes)
@@ -1,6 +1,5 @@
MOCKGEN ?= mockgen
MOCKS_DIR := $(CURDIR)/test_helpers/mocks
BUILD_DIR ?= ./build

mocks: $(MOCKS_DIR)/gen_backend.go
.PHONY: mocks

@@ -17,8 +16,8 @@ docker-image: mocks
# Local build
BUILD_FLAGS := --trimpath

plugin: $(BUILD_DIR)/lib/statediff.so
plugin: build/statediff.so
.PHONY: plugin

$(BUILD_DIR)/lib/statediff.so: ./**/*.go
build/statediff.so: ./**/*.go
go build --tags linkgeth --buildmode=plugin -o $@ $(BUILD_FLAGS) ./main
@@ -17,7 +17,7 @@ type StateView interface {
// StateTrie is an interface exposing only the necessary methods from state.Trie
type StateTrie interface {
GetKey([]byte) []byte
NodeIterator([]byte) (trie.NodeIterator, error)
NodeIterator([]byte) trie.NodeIterator
}

// adapts a state.Database to StateView - used in tests

@@ -36,7 +36,7 @@ func (a stateDatabaseView) OpenTrie(root common.Hash) (StateTrie, error) {
}

func (a stateDatabaseView) ContractCode(hash common.Hash) ([]byte, error) {
return a.db.ContractCode(common.Address{}, hash)
return a.db.ContractCode(common.Hash{}, hash)
}

// adapts geth Trie to plugeth

@@ -46,8 +46,8 @@ type adaptTrie struct {

func NewStateTrie(t plugeth.Trie) StateTrie { return adaptTrie{t} }

func (a adaptTrie) NodeIterator(start []byte) (trie.NodeIterator, error) {
return NodeIterator(a.Trie.NodeIterator(start)), nil
func (a adaptTrie) NodeIterator(start []byte) trie.NodeIterator {
return NodeIterator(a.Trie.NodeIterator(start))
}

func NodeIterator(it plugeth.NodeIterator) trie.NodeIterator {
@@ -7,16 +7,14 @@ import (
)

func ChainConfig(cc *plugeth_params.ChainConfig) *params.ChainConfig {
ret := &params.ChainConfig{
return &params.ChainConfig{
ChainID: cc.ChainID,

HomesteadBlock: cc.HomesteadBlock,
DAOForkBlock: cc.DAOForkBlock,
DAOForkSupport: cc.DAOForkSupport,
EIP150Block: cc.EIP150Block,
EIP155Block: cc.EIP155Block,
EIP158Block: cc.EIP158Block,

ByzantiumBlock: cc.ByzantiumBlock,
ConstantinopleBlock: cc.ConstantinopleBlock,
PetersburgBlock: cc.PetersburgBlock,

@@ -24,23 +22,5 @@ func ChainConfig(cc *plugeth_params.ChainConfig) *params.ChainConfig {
MuirGlacierBlock: cc.MuirGlacierBlock,
BerlinBlock: cc.BerlinBlock,
LondonBlock: cc.LondonBlock,

ArrowGlacierBlock: cc.ArrowGlacierBlock,
GrayGlacierBlock: cc.GrayGlacierBlock,
MergeNetsplitBlock: cc.MergeNetsplitBlock,

ShanghaiTime: cc.ShanghaiTime,
CancunTime: cc.CancunTime,
PragueTime: cc.PragueTime,

TerminalTotalDifficulty: cc.TerminalTotalDifficulty,
TerminalTotalDifficultyPassed: cc.TerminalTotalDifficultyPassed,
}
if cc.Ethash != nil {
ret.Ethash = &params.EthashConfig{}
}
if cc.Clique != nil {
ret.Clique = &params.CliqueConfig{cc.Clique.Period, cc.Clique.Epoch}
}
return ret
}
builder.go (125 changes)
@@ -69,8 +69,20 @@ type accountUpdate struct {
new sdtypes.AccountWrapper
oldRoot common.Hash
}

type accountUpdateMap map[string]*accountUpdate

type accountUpdateLens struct {
state accountUpdateMap
sync.Mutex
}

func (l *accountUpdateLens) update(fn func(accountUpdateMap)) {
l.Lock()
defer l.Unlock()
fn(l.state)
}

func appender[T any](to *[]T) func(T) error {
return func(a T) error {
*to = append(*to, a)
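The hunks that follow route per-account results through this mutex-guarded map instead of writing them from inside the workers: each subtrie goroutine records old/new account state via accountUpdateLens.update, and the collected updates are only drained after errgroup.Wait returns. Below is a minimal, hedged sketch of that shape only, with hypothetical names (update, lens); it is not the repository's code, just the locked-map-plus-errgroup pattern the diff introduces.

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

// update plays the role of accountUpdate: the old and new state seen for one key.
type update struct{ oldRoot, newRoot string }

// lens plays the role of accountUpdateLens: a map that is only touched under the lock.
type lens struct {
	sync.Mutex
	state map[string]*update
}

func (l *lens) update(fn func(map[string]*update)) {
	l.Lock()
	defer l.Unlock()
	fn(l.state)
}

func main() {
	updates := lens{state: make(map[string]*update)}
	// errgroup will cancel the shared context if any worker fails, as in WriteStateDiff.
	g, _ := errgroup.WithContext(context.Background())
	for i := 0; i < 4; i++ {
		subdiv := i
		g.Go(func() error {
			key := fmt.Sprintf("leaf-%02d", subdiv)
			updates.update(func(m map[string]*update) {
				m[key] = &update{oldRoot: "old", newRoot: "new"}
			})
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		panic(err)
	}
	// Post-processing runs single-threaded, only after all workers are done.
	for key, u := range updates.state {
		fmt.Println(key, u.oldRoot, "->", u.newRoot)
	}
}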
@@ -138,17 +150,14 @@ func (sdb *builder) WriteStateDiff(
if err != nil {
return fmt.Errorf("error opening new state trie: %w", err)
}
subitersA, err := iterutils.SubtrieIterators(triea.NodeIterator, uint(sdb.subtrieWorkers))
if err != nil {
return fmt.Errorf("error creating subtrie iterators for old state trie: %w", err)
}
subitersB, err := iterutils.SubtrieIterators(trieb.NodeIterator, uint(sdb.subtrieWorkers))
if err != nil {
return fmt.Errorf("error creating subtrie iterators for new state trie: %w", err)
}
subitersA := iterutils.SubtrieIterators(triea.NodeIterator, uint(sdb.subtrieWorkers))
subitersB := iterutils.SubtrieIterators(trieb.NodeIterator, uint(sdb.subtrieWorkers))

updates := accountUpdateLens{
state: make(accountUpdateMap),
}
logger := log.New("hash", args.BlockHash, "number", args.BlockNumber)
// errgroup will cancel if any group fails
// errgroup will cancel if any worker fails
g, ctx := errgroup.WithContext(context.Background())
for i := uint(0); i < sdb.subtrieWorkers; i++ {
func(subdiv uint) {

@@ -158,28 +167,51 @@ func (sdb *builder) WriteStateDiff(
return sdb.processAccounts(ctx,
it, &it.SymmDiffState,
params.watchedAddressesLeafPaths,
nodeSink, ipldSink, logger,
nodeSink, ipldSink, &updates,
logger,
)
})
}(i)
}
return g.Wait()

if err = g.Wait(); err != nil {
return err
}

for key, update := range updates.state {
var storageDiff []sdtypes.StorageLeafNode
err := sdb.processStorageUpdates(
update.oldRoot, update.new.Account.Root,
appender(&storageDiff), ipldSink,
)
if err != nil {
return fmt.Errorf("error processing incremental storage diffs for account with leafkey %x\r\nerror: %w", key, err)
}

if err = nodeSink(sdtypes.StateLeafNode{
AccountWrapper: update.new,
StorageDiff: storageDiff,
}); err != nil {
return err
}
}
return nil
}

// WriteStateDiff writes a statediff object to output sinks
func (sdb *builder) WriteStateSnapshot(
ctx context.Context,
stateRoot common.Hash, params Params,
nodeSink sdtypes.StateNodeSink,
ipldSink sdtypes.IPLDSink,
tracker tracker.IteratorTracker,
) error {
defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.WriteStateDiffTimer)

// Load tries for old and new states
tree, err := sdb.stateCache.OpenTrie(stateRoot)
if err != nil {
return fmt.Errorf("error opening new state trie: %w", err)
}

subiters, _, err := tracker.Restore(tree.NodeIterator)
if err != nil {
return fmt.Errorf("error restoring iterators: %w", err)

@@ -192,16 +224,17 @@ func (sdb *builder) WriteStateSnapshot(
sdb.subtrieWorkers, len(subiters))
}
} else {
subiters, err = iterutils.SubtrieIterators(tree.NodeIterator, uint(sdb.subtrieWorkers))
if err != nil {
return fmt.Errorf("error creating subtrie iterators for trie: %w", err)
}
subiters = iterutils.SubtrieIterators(tree.NodeIterator, uint(sdb.subtrieWorkers))
for i := range subiters {
subiters[i] = tracker.Tracked(subiters[i])
}
}
// errgroup will cancel if any group fails
g, ctx := errgroup.WithContext(ctx)
updates := accountUpdateLens{
state: make(accountUpdateMap),
}

// errgroup will cancel if any worker fails
g, ctx := errgroup.WithContext(context.Background())
for i := range subiters {
func(subdiv uint) {
g.Go(func() error {

@@ -209,7 +242,8 @@ func (sdb *builder) WriteStateSnapshot(
return sdb.processAccounts(ctx,
subiters[subdiv], &symdiff,
params.watchedAddressesLeafPaths,
nodeSink, ipldSink, log.DefaultLogger,
nodeSink, ipldSink, &updates,
log.DefaultLogger,
)
})
}(uint(i))
@@ -224,13 +258,13 @@ func (sdb *builder) processAccounts(
it trie.NodeIterator, symdiff *utils.SymmDiffState,
watchedAddressesLeafPaths [][]byte,
nodeSink sdtypes.StateNodeSink, ipldSink sdtypes.IPLDSink,
updateLens *accountUpdateLens,
logger log.Logger,
) error {
logger.Trace("statediff/processAccounts BEGIN")
defer metrics.ReportAndUpdateDuration("statediff/processAccounts END",
time.Now(), logger, metrics.IndexerMetrics.ProcessAccountsTimer)

updates := make(accountUpdateMap)
// Cache the RLP of the previous node. When we hit a value node this will be the parent blob.
var prevBlob = it.NodeBlob()
for it.Next(true) {

@@ -254,12 +288,14 @@ func (sdb *builder) processAccounts(
copy(leafKey, it.LeafKey())

if symdiff.CommonPath() {
updateLens.update(func(updates accountUpdateMap) {
// If B also contains this leaf node, this is the old state of an updated account.
if update, ok := updates[string(leafKey)]; ok {
update.oldRoot = account.Root
} else {
updates[string(leafKey)] = &accountUpdate{oldRoot: account.Root}
}
})
} else {
// This node was removed, meaning the account was deleted. Emit empty
// "removed" records for the state node and all storage slots.

@@ -279,12 +315,14 @@ func (sdb *builder) processAccounts(
}

if symdiff.CommonPath() {
updateLens.update(func(updates accountUpdateMap) {
// If A also contains this leaf node, this is the new state of an updated account.
if update, ok := updates[string(accountW.LeafKey)]; ok {
update.new = *accountW
} else {
updates[string(accountW.LeafKey)] = &accountUpdate{new: *accountW}
}
})
} else { // account was created
err := sdb.processAccountCreation(accountW, ipldSink, nodeSink)
if err != nil {
@@ -327,24 +365,6 @@ func (sdb *builder) processAccounts(
}
prevBlob = nodeVal
}

for key, update := range updates {
var storageDiff []sdtypes.StorageLeafNode
err := sdb.processStorageUpdates(
update.oldRoot, update.new.Account.Root,
appender(&storageDiff), ipldSink,
)
if err != nil {
return fmt.Errorf("error processing incremental storage diffs for account with leafkey %x\r\nerror: %w", key, err)
}

if err = nodeSink(sdtypes.StateLeafNode{
AccountWrapper: update.new,
StorageDiff: storageDiff,
}); err != nil {
return err
}
}
return it.Error()
}

@@ -423,14 +443,12 @@ func (sdb *builder) processStorageCreations(
log.Debug("Storage root for eventual diff", "root", sr)
sTrie, err := sdb.stateCache.OpenTrie(sr)
if err != nil {
return fmt.Errorf("error opening storage trie for root %s: %w", sr, err)
log.Info("error in build storage diff eventual", "error", err)
return err
}

var prevBlob []byte
it, err := sTrie.NodeIterator(nil)
if err != nil {
return fmt.Errorf("error creating iterator for storage trie with root %s: %w", sr, err)
}
it := sTrie.NodeIterator(make([]byte, 0))
for it.Next(true) {
if it.Leaf() {
storageLeafNode := sdb.decodeStorageLeaf(it, prevBlob)

@@ -463,7 +481,7 @@ func (sdb *builder) processStorageUpdates(
if newroot == oldroot {
return nil
}
log.Debug("Storage roots for incremental diff", "old", oldroot, "new", newroot)
log.Trace("Storage roots for incremental diff", "old", oldroot, "new", newroot)
oldTrie, err := sdb.stateCache.OpenTrie(oldroot)
if err != nil {
return err

@@ -474,14 +492,7 @@ func (sdb *builder) processStorageUpdates(
}

var prevBlob []byte
a, err := oldTrie.NodeIterator(nil)
if err != nil {
return err
}
b, err := newTrie.NodeIterator(nil)
if err != nil {
return err
}
a, b := oldTrie.NodeIterator(nil), newTrie.NodeIterator(nil)
it := utils.NewSymmetricDifferenceIterator(a, b)
for it.Next(true) {
if it.FromA() {

@@ -533,12 +544,10 @@ func (sdb *builder) processRemovedAccountStorage(
log.Debug("Storage root for removed diffs", "root", sr)
sTrie, err := sdb.stateCache.OpenTrie(sr)
if err != nil {
return fmt.Errorf("error opening storage trie for root %s: %w", sr, err)
}
it, err := sTrie.NodeIterator(nil)
if err != nil {
return fmt.Errorf("error creating iterator for storage trie with root %s: %w", sr, err)
log.Info("error in build removed account storage diffs", "error", err)
return err
}
it := sTrie.NodeIterator(nil)
for it.Next(true) {
if it.Leaf() { // only leaf values are indexed, don't need to demarcate removed intermediate nodes
leafKey := make([]byte, len(it.LeafKey()))
@@ -18,13 +18,13 @@ package statediff_test

import (
"encoding/hex"
"math/big"
"testing"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/holiman/uint256"

statediff "github.com/cerc-io/plugeth-statediff"
"github.com/cerc-io/plugeth-statediff/indexer/ipld"

@@ -76,7 +76,7 @@ var (
})
contractAccountAtBlock2 = &types.StateAccount{
Nonce: 1,
Balance: uint256.NewInt(0),
Balance: big.NewInt(0),
CodeHash: common.HexToHash("0xaaea5efba4fd7b45d7ec03918ac5d8b31aa93b48986af0e6b591f0f087c80127").Bytes(),
Root: crypto.Keccak256Hash(block2StorageBranchRootNode),
}

@@ -87,7 +87,7 @@ var (
})
contractAccountAtBlock3 = &types.StateAccount{
Nonce: 1,
Balance: uint256.NewInt(0),
Balance: big.NewInt(0),
CodeHash: common.HexToHash("0xaaea5efba4fd7b45d7ec03918ac5d8b31aa93b48986af0e6b591f0f087c80127").Bytes(),
Root: crypto.Keccak256Hash(block3StorageBranchRootNode),
}

@@ -98,7 +98,7 @@ var (
})
contractAccountAtBlock4 = &types.StateAccount{
Nonce: 1,
Balance: uint256.NewInt(0),
Balance: big.NewInt(0),
CodeHash: common.HexToHash("0xaaea5efba4fd7b45d7ec03918ac5d8b31aa93b48986af0e6b591f0f087c80127").Bytes(),
Root: crypto.Keccak256Hash(block4StorageBranchRootNode),
}

@@ -109,7 +109,7 @@ var (
})
contractAccountAtBlock5 = &types.StateAccount{
Nonce: 1,
Balance: uint256.NewInt(0),
Balance: big.NewInt(0),
CodeHash: common.HexToHash("0xaaea5efba4fd7b45d7ec03918ac5d8b31aa93b48986af0e6b591f0f087c80127").Bytes(),
Root: crypto.Keccak256Hash(block5StorageBranchRootNode),
}

@@ -120,7 +120,7 @@ var (
})
minerAccountAtBlock1 = &types.StateAccount{
Nonce: 0,
Balance: uint256.NewInt(2000002625000000000),
Balance: big.NewInt(2000002625000000000),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -131,7 +131,7 @@ var (
})
minerAccountAtBlock2 = &types.StateAccount{
Nonce: 0,
Balance: uint256.NewInt(4000111203461610525),
Balance: big.NewInt(4000111203461610525),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -154,7 +154,7 @@ var (
})
account1AtBlock2 = &types.StateAccount{
Nonce: 2,
Balance: uint256.NewInt(999555797000009000),
Balance: big.NewInt(999555797000009000),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -165,7 +165,7 @@ var (
})
account1AtBlock5 = &types.StateAccount{
Nonce: 2,
Balance: uint256.NewInt(2999586469962854280),
Balance: big.NewInt(2999586469962854280),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -176,7 +176,7 @@ var (
})
account1AtBlock6 = &types.StateAccount{
Nonce: 3,
Balance: uint256.NewInt(2999557977962854280),
Balance: big.NewInt(2999557977962854280),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -187,7 +187,7 @@ var (
})
account2AtBlock2 = &types.StateAccount{
Nonce: 0,
Balance: uint256.NewInt(1000),
Balance: big.NewInt(1000),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -198,7 +198,7 @@ var (
})
account2AtBlock3 = &types.StateAccount{
Nonce: 0,
Balance: uint256.NewInt(2000013574009435976),
Balance: big.NewInt(2000013574009435976),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -209,7 +209,7 @@ var (
})
account2AtBlock4 = &types.StateAccount{
Nonce: 0,
Balance: uint256.NewInt(4000048088163070348),
Balance: big.NewInt(4000048088163070348),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -220,7 +220,7 @@ var (
})
account2AtBlock6 = &types.StateAccount{
Nonce: 0,
Balance: uint256.NewInt(6000063258066544204),
Balance: big.NewInt(6000063258066544204),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -231,7 +231,7 @@ var (
})
bankAccountAtBlock0 = &types.StateAccount{
Nonce: 0,
Balance: uint256.MustFromBig(test_helpers.TestBankFunds),
Balance: big.NewInt(test_helpers.TestBankFunds.Int64()),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -241,10 +241,10 @@ var (
bankAccountAtBlock0RLP,
})

block1BankBalance = test_helpers.TestBankFunds.Int64() - test_helpers.BalanceChange10000 - test_helpers.GasFees
block1BankBalance = big.NewInt(test_helpers.TestBankFunds.Int64() - test_helpers.BalanceChange10000 - test_helpers.GasFees)
bankAccountAtBlock1 = &types.StateAccount{
Nonce: 1,
Balance: uint256.NewInt(uint64(block1BankBalance)),
Balance: block1BankBalance,
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -254,10 +254,10 @@ var (
bankAccountAtBlock1RLP,
})

block2BankBalance = block1BankBalance - test_helpers.BalanceChange1Ether - test_helpers.GasFees
block2BankBalance = block1BankBalance.Int64() - test_helpers.BalanceChange1Ether - test_helpers.GasFees
bankAccountAtBlock2 = &types.StateAccount{
Nonce: 2,
Balance: uint256.NewInt(uint64(block2BankBalance)),
Balance: big.NewInt(block2BankBalance),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -268,7 +268,7 @@ var (
})
bankAccountAtBlock3 = &types.StateAccount{
Nonce: 3,
Balance: uint256.NewInt(999914255999990000),
Balance: big.NewInt(999914255999990000),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -279,7 +279,7 @@ var (
})
bankAccountAtBlock4 = &types.StateAccount{
Nonce: 6,
Balance: uint256.NewInt(999826859999990000),
Balance: big.NewInt(999826859999990000),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -290,7 +290,7 @@ var (
})
bankAccountAtBlock5 = &types.StateAccount{
Nonce: 8,
Balance: uint256.NewInt(999761283999990000),
Balance: big.NewInt(999761283999990000),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}
@@ -1617,7 +1617,7 @@ var (

contractAccountAtBlock01 = &types.StateAccount{
Nonce: 1,
Balance: uint256.NewInt(0),
Balance: big.NewInt(0),
CodeHash: common.HexToHash("0xaaea5efba4fd7b45d7ec03918ac5d8b31aa93b48986af0e6b591f0f087c80127").Bytes(),
Root: crypto.Keccak256Hash(block01StorageBranchRootNode),
}

@@ -1629,7 +1629,7 @@ var (

bankAccountAtBlock01 = &types.StateAccount{
Nonce: 1,
Balance: uint256.NewInt(3999629697375000000),
Balance: big.NewInt(3999629697375000000),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -1640,7 +1640,7 @@ var (
})
bankAccountAtBlock02 = &types.StateAccount{
Nonce: 2,
Balance: uint256.NewInt(5999607323457344852),
Balance: big.NewInt(5999607323457344852),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -1866,8 +1866,8 @@ contract test {
*/

var (
b = uint256.NewInt(0).Sub(uint256.MustFromBig(test_helpers.TestBIGBankFunds), test_helpers.BalanceChangeBIG)
block1BankBigBalance = uint256.NewInt(0).Sub(b, uint256.NewInt(uint64(test_helpers.GasFees2)))
b = big.NewInt(0).Sub(test_helpers.TestBIGBankFunds, test_helpers.BalanceChangeBIG)
block1BankBigBalance = big.NewInt(0).Sub(b, big.NewInt(test_helpers.GasFees2))
bankAccountAtBlock1b = &types.StateAccount{
Nonce: 1,
Balance: block1BankBigBalance,

@@ -1892,7 +1892,7 @@ var (
account1AtBlock1bRLP,
})

account1AtBlock2bBalance = uint256.MustFromDecimal("1999999999999999999999999761539571000000000")
account1AtBlock2bBalance, _ = big.NewInt(0).SetString("1999999999999999999999999761539571000000000", 10)
account1AtBlock2b = &types.StateAccount{
Nonce: 1,
Balance: account1AtBlock2bBalance,

@@ -1907,7 +1907,7 @@ var (

minerAccountAtBlock2b = &types.StateAccount{
Nonce: 0,
Balance: uint256.NewInt(4055891787808414571),
Balance: big.NewInt(4055891787808414571),
CodeHash: test_helpers.NullCodeHash.Bytes(),
Root: test_helpers.EmptyContractRoot,
}

@@ -1919,7 +1919,7 @@ var (

contractAccountAtBlock2b = &types.StateAccount{
Nonce: 1,
Balance: uint256.NewInt(0),
Balance: big.NewInt(0),
CodeHash: test_helpers.CodeHashForInternalizedLeafNode.Bytes(),
Root: crypto.Keccak256Hash(block2StorageBranchRootNode),
}

@@ -1929,7 +1929,7 @@ var (
contractAccountAtBlock2bRLP,
})

bankAccountAtBlock3bBalance = uint256.MustFromDecimal("18000000000000000000000001999920365757724976")
bankAccountAtBlock3bBalance, _ = big.NewInt(0).SetString("18000000000000000000000001999920365757724976", 10)
bankAccountAtBlock3b = &types.StateAccount{
Nonce: 3,
Balance: bankAccountAtBlock3bBalance,

@@ -1944,7 +1944,7 @@ var (

contractAccountAtBlock3b = &types.StateAccount{
Nonce: 1,
Balance: uint256.NewInt(0),
Balance: big.NewInt(0),
CodeHash: test_helpers.CodeHashForInternalizedLeafNode.Bytes(),
Root: crypto.Keccak256Hash(block3bStorageBranchRootNode),
}

@@ -1954,8 +1954,8 @@ var (
contractAccountAtBlock3bRLP,
})

slot40364 = common.BytesToHash(uint256.NewInt(40364).Bytes())
slot105566 = common.BytesToHash(uint256.NewInt(105566).Bytes())
slot40364 = common.BytesToHash(big.NewInt(40364).Bytes())
slot105566 = common.BytesToHash(big.NewInt(105566).Bytes())

slot40364StorageValue = utils.Hex2Bytes("01")
slot105566StorageValue = utils.Hex2Bytes("02")
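The builder_test.go changes above are largely mechanical: one side stores balances as *big.Int, the other as holiman/uint256 values, reflecting the StateAccount.Balance type difference between the go-ethereum versions pinned in the go.mod diff below. As a hedged illustration only (not repository code), here are the uint256/big.Int conversions those fixtures rely on, using the documented holiman/uint256 API:

package main

import (
	"fmt"
	"math/big"

	"github.com/holiman/uint256"
)

func main() {
	// From a uint64 literal, as in Balance: uint256.NewInt(999914255999990000).
	small := uint256.NewInt(999914255999990000)

	// From a *big.Int, as in uint256.MustFromBig(test_helpers.TestBankFunds).
	funds := big.NewInt(1)
	funds.Lsh(funds, 100) // an example balance too large for int64
	asU256 := uint256.MustFromBig(funds)

	// From a decimal string, as in the account1AtBlock2bBalance fixture.
	huge := uint256.MustFromDecimal("1999999999999999999999999761539571000000000")

	// Arithmetic mirrors the block1BankBigBalance computation: z.Sub(x, y).
	diff := uint256.NewInt(0).Sub(asU256, uint256.NewInt(12345))

	fmt.Println(small, asU256, huge, diff, diff.ToBig())
}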
go.mod (90 changes)
@@ -1,14 +1,13 @@
module github.com/cerc-io/plugeth-statediff

go 1.21
go 1.19

require (
github.com/cerc-io/eth-iterator-utils v0.3.1
github.com/cerc-io/eth-testing v0.5.1
github.com/ethereum/go-ethereum v1.13.14
github.com/cerc-io/eth-iterator-utils v0.1.1
github.com/cerc-io/eth-testing v0.2.1
github.com/ethereum/go-ethereum v1.11.6
github.com/georgysavva/scany v0.2.9
github.com/golang/mock v1.6.0
github.com/holiman/uint256 v1.2.4
github.com/inconshreveable/log15 v2.16.0+incompatible
github.com/ipfs/go-cid v0.2.0
github.com/jackc/pgconn v1.10.0

@@ -17,58 +16,49 @@ require (
github.com/jmoiron/sqlx v1.2.0
github.com/lib/pq v1.10.6
github.com/multiformats/go-multihash v0.1.0
github.com/openrelayxyz/plugeth-utils v1.5.0
github.com/openrelayxyz/plugeth-utils v1.2.0
github.com/pganalyze/pg_query_go/v4 v4.2.1
github.com/shopspring/decimal v1.2.0
github.com/stretchr/testify v1.8.4
github.com/stretchr/testify v1.8.2
github.com/thoas/go-funk v0.9.3
golang.org/x/sync v0.5.0
golang.org/x/sync v0.1.0
)

require (
github.com/DataDog/zstd v1.5.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.10.0 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cockroachdb/errors v1.9.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect
github.com/cockroachdb/redact v1.1.3 // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect
github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/ethereum/c-kzg-4844 v0.4.0 // indirect
github.com/ferranbt/fastssz v0.1.2 // indirect
github.com/fjl/memsize v0.0.2 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang-jwt/jwt/v4 v4.3.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/graph-gophers/graphql-go v1.3.0 // indirect
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect

@@ -84,21 +74,19 @@ require (
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.4.1 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.0.3 // indirect
github.com/multiformats/go-base36 v0.1.0 // indirect
github.com/multiformats/go-multibase v0.0.3 // indirect
github.com/multiformats/go-varint v0.0.6 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/openrelayxyz/cardinal-types v1.1.1 // indirect
github.com/opentracing/opentracing-go v1.1.0 // indirect
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
github.com/pkg/errors v0.9.1 // indirect

@@ -107,39 +95,37 @@ require (
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
github.com/rs/cors v1.8.2 // indirect
github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/stretchr/objx v0.5.0 // indirect
github.com/supranational/blst v0.3.11 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/tklauser/go-sysconf v0.3.5 // indirect
github.com/tklauser/numcpus v0.2.2 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/urfave/cli/v2 v2.25.7 // indirect
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
golang.org/x/crypto v0.17.0 // indirect
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
golang.org/x/mod v0.14.0 // indirect
golang.org/x/net v0.18.0 // indirect
golang.org/x/sys v0.16.0 // indirect
golang.org/x/term v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.15.0 // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.1.6 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)

replace (
github.com/ethereum/go-ethereum => git.vdb.to/cerc-io/plugeth v1.13.14-cerc-2
github.com/openrelayxyz/plugeth-utils => git.vdb.to/cerc-io/plugeth-utils v1.5.0-cerc-1
github.com/cerc-io/eth-iterator-utils => git.vdb.to/cerc-io/eth-iterator-utils v0.1.2
github.com/cerc-io/eth-testing => git.vdb.to/cerc-io/eth-testing v0.3.1
github.com/ethereum/go-ethereum => git.vdb.to/cerc-io/plugeth v0.0.0-20230808125822-691dc334fab1
github.com/openrelayxyz/plugeth-utils => git.vdb.to/cerc-io/plugeth-utils v0.0.0-20230706160122-cd41de354c46
)
go.sum (209 changes)
@@ -1,46 +1,39 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
git.vdb.to/cerc-io/plugeth v1.13.14-cerc-2 h1:wUnIMCUP+e/F6f/JA1Ui51AagmYkxctEcyg66QJJj0o=
git.vdb.to/cerc-io/plugeth v1.13.14-cerc-2/go.mod h1:sUMNKCsvK1Afdogl+n8QTm9hmCX4fa0X3SqE+xru89k=
git.vdb.to/cerc-io/plugeth-utils v1.5.0-cerc-1 h1:WMdo9Pb5lAn0e2WC1CcD6/mRTWwU0r2KjFoEh0mh2rs=
git.vdb.to/cerc-io/plugeth-utils v1.5.0-cerc-1/go.mod h1:Wf47tlE95PHZto1PMFRlmQAf98MBoNSRbwnQxeq0+Z0=
git.vdb.to/cerc-io/eth-iterator-utils v0.1.2 h1:PdMR5B9wrQSYuYpFhN+9Kc8AEZ0pTt5eKCmu8oCtFcY=
git.vdb.to/cerc-io/eth-iterator-utils v0.1.2/go.mod h1:OvXbdWbZ5viBXC/Ui1EkhsSmGB+AUX+TjGa3UDAfjfg=
git.vdb.to/cerc-io/eth-testing v0.3.1 h1:sPnlMev6oEgTjsW7GtUkSsjKNG/+X6P9q0izSejLGpM=
git.vdb.to/cerc-io/eth-testing v0.3.1/go.mod h1:qdvpc/W1xvf2MKx3rMOqvFvYaYIHG77Z1g0lwsmw0Uk=
git.vdb.to/cerc-io/plugeth v0.0.0-20230808125822-691dc334fab1 h1:KLjxHwp9Zp7xhECccmJS00RiL+VwTuUGLU7qeIctg8g=
git.vdb.to/cerc-io/plugeth v0.0.0-20230808125822-691dc334fab1/go.mod h1:cYXZu70+6xmDgIgrTD81GPasv16piiAFJnKyAbwVPMU=
git.vdb.to/cerc-io/plugeth-utils v0.0.0-20230706160122-cd41de354c46 h1:KYcbbne/RXd7AuxbUd/3hgk1jPN+33k2CKiNsUsMCC0=
git.vdb.to/cerc-io/plugeth-utils v0.0.0-20230706160122-cd41de354c46/go.mod h1:VpDN61dxy64zGff05F0adujR5enD/JEdXBkTQ+PaIsQ=
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/aws/aws-sdk-go v1.44.36/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88=
github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cerc-io/eth-iterator-utils v0.3.1 h1:h4Bp0+fUiwkyug1uCEO2LZr2qxoW1yKszV2EO/2CDB0=
github.com/cerc-io/eth-iterator-utils v0.3.1/go.mod h1:UNrjsP5bApZkqqqfU7nmnPN/dIIo9GOUUD79tmoX/s4=
github.com/cerc-io/eth-testing v0.5.1 h1:xxcQf9ymJS0911yWIrUiGvCvqfvEjYmHvhBJkCD/whs=
github.com/cerc-io/eth-testing v0.5.1/go.mod h1:p86je2PjSM7u8Qd7rMIG/Zw+tQlBoS5Emkh1ECnC5t0=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@@ -49,25 +42,18 @@ github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/cockroach-go/v2 v2.0.3 h1:ZA346ACHIZctef6trOTwBAEvPVm1k0uLm/bb2Atc+S8=
github.com/cockroachdb/cockroach-go/v2 v2.0.3/go.mod h1:hAuDgiVgDVkfirP9JnhXEfcXEPRKBpYdGz+l7mvYSzw=
github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA=
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4=
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8=
github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk=
github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A=
github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo=
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoGMWEhDvS3zToKcDpRsLuRolQJBVGdozk=
github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM=
github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ=
github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=

@@ -76,10 +62,6 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ=
github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs=
github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=

@@ -99,7 +81,10 @@ github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xb
github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/docker/docker v1.6.2 h1:HlFGsy+9/xrgMmhmN+NGhCc5SHGJ7I+kHosRR1xc/aI=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=

@@ -107,14 +92,10 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY=
github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/ferranbt/fastssz v0.1.2 h1:Dky6dXlngF6Qjc+EfDipAkE83N5I5DE68bY6O0VLNPk=
github.com/ferranbt/fastssz v0.1.2/go.mod h1:X5UPrE2u1UJjxHA8X54u04SBwdAQjG2sFtWs39YxyWs=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=

@@ -122,8 +103,6 @@ github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbS
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE=
github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc=
github.com/georgysavva/scany v0.2.9 h1:Xt6rjYpHnMClTm/g+oZTnoSxUwiln5GqMNU+QeLNHQU=
github.com/georgysavva/scany v0.2.9/go.mod h1:yeOeC1BdIdl6hOwy8uefL2WNSlseFzbhlG/frrh65SA=
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=

@@ -138,13 +117,11 @@ github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclK
github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=

@@ -168,8 +145,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog=
github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=

@@ -187,9 +164,9 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=

@@ -203,39 +180,33 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
|
||||
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
|
||||
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
|
||||
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
|
||||
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4=
|
||||
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c h1:DZfsyhDK1hnSS5lH8l+JggqzEleHteTYfutAiVlSUM8=
|
||||
github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
||||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
|
||||
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
|
||||
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
|
||||
github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
|
||||
github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
|
||||
github.com/inconshreveable/log15 v0.0.0-20201112154412-8562bdadbbac/go.mod h1:cOaXtrgN4ScfRrD9Bre7U1thNq5RtJ8ZoP4iXVGRj6o=
|
||||
github.com/inconshreveable/log15 v2.16.0+incompatible h1:6nvMKxtGcpgm7q0KiGs+Vc+xDvUXaBqsPKHWKsinccw=
|
||||
github.com/inconshreveable/log15 v2.16.0+incompatible/go.mod h1:cOaXtrgN4ScfRrD9Bre7U1thNq5RtJ8ZoP4iXVGRj6o=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
@ -327,13 +298,12 @@ github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+
|
||||
github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs=
|
||||
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
||||
github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
|
||||
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
|
||||
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
|
||||
github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8=
|
||||
github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE=
|
||||
@ -362,12 +332,9 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
|
||||
github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y=
|
||||
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
@ -387,7 +354,6 @@ github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope
|
||||
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
@ -396,13 +362,11 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
|
||||
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
|
||||
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-sqlite3 v2.0.1+incompatible h1:xQ15muvnzGBHpIpdrNi1DA5x0+TcBZzsIDwmw9uTHzw=
|
||||
github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
@ -421,9 +385,6 @@ github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxd
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
|
||||
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
|
||||
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
|
||||
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
|
||||
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
@ -458,8 +419,6 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/openrelayxyz/cardinal-types v1.1.1 h1:Lw6Lr/eiHYCnLi851rciCzw/1S3UytUX7kj5zh3QS/Y=
|
||||
github.com/openrelayxyz/cardinal-types v1.1.1/go.mod h1:8aaMg6i94V0hhWe3V6Fzc0RSggMx+/Kabsf5o7wMf/E=
|
||||
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
@ -484,20 +443,13 @@ github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8u
|
||||
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
|
||||
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
||||
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
||||
github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220714111606-acbb2962fb48 h1:cSo6/vk8YpvkLbk9v3FO97cakNmUoxwi2KMP8hd5WIw=
|
||||
github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220714111606-acbb2962fb48/go.mod h1:4pWaT30XoEx1j8KNJf3TV+E3mQkaufn7mf+jRNb/Fuk=
|
||||
github.com/pubnub/go-metrics-statsd v0.0.0-20170124014003-7da61f429d6b/go.mod h1:5UoZ1X6PWZWpPxwpR8qZ/qTN2BXIrrYTV9j+6TaQngA=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U=
|
||||
github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
|
||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
|
||||
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
|
||||
@ -506,7 +458,6 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/savaki/cloudmetrics v0.0.0-20160314183336-c82bfea3c09e/go.mod h1:KzTM/+pS9NbNPoC7/EBZq77Za7His7hp1NJhA0DrMns=
|
||||
github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU=
|
||||
@ -544,26 +495,24 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
|
||||
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/thoas/go-funk v0.9.3 h1:7+nAEx3kn5ZJcnDm2Bh23N2yOtweO14bi//dvRtgLpw=
|
||||
github.com/thoas/go-funk v0.9.3/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q=
|
||||
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
||||
github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4=
|
||||
github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
|
||||
github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA=
|
||||
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
|
||||
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
|
||||
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q=
|
||||
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
|
||||
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
|
||||
@ -616,11 +565,11 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
|
||||
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
||||
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
|
||||
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
|
||||
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
|
||||
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
@ -631,8 +580,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
|
||||
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@ -656,10 +603,8 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220615171555-694bf12d69de/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
|
||||
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
|
||||
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@ -669,8 +614,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
|
||||
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@ -698,6 +643,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@ -706,22 +653,15 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
|
||||
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
|
||||
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
@ -729,12 +669,12 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=
|
||||
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@ -756,20 +696,17 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8=
|
||||
golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk=
|
||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
@ -798,7 +735,6 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
|
||||
@ -808,12 +744,13 @@ gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
@ -827,5 +764,3 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
lukechampine.com/blake3 v1.1.6 h1:H3cROdztr7RCfoaTpGZFQsrqvweFLrqS73j7L7cmR5c=
|
||||
lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
|
||||
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
|
||||
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
|
||||
|
@ -70,12 +70,12 @@ func NewStateDiffIndexer(
|
||||
var driver sql.Driver
|
||||
switch pgc.Driver {
|
||||
case postgres.PGX:
|
||||
driver, err = postgres.ConnectPGXDriver(ctx, pgc, nodeInfo)
|
||||
driver, err = postgres.NewPGXDriver(ctx, pgc, nodeInfo)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
case postgres.SQLX:
|
||||
driver, err = postgres.ConnectSQLXDriver(ctx, pgc, nodeInfo)
|
||||
driver, err = postgres.NewSQLXDriver(ctx, pgc, nodeInfo)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
@ -27,7 +27,6 @@ import (
|
||||
"github.com/multiformats/go-multihash"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
@ -68,21 +67,14 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
|
||||
blockHashStr := blockHash.String()
|
||||
height := block.NumberU64()
|
||||
traceMsg := fmt.Sprintf("indexer stats for statediff at %d with hash %s:\r\n", height, blockHashStr)
|
||||
|
||||
var blobGasPrice *big.Int
|
||||
excessBlobGas := block.ExcessBlobGas()
|
||||
if excessBlobGas != nil {
|
||||
blobGasPrice = eip4844.CalcBlobFee(*excessBlobGas)
|
||||
}
|
||||
transactions := block.Transactions()
|
||||
|
||||
// Derive any missing fields
|
||||
if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, block.Time(), block.BaseFee(), blobGasPrice, transactions); err != nil {
|
||||
if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, block.BaseFee(), transactions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Generate the block iplds
|
||||
txNodes, rctNodes, logNodes, wdNodes, err := ipld.FromBlockAndReceipts(block, receipts)
|
||||
txNodes, rctNodes, logNodes, err := ipld.FromBlockAndReceipts(block, receipts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
|
||||
}
|
||||
@ -123,17 +115,14 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
|
||||
traceMsg += fmt.Sprintf("uncle processing time: %s\r\n", tDiff.String())
|
||||
t = time.Now()
|
||||
// Publish and index receipts and txs
|
||||
err = sdi.processObjects(blockTx, processArgs{
|
||||
err = sdi.processReceiptsAndTxs(blockTx, processArgs{
|
||||
headerID: headerID,
|
||||
blockNumber: block.Number(),
|
||||
blockTime: block.Time(),
|
||||
receipts: receipts,
|
||||
txs: transactions,
|
||||
withdrawals: block.Withdrawals(),
|
||||
rctNodes: rctNodes,
|
||||
txNodes: txNodes,
|
||||
logNodes: logNodes,
|
||||
wdNodes: wdNodes,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -153,7 +142,7 @@ func (sdi *StateDiffIndexer) PushHeader(batch interfaces.Batch, header *types.He
|
||||
if !ok {
|
||||
return "", fmt.Errorf("sql: batch is expected to be of type %T, got %T", &BatchTx{}, batch)
|
||||
}
|
||||
headerNode, err := ipld.EncodeHeader(header)
|
||||
headerNode, err := ipld.NewEthHeader(header)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -175,7 +164,6 @@ func (sdi *StateDiffIndexer) PushHeader(batch interfaces.Batch, header *types.He
|
||||
Timestamp: header.Time,
|
||||
Coinbase: header.Coinbase.String(),
|
||||
Canonical: true,
|
||||
WithdrawalsRoot: shared.MaybeStringHash(header.WithdrawalsHash),
|
||||
}
|
||||
_, err = fmt.Fprintf(sdi.dump, "%+v\r\n", mod)
|
||||
return headerID, err
|
||||
@ -225,20 +213,17 @@ func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNu
|
||||
type processArgs struct {
|
||||
headerID string
|
||||
blockNumber *big.Int
|
||||
blockTime uint64
|
||||
receipts types.Receipts
|
||||
txs types.Transactions
|
||||
withdrawals types.Withdrawals
|
||||
rctNodes []ipld.IPLD
|
||||
txNodes []ipld.IPLD
|
||||
logNodes [][]ipld.IPLD
|
||||
wdNodes []ipld.IPLD
|
||||
rctNodes []*ipld.EthReceipt
|
||||
txNodes []*ipld.EthTx
|
||||
logNodes [][]*ipld.EthLog
|
||||
}
|
||||
|
||||
// processObjects publishes and indexes receipt and transaction IPLDs in Postgres
|
||||
func (sdi *StateDiffIndexer) processObjects(tx *BatchTx, args processArgs) error {
|
||||
// processReceiptsAndTxs publishes and indexes receipt and transaction IPLDs in Postgres
|
||||
func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs) error {
|
||||
// Process receipts and txs
|
||||
signer := types.MakeSigner(sdi.chainConfig, args.blockNumber, args.blockTime)
|
||||
signer := types.MakeSigner(sdi.chainConfig, args.blockNumber)
|
||||
for i, receipt := range args.receipts {
|
||||
txNode := args.txNodes[i]
|
||||
tx.cacheIPLD(txNode)
|
||||
@ -273,20 +258,6 @@ func (sdi *StateDiffIndexer) processObjects(tx *BatchTx, args processArgs) error
|
||||
return err
|
||||
}
|
||||
|
||||
if trx.Type() == types.BlobTxType {
|
||||
blobHashes := trx.BlobHashes()
|
||||
for i, hash := range blobHashes {
|
||||
bhModel := models.BlobHashModel{
|
||||
TxHash: trxID,
|
||||
Index: uint64(i),
|
||||
BlobHash: hash,
|
||||
}
|
||||
if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", bhModel); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// this is the contract address if this receipt is for a contract creation tx
|
||||
contract := shared.HandleZeroAddr(receipt.ContractAddress)
|
||||
|
||||
@ -333,23 +304,6 @@ func (sdi *StateDiffIndexer) processObjects(tx *BatchTx, args processArgs) error
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Process withdrawals
|
||||
for i, withdrawal := range args.withdrawals {
|
||||
wdNode := args.wdNodes[i]
|
||||
tx.cacheIPLD(wdNode)
|
||||
wdModel := models.WithdrawalModel{
|
||||
BlockNumber: args.blockNumber.String(),
|
||||
HeaderID: args.headerID,
|
||||
CID: wdNode.Cid().String(),
|
||||
Index: withdrawal.Index,
|
||||
Validator: withdrawal.Validator,
|
||||
Address: withdrawal.Address.String(),
|
||||
Amount: withdrawal.Amount,
|
||||
}
|
||||
if _, err := fmt.Fprintf(sdi.dump, "%+v\r\n", wdModel); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -24,7 +24,6 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
@ -56,8 +55,6 @@ func setupLegacyCSVIndexer(t *testing.T) {
|
||||
func setupLegacyCSV(t *testing.T) {
|
||||
setupLegacyCSVIndexer(t)
|
||||
test.SetupLegacyTestData(t, ind)
|
||||
t.Cleanup(func() { tearDownCSV(t) })
|
||||
time.Sleep(delayForDockerSync)
|
||||
}
|
||||
|
||||
func dumpCSVFileData(t *testing.T) {
|
||||
@ -67,7 +64,7 @@ func dumpCSVFileData(t *testing.T) {
|
||||
|
||||
localOutputDir := filepath.Join(workingDir, file.CSVTestConfig.OutputDir)
|
||||
|
||||
for _, tbl := range schema.EthTables {
|
||||
for _, tbl := range file.Tables {
|
||||
err := test_helpers.DedupFile(file.TableFilePath(localOutputDir, tbl.Name))
|
||||
require.NoError(t, err)
|
||||
|
||||
@ -92,7 +89,6 @@ func dumpCSVFileData(t *testing.T) {
|
||||
func resetAndDumpWatchedAddressesCSVFileData(t *testing.T) {
|
||||
test_helpers.TearDownDB(t, db)
|
||||
|
||||
time.Sleep(delayForDockerSync)
|
||||
outputFilePath := filepath.Join(dbDirectory, file.CSVTestConfig.WatchedAddressesFilePath)
|
||||
stmt := fmt.Sprintf(pgCopyStatement, schema.TableWatchedAddresses.Name, outputFilePath)
|
||||
|
||||
@ -115,6 +111,7 @@ func TestLegacyCSVFileIndexer(t *testing.T) {
|
||||
t.Run("Publish and index header IPLDs", func(t *testing.T) {
|
||||
setupLegacyCSV(t)
|
||||
dumpCSVFileData(t)
|
||||
defer tearDownCSV(t)
|
||||
|
||||
test.TestLegacyIndexer(t, db)
|
||||
})
|
||||
|
@ -21,7 +21,6 @@ import (
|
||||
"math/big"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
@ -31,9 +30,6 @@ import (
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/test"
|
||||
)
|
||||
|
||||
// docker bind mount is slow to sync files
|
||||
var delayForDockerSync = 1 * time.Second
|
||||
|
||||
func setupCSVIndexer(t *testing.T) {
|
||||
if _, err := os.Stat(file.CSVTestConfig.OutputDir); !errors.Is(err, os.ErrNotExist) {
|
||||
err := os.RemoveAll(file.CSVTestConfig.OutputDir)
|
||||
@ -57,21 +53,18 @@ func setupCSVIndexer(t *testing.T) {
|
||||
func setupCSV(t *testing.T) {
|
||||
setupCSVIndexer(t)
|
||||
test.SetupTestData(t, ind)
|
||||
t.Cleanup(func() { tearDownCSV(t) })
|
||||
time.Sleep(delayForDockerSync)
|
||||
}
|
||||
|
||||
func setupCSVNonCanonical(t *testing.T) {
|
||||
setupCSVIndexer(t)
|
||||
test.SetupTestDataNonCanonical(t, ind)
|
||||
t.Cleanup(func() { tearDownCSV(t) })
|
||||
time.Sleep(delayForDockerSync)
|
||||
}
|
||||
|
||||
func TestCSVFileIndexer(t *testing.T) {
|
||||
t.Run("Publish and index header IPLDs in a single tx", func(t *testing.T) {
|
||||
setupCSV(t)
|
||||
dumpCSVFileData(t)
|
||||
defer tearDownCSV(t)
|
||||
|
||||
test.DoTestPublishAndIndexHeaderIPLDs(t, db)
|
||||
})
|
||||
@ -79,6 +72,7 @@ func TestCSVFileIndexer(t *testing.T) {
|
||||
t.Run("Publish and index transaction IPLDs in a single tx", func(t *testing.T) {
|
||||
setupCSV(t)
|
||||
dumpCSVFileData(t)
|
||||
defer tearDownCSV(t)
|
||||
|
||||
test.DoTestPublishAndIndexTransactionIPLDs(t, db)
|
||||
})
|
||||
@ -86,6 +80,7 @@ func TestCSVFileIndexer(t *testing.T) {
|
||||
t.Run("Publish and index log IPLDs for multiple receipt of a specific block", func(t *testing.T) {
|
||||
setupCSV(t)
|
||||
dumpCSVFileData(t)
|
||||
defer tearDownCSV(t)
|
||||
|
||||
test.DoTestPublishAndIndexLogIPLDs(t, db)
|
||||
})
|
||||
@ -93,20 +88,15 @@ func TestCSVFileIndexer(t *testing.T) {
|
||||
t.Run("Publish and index receipt IPLDs in a single tx", func(t *testing.T) {
|
||||
setupCSV(t)
|
||||
dumpCSVFileData(t)
|
||||
defer tearDownCSV(t)
|
||||
|
||||
test.DoTestPublishAndIndexReceiptIPLDs(t, db)
|
||||
})
|
||||
|
||||
t.Run("Publish and index withdrawal IPLDs in a single tx", func(t *testing.T) {
|
||||
setupCSV(t)
|
||||
dumpCSVFileData(t)
|
||||
|
||||
test.DoTestPublishAndIndexWithdrawalIPLDs(t, db)
|
||||
})
|
||||
|
||||
t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) {
|
||||
setupCSV(t)
|
||||
dumpCSVFileData(t)
|
||||
defer tearDownCSV(t)
|
||||
|
||||
test.DoTestPublishAndIndexStateIPLDs(t, db)
|
||||
})
|
||||
@ -114,6 +104,7 @@ func TestCSVFileIndexer(t *testing.T) {
|
||||
t.Run("Publish and index storage IPLDs in a single tx", func(t *testing.T) {
|
||||
setupCSV(t)
|
||||
dumpCSVFileData(t)
|
||||
defer tearDownCSV(t)
|
||||
|
||||
test.DoTestPublishAndIndexStorageIPLDs(t, db)
|
||||
})
|
||||
@ -123,6 +114,7 @@ func TestCSVFileIndexerNonCanonical(t *testing.T) {
|
||||
t.Run("Publish and index header", func(t *testing.T) {
|
||||
setupCSVNonCanonical(t)
|
||||
dumpCSVFileData(t)
|
||||
defer tearDownCSV(t)
|
||||
|
||||
test.TestPublishAndIndexHeaderNonCanonical(t, db)
|
||||
})
|
||||
@ -130,6 +122,7 @@ func TestCSVFileIndexerNonCanonical(t *testing.T) {
|
||||
t.Run("Publish and index transactions", func(t *testing.T) {
|
||||
setupCSVNonCanonical(t)
|
||||
dumpCSVFileData(t)
|
||||
defer tearDownCSV(t)
|
||||
|
||||
test.DoTestPublishAndIndexTransactionsNonCanonical(t, db)
|
||||
})
|
||||
@ -137,6 +130,7 @@ func TestCSVFileIndexerNonCanonical(t *testing.T) {
|
||||
t.Run("Publish and index receipts", func(t *testing.T) {
|
||||
setupCSVNonCanonical(t)
|
||||
dumpCSVFileData(t)
|
||||
defer tearDownCSV(t)
|
||||
|
||||
test.DoTestPublishAndIndexReceiptsNonCanonical(t, db)
|
||||
})
|
||||
@ -144,6 +138,7 @@ func TestCSVFileIndexerNonCanonical(t *testing.T) {
|
||||
t.Run("Publish and index logs", func(t *testing.T) {
|
||||
setupCSVNonCanonical(t)
|
||||
dumpCSVFileData(t)
|
||||
defer tearDownCSV(t)
|
||||
|
||||
test.DoTestPublishAndIndexLogsNonCanonical(t, db)
|
||||
})
|
||||
@ -151,6 +146,7 @@ func TestCSVFileIndexerNonCanonical(t *testing.T) {
|
||||
t.Run("Publish and index state nodes", func(t *testing.T) {
|
||||
setupCSVNonCanonical(t)
|
||||
dumpCSVFileData(t)
|
||||
defer tearDownCSV(t)
|
||||
|
||||
test.DoTestPublishAndIndexStateNonCanonical(t, db)
|
||||
})
|
||||
@ -158,6 +154,7 @@ func TestCSVFileIndexerNonCanonical(t *testing.T) {
|
||||
t.Run("Publish and index storage nodes", func(t *testing.T) {
|
||||
setupCSVNonCanonical(t)
|
||||
dumpCSVFileData(t)
|
||||
defer tearDownCSV(t)
|
||||
|
||||
test.DoTestPublishAndIndexStorageNonCanonical(t, db)
|
||||
})
|
||||
|
@ -36,8 +36,22 @@ import (
|
||||
sdtypes "github.com/cerc-io/plugeth-statediff/types"
|
||||
)
|
||||
|
||||
var (
|
||||
Tables = []*schema.Table{
|
||||
&schema.TableIPLDBlock,
|
||||
&schema.TableNodeInfo,
|
||||
&schema.TableHeader,
|
||||
&schema.TableStateNode,
|
||||
&schema.TableStorageNode,
|
||||
&schema.TableUncle,
|
||||
&schema.TableTransaction,
|
||||
&schema.TableReceipt,
|
||||
&schema.TableLog,
|
||||
}
|
||||
)
|
||||
|
||||
type tableRow struct {
|
||||
table *schema.Table
|
||||
table schema.Table
|
||||
values []interface{}
|
||||
}
|
||||
|
||||
@ -120,7 +134,7 @@ func NewCSVWriter(path string, watchedAddressesFilePath string, diff bool) (*CSV
|
||||
return nil, fmt.Errorf("unable to create directory '%s': %w", path, err)
|
||||
}
|
||||
|
||||
writers, err := makeFileWriters(path, schema.EthTables)
|
||||
writers, err := makeFileWriters(path, Tables)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -150,7 +164,7 @@ func (csw *CSVWriter) Loop() {
|
||||
for {
|
||||
select {
|
||||
case row := <-csw.rows:
|
||||
err := csw.writers.write(row.table, row.values...)
|
||||
err := csw.writers.write(&row.table, row.values...)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("error writing csv buffer: %v", err))
|
||||
}
|
||||
@ -190,13 +204,13 @@ func (csw *CSVWriter) Close() error {
|
||||
func (csw *CSVWriter) upsertNode(node nodeinfo.Info) {
|
||||
var values []interface{}
|
||||
values = append(values, node.GenesisBlock, node.NetworkID, node.ID, node.ClientName, node.ChainID)
|
||||
csw.rows <- tableRow{&schema.TableNodeInfo, values}
|
||||
csw.rows <- tableRow{schema.TableNodeInfo, values}
|
||||
}
|
||||
|
||||
func (csw *CSVWriter) upsertIPLD(ipld models.IPLDModel) {
|
||||
var values []interface{}
|
||||
values = append(values, ipld.BlockNumber, ipld.Key, ipld.Data)
|
||||
csw.rows <- tableRow{&schema.TableIPLDBlock, values}
|
||||
csw.rows <- tableRow{schema.TableIPLDBlock, values}
|
||||
}
|
||||
|
||||
func (csw *CSVWriter) upsertIPLDDirect(blockNumber, key string, value []byte) {
|
||||
@ -217,25 +231,11 @@ func (csw *CSVWriter) upsertIPLDNode(blockNumber string, i ipld.IPLD) {
|
||||
|
||||
func (csw *CSVWriter) upsertHeaderCID(header models.HeaderModel) {
|
||||
var values []interface{}
|
||||
values = append(values,
|
||||
header.BlockNumber,
|
||||
header.BlockHash,
|
||||
header.ParentHash,
|
||||
header.CID,
|
||||
header.TotalDifficulty,
|
||||
header.NodeIDs,
|
||||
header.Reward,
|
||||
header.StateRoot,
|
||||
header.TxRoot,
|
||||
header.RctRoot,
|
||||
header.UnclesHash,
|
||||
header.Bloom,
|
||||
strconv.FormatUint(header.Timestamp, 10),
|
||||
header.Coinbase,
|
||||
header.Canonical,
|
||||
header.WithdrawalsRoot,
|
||||
)
|
||||
csw.rows <- tableRow{&schema.TableHeader, values}
|
||||
values = append(values, header.BlockNumber, header.BlockHash, header.ParentHash, header.CID,
|
||||
header.TotalDifficulty, header.NodeIDs, header.Reward, header.StateRoot, header.TxRoot,
|
||||
header.RctRoot, header.UnclesHash, header.Bloom, strconv.FormatUint(header.Timestamp, 10), header.Coinbase,
|
||||
header.Canonical)
|
||||
csw.rows <- tableRow{schema.TableHeader, values}
|
||||
metrics.IndexerMetrics.BlocksCounter.Inc(1)
|
||||
}
|
||||
|
||||
@ -243,29 +243,22 @@ func (csw *CSVWriter) upsertUncleCID(uncle models.UncleModel) {
|
||||
var values []interface{}
|
||||
values = append(values, uncle.BlockNumber, uncle.BlockHash, uncle.HeaderID, uncle.ParentHash, uncle.CID,
|
||||
uncle.Reward, uncle.Index)
|
||||
csw.rows <- tableRow{&schema.TableUncle, values}
|
||||
csw.rows <- tableRow{schema.TableUncle, values}
|
||||
}
|
||||
|
||||
func (csw *CSVWriter) upsertTransactionCID(transaction models.TxModel) {
|
||||
var values []interface{}
|
||||
values = append(values, transaction.BlockNumber, transaction.HeaderID, transaction.TxHash, transaction.CID, transaction.Dst,
|
||||
transaction.Src, transaction.Index, transaction.Type, transaction.Value)
|
||||
csw.rows <- tableRow{&schema.TableTransaction, values}
|
||||
csw.rows <- tableRow{schema.TableTransaction, values}
|
||||
metrics.IndexerMetrics.TransactionsCounter.Inc(1)
|
||||
}
|
||||
|
||||
func (csw *CSVWriter) upsertBlobHash(blobHash models.BlobHashModel) {
|
||||
var values []interface{}
|
||||
values = append(values, blobHash.TxHash, blobHash.Index, blobHash.BlobHash)
|
||||
csw.rows <- tableRow{&schema.TableBlobHash, values}
|
||||
metrics.IndexerMetrics.BlobHashCounter.Inc(1)
|
||||
}
|
||||
|
||||
func (csw *CSVWriter) upsertReceiptCID(rct *models.ReceiptModel) {
|
||||
var values []interface{}
|
||||
values = append(values, rct.BlockNumber, rct.HeaderID, rct.TxID, rct.CID, rct.Contract,
|
||||
rct.PostState, rct.PostStatus)
|
||||
csw.rows <- tableRow{&schema.TableReceipt, values}
|
||||
csw.rows <- tableRow{schema.TableReceipt, values}
|
||||
metrics.IndexerMetrics.ReceiptsCounter.Inc(1)
|
||||
}
|
||||
|
||||
@ -274,26 +267,11 @@ func (csw *CSVWriter) upsertLogCID(logs []*models.LogsModel) {
|
||||
var values []interface{}
|
||||
values = append(values, l.BlockNumber, l.HeaderID, l.CID, l.ReceiptID, l.Address, l.Index, l.Topic0,
|
||||
l.Topic1, l.Topic2, l.Topic3)
|
||||
csw.rows <- tableRow{&schema.TableLog, values}
|
||||
csw.rows <- tableRow{schema.TableLog, values}
|
||||
metrics.IndexerMetrics.LogsCounter.Inc(1)
|
||||
}
|
||||
}
|
||||
|
||||
func (csw *CSVWriter) upsertWithdrawalCID(withdrawal models.WithdrawalModel) {
|
||||
var values []interface{}
|
||||
values = append(values,
|
||||
withdrawal.BlockNumber,
|
||||
withdrawal.HeaderID,
|
||||
withdrawal.CID,
|
||||
withdrawal.Index,
|
||||
withdrawal.Validator,
|
||||
withdrawal.Address,
|
||||
withdrawal.Amount,
|
||||
)
|
||||
csw.rows <- tableRow{&schema.TableWithdrawal, values}
|
||||
metrics.IndexerMetrics.WithdrawalsCounter.Inc(1)
|
||||
}
|
||||
|
||||
func (csw *CSVWriter) upsertStateCID(stateNode models.StateNodeModel) {
|
||||
balance := stateNode.Balance
|
||||
if stateNode.Removed {
|
||||
@ -303,14 +281,14 @@ func (csw *CSVWriter) upsertStateCID(stateNode models.StateNodeModel) {
|
||||
var values []interface{}
|
||||
values = append(values, stateNode.BlockNumber, stateNode.HeaderID, stateNode.StateKey, stateNode.CID,
|
||||
csw.isDiff, balance, strconv.FormatUint(stateNode.Nonce, 10), stateNode.CodeHash, stateNode.StorageRoot, stateNode.Removed)
|
||||
csw.rows <- tableRow{&schema.TableStateNode, values}
|
||||
csw.rows <- tableRow{schema.TableStateNode, values}
|
||||
}
|
||||
|
||||
func (csw *CSVWriter) upsertStorageCID(storageCID models.StorageNodeModel) {
|
||||
var values []interface{}
|
||||
values = append(values, storageCID.BlockNumber, storageCID.HeaderID, storageCID.StateKey, storageCID.StorageKey, storageCID.CID,
|
||||
csw.isDiff, storageCID.Value, storageCID.Removed)
|
||||
csw.rows <- tableRow{&schema.TableStorageNode, values}
|
||||
csw.rows <- tableRow{schema.TableStorageNode, values}
|
||||
}
|
||||
|
||||
// LoadWatchedAddresses loads watched addresses from a file
|
||||
|
@ -27,7 +27,6 @@ import (
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
@ -84,7 +83,7 @@ func NewStateDiffIndexer(chainConfig *params.ChainConfig, config Config, nodeInf
if _, err := os.Stat(outputDir); !errors.Is(err, os.ErrNotExist) {
return nil, fmt.Errorf("cannot create output directory, directory (%s) already exists", outputDir)
}
log.Info("Writing statediff CSV files", "directory", outputDir)
log.Info("Writing statediff CSV files to directory", "file", outputDir)

if watchedAddressesFilePath == "" {
watchedAddressesFilePath = defaultWatchedAddressesCSVFilePath
@ -142,21 +141,14 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
blockHashStr := blockHash.String()
height := block.NumberU64()
traceMsg := fmt.Sprintf("indexer stats for statediff at %d with hash %s:\r\n", height, blockHashStr)

var blobGasPrice *big.Int
excessBlobGas := block.ExcessBlobGas()
if excessBlobGas != nil {
blobGasPrice = eip4844.CalcBlobFee(*excessBlobGas)
}
transactions := block.Transactions()

// Derive any missing fields
if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, block.Time(), block.BaseFee(), blobGasPrice, transactions); err != nil {
if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, block.BaseFee(), transactions); err != nil {
return nil, err
}

// Generate the block iplds
txNodes, rctNodes, logNodes, wdNodes, err := ipld.FromBlockAndReceipts(block, receipts)
txNodes, rctNodes, logNodes, err := ipld.FromBlockAndReceipts(block, receipts)
if err != nil {
return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %v", err)
}
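The hunk above derives a blob gas price only when the block actually carries excess blob gas, so pre-Cancun blocks pass nil through to receipts.DeriveFields. A minimal standalone sketch of that guard follows, assuming the go-ethereum release this branch builds against, where eip4844.CalcBlobFee takes the excess blob gas value directly, exactly as the hunk itself does; the package name is illustrative.

package indexer

import (
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
	"github.com/ethereum/go-ethereum/core/types"
)

// blobGasPrice mirrors the guard in PushBlock: blocks without excess blob gas
// (pre-Cancun) yield a nil price, which DeriveFields accepts.
func blobGasPrice(block *types.Block) *big.Int {
	excess := block.ExcessBlobGas()
	if excess == nil {
		return nil
	}
	return eip4844.CalcBlobFee(*excess)
}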
@ -191,26 +183,21 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
t = time.Now()

// write uncles
err = sdi.processUncles(headerID, block.Number(), block.UncleHash(), block.Uncles())
if err != nil {
return nil, err
}
sdi.processUncles(headerID, block.Number(), block.UncleHash(), block.Uncles())
tDiff = time.Since(t)
metrics.IndexerMetrics.UncleProcessingTimer.Update(tDiff)
traceMsg += fmt.Sprintf("uncle processing time: %s\r\n", tDiff.String())
t = time.Now()

err = sdi.processObjects(processArgs{
// write receipts and txs
err = sdi.processReceiptsAndTxs(processArgs{
headerID: headerID,
blockNumber: block.Number(),
blockTime: block.Time(),
receipts: receipts,
txs: transactions,
withdrawals: block.Withdrawals(),
rctNodes: rctNodes,
txNodes: txNodes,
logNodes: logNodes,
wdNodes: wdNodes,
})
if err != nil {
return nil, err
@ -227,7 +214,7 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
// it returns the headerID
func (sdi *StateDiffIndexer) PushHeader(_ interfaces.Batch, header *types.Header, reward, td *big.Int) (string, error) {
// Process the header
headerNode, err := ipld.EncodeHeader(header)
headerNode, err := ipld.NewEthHeader(header)
if err != nil {
return "", err
}
@ -250,7 +237,6 @@ func (sdi *StateDiffIndexer) PushHeader(_ interfaces.Batch, header *types.Header
Timestamp: header.Time,
Coinbase: header.Coinbase.String(),
Canonical: true,
WithdrawalsRoot: shared.MaybeStringHash(header.WithdrawalsHash),
})
return headerID, nil
}
@ -296,20 +282,17 @@ func (sdi *StateDiffIndexer) processUncles(headerID string, blockNumber *big.Int
type processArgs struct {
headerID string
blockNumber *big.Int
blockTime uint64
receipts types.Receipts
txs types.Transactions
withdrawals types.Withdrawals
rctNodes []ipld.IPLD
txNodes []ipld.IPLD
logNodes [][]ipld.IPLD
wdNodes []ipld.IPLD
rctNodes []*ipld.EthReceipt
txNodes []*ipld.EthTx
logNodes [][]*ipld.EthLog
}

// processObjects writes receipt and tx IPLD insert SQL stmts to a file
func (sdi *StateDiffIndexer) processObjects(args processArgs) error {
// processReceiptsAndTxs writes receipt and tx IPLD insert SQL stmts to a file
func (sdi *StateDiffIndexer) processReceiptsAndTxs(args processArgs) error {
// Process receipts and txs
signer := types.MakeSigner(sdi.chainConfig, args.blockNumber, args.blockTime)
signer := types.MakeSigner(sdi.chainConfig, args.blockNumber)
for i, receipt := range args.receipts {
txNode := args.txNodes[i]
sdi.fileWriter.upsertIPLDNode(args.blockNumber.String(), txNode)
@ -342,17 +325,6 @@ func (sdi *StateDiffIndexer) processObjects(args processArgs) error {
}
sdi.fileWriter.upsertTransactionCID(txModel)

if trx.Type() == types.BlobTxType {
blobHashes := trx.BlobHashes()
for i, hash := range blobHashes {
sdi.fileWriter.upsertBlobHash(models.BlobHashModel{
TxHash: txID,
Index: uint64(i),
BlobHash: hash,
})
}
}

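The loop above records one row per versioned blob hash of an EIP-4844 transaction. A sketch of the model it fills in is below; the field names are taken from the loop and from the "INSERT INTO eth.blob_hashes (tx_hash, index, blob_hash) ..." statement that appears later in this diff, while the concrete field types are inferred from usage (txID is a string, the index is uint64(i), and tx.BlobHashes() yields common.Hash values) and are assumptions rather than the package's actual definition.

package models

import "github.com/ethereum/go-ethereum/common"

// BlobHashModel (sketch): one row of eth.blob_hashes.
type BlobHashModel struct {
	TxHash   string      // hash of the enclosing blob transaction
	Index    uint64      // position within tx.BlobHashes()
	BlobHash common.Hash // versioned hash committing to the blob
}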
// this is the contract address if this receipt is for a contract creation tx
contract := shared.HandleZeroAddr(receipt.ContractAddress)

@ -395,21 +367,6 @@ func (sdi *StateDiffIndexer) processObjects(args processArgs) error {
}
sdi.fileWriter.upsertLogCID(logDataSet)
}
// Process withdrawals
for i, wd := range args.withdrawals {
wdNode := args.wdNodes[i]
sdi.fileWriter.upsertIPLDNode(args.blockNumber.String(), wdNode)
wdModel := models.WithdrawalModel{
BlockNumber: args.blockNumber.String(),
HeaderID: args.headerID,
CID: wdNode.Cid().String(),
Index: wd.Index,
Validator: wd.Validator,
Address: wd.Address.String(),
Amount: wd.Amount,
}
sdi.fileWriter.upsertWithdrawalCID(wdModel)
}

return nil
}
@ -39,10 +39,8 @@ type FileWriter interface {
upsertHeaderCID(header models.HeaderModel)
upsertUncleCID(uncle models.UncleModel)
upsertTransactionCID(transaction models.TxModel)
upsertBlobHash(models.BlobHashModel)
upsertReceiptCID(rct *models.ReceiptModel)
upsertLogCID(logs []*models.LogsModel)
upsertWithdrawalCID(models.WithdrawalModel)
upsertStateCID(stateNode models.StateNodeModel)
upsertStorageCID(storageCID models.StorageNodeModel)
upsertIPLD(ipld models.IPLDModel)
@ -93,14 +93,6 @@ func TestSQLFileIndexer(t *testing.T) {
test.DoTestPublishAndIndexReceiptIPLDs(t, db)
})

t.Run("Publish and index withdrawal IPLDs in a single tx", func(t *testing.T) {
setup(t)
dumpFileData(t)
defer tearDown(t)

test.DoTestPublishAndIndexWithdrawalIPLDs(t, db)
})

t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) {
setup(t)
dumpFileData(t)
@ -32,7 +32,6 @@ import (
"github.com/cerc-io/plugeth-statediff/indexer/ipld"
"github.com/cerc-io/plugeth-statediff/indexer/models"
nodeinfo "github.com/cerc-io/plugeth-statediff/indexer/node"
"github.com/cerc-io/plugeth-statediff/indexer/shared/schema"
"github.com/cerc-io/plugeth-statediff/types"
)

@ -146,8 +145,8 @@ const (
ipldInsert = "INSERT INTO ipld.blocks (block_number, key, data) VALUES ('%s', '%s', '\\x%x');\n"

headerInsert = "INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_ids, reward, " +
"state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, coinbase, canonical, withdrawals_root) VALUES " +
"('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '\\x%x', %d, '%s', %t, '%s');\n"
"state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, coinbase, canonical) VALUES " +
"('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '\\x%x', %d, '%s', %t);\n"

uncleInsert = "INSERT INTO eth.uncle_cids (block_number, block_hash, header_id, parent_hash, cid, reward, index) VALUES " +
"('%s', '%s', '%s', '%s', '%s', '%s', %d);\n"
@ -168,11 +167,6 @@ const (
"removed, diff, val) VALUES ('%s', '%s', '%s', '%s', '%s', %t, %t, '\\x%x');\n"
)

var (
withdrawalsInsert = schema.TableWithdrawal.FmtStringInsert() + ";\n"
blobHashesInsert = schema.TableBlobHash.FmtStringInsert() + ";\n"
)

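These statements are plain printf-style templates that the writer later fills with fmt.Sprintf. A small sketch rendering the ipldInsert template shown above follows; the format string is copied from the hunk, while the block number, key, and payload are made-up sample values.

package main

import "fmt"

const ipldInsert = "INSERT INTO ipld.blocks (block_number, key, data) VALUES ('%s', '%s', '\\x%x');\n"

func main() {
	// Illustrative values only: a block height, a CID key, and two raw bytes.
	stmt := fmt.Sprintf(ipldInsert, "19000000", "bagiacgza...", []byte{0xf8, 0x01})
	fmt.Print(stmt)
	// Prints: INSERT INTO ipld.blocks (block_number, key, data) VALUES ('19000000', 'bagiacgza...', '\xf801');
}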
func (sqw *SQLWriter) upsertNode(node nodeinfo.Info) {
sqw.stmts <- []byte(fmt.Sprintf(nodeInsert, node.GenesisBlock, node.NetworkID, node.ID, node.ClientName, node.ChainID))
}
@ -198,24 +192,9 @@ func (sqw *SQLWriter) upsertIPLDNode(blockNumber string, i ipld.IPLD) {
}

func (sqw *SQLWriter) upsertHeaderCID(header models.HeaderModel) {
stmt := fmt.Sprintf(headerInsert,
header.BlockNumber,
header.BlockHash,
header.ParentHash,
header.CID,
header.TotalDifficulty,
formatPostgresStringArray(header.NodeIDs),
header.Reward,
header.StateRoot,
header.TxRoot,
header.RctRoot,
header.UnclesHash,
header.Bloom,
header.Timestamp,
header.Coinbase,
header.Canonical,
header.WithdrawalsRoot,
)
stmt := fmt.Sprintf(headerInsert, header.BlockNumber, header.BlockHash, header.ParentHash, header.CID,
header.TotalDifficulty, formatPostgresStringArray(header.NodeIDs), header.Reward, header.StateRoot, header.TxRoot,
header.RctRoot, header.UnclesHash, header.Bloom, header.Timestamp, header.Coinbase, header.Canonical)
sqw.stmts <- []byte(stmt)
metrics.IndexerMetrics.BlocksCounter.Inc(1)
}
@ -231,11 +210,6 @@ func (sqw *SQLWriter) upsertTransactionCID(transaction models.TxModel) {
metrics.IndexerMetrics.TransactionsCounter.Inc(1)
}

func (sqw *SQLWriter) upsertBlobHash(bh models.BlobHashModel) {
sqw.stmts <- []byte(fmt.Sprintf(blobHashesInsert, bh.TxHash, bh.Index, bh.BlobHash))
metrics.IndexerMetrics.BlobHashCounter.Inc(1)
}

func (sqw *SQLWriter) upsertReceiptCID(rct *models.ReceiptModel) {
sqw.stmts <- []byte(fmt.Sprintf(rctInsert, rct.BlockNumber, rct.HeaderID, rct.TxID, rct.CID, rct.Contract,
rct.PostState, rct.PostStatus))
@ -250,19 +224,6 @@ func (sqw *SQLWriter) upsertLogCID(logs []*models.LogsModel) {
}
}

func (sqw *SQLWriter) upsertWithdrawalCID(withdrawal models.WithdrawalModel) {
sqw.stmts <- []byte(fmt.Sprintf(withdrawalsInsert,
withdrawal.BlockNumber,
withdrawal.HeaderID,
withdrawal.CID,
withdrawal.Index,
withdrawal.Validator,
withdrawal.Address,
withdrawal.Amount,
))
metrics.IndexerMetrics.WithdrawalsCounter.Inc(1)
}

func (sqw *SQLWriter) upsertStateCID(stateNode models.StateNodeModel) {
balance := stateNode.Balance
if stateNode.Removed {
@ -52,14 +52,10 @@ type IndexerMetricsHandles struct {
|
||||
BlocksCounter metrics.Counter
|
||||
// The total number of processed transactions
|
||||
TransactionsCounter metrics.Counter
|
||||
// The total number of indexed blob hashes
|
||||
BlobHashCounter metrics.Counter
|
||||
// The total number of processed receipts
|
||||
ReceiptsCounter metrics.Counter
|
||||
// The total number of processed logs
|
||||
LogsCounter metrics.Counter
|
||||
// The total number of processed logs
|
||||
WithdrawalsCounter metrics.Counter
|
||||
// The total number of access list entries processed
|
||||
AccessListEntriesCounter metrics.Counter
|
||||
// Time spent waiting for free postgres tx
|
||||
@ -92,10 +88,8 @@ func RegisterIndexerMetrics(reg metrics.Registry) IndexerMetricsHandles {
|
||||
ctx := IndexerMetricsHandles{
|
||||
BlocksCounter: metrics.NewCounter(),
|
||||
TransactionsCounter: metrics.NewCounter(),
|
||||
BlobHashCounter: metrics.NewCounter(),
|
||||
ReceiptsCounter: metrics.NewCounter(),
|
||||
LogsCounter: metrics.NewCounter(),
|
||||
WithdrawalsCounter: metrics.NewCounter(),
|
||||
AccessListEntriesCounter: metrics.NewCounter(),
|
||||
FreePostgresTimer: metrics.NewTimer(),
|
||||
PostgresCommitTimer: metrics.NewTimer(),
|
||||
@ -117,10 +111,8 @@ func RegisterIndexerMetrics(reg metrics.Registry) IndexerMetricsHandles {
|
||||
subsys := "indexer"
|
||||
reg.Register(metricName(subsys, "blocks"), ctx.BlocksCounter)
|
||||
reg.Register(metricName(subsys, "transactions"), ctx.TransactionsCounter)
|
||||
reg.Register(metricName(subsys, "blob_hashes"), ctx.BlobHashCounter)
|
||||
reg.Register(metricName(subsys, "receipts"), ctx.ReceiptsCounter)
|
||||
reg.Register(metricName(subsys, "logs"), ctx.LogsCounter)
|
||||
reg.Register(metricName(subsys, "withdrawals"), ctx.WithdrawalsCounter)
|
||||
reg.Register(metricName(subsys, "access_list_entries"), ctx.AccessListEntriesCounter)
|
||||
reg.Register(metricName(subsys, "t_free_postgres"), ctx.FreePostgresTimer)
|
||||
reg.Register(metricName(subsys, "t_postgres_commit"), ctx.PostgresCommitTimer)
|
||||
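The three hunks above show the full pattern for wiring a new indexer counter: declare the handle, construct it with metrics.NewCounter(), and register it under the "indexer" subsystem. A self-contained sketch of the same pattern using go-ethereum's metrics package directly is below; the literal metric name stands in for whatever metricName(subsys, "withdrawals") produces inside this package, which is not shown in the diff.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	reg := metrics.NewRegistry()

	// Construct, then register under a subsystem-prefixed name, as
	// RegisterIndexerMetrics does above. The literal name is illustrative.
	withdrawals := metrics.NewCounter()
	if err := reg.Register("db/indexer/withdrawals", withdrawals); err != nil {
		fmt.Println("register failed:", err)
	}

	// Each processed withdrawal bumps the counter, mirroring
	// metrics.IndexerMetrics.WithdrawalsCounter.Inc(1) in the writers.
	withdrawals.Inc(1)
}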
|
@ -26,7 +26,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
@ -91,21 +90,16 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
|
||||
t := time.Now()
|
||||
blockHash := block.Hash()
|
||||
height := block.NumberU64()
|
||||
|
||||
var blobGasPrice *big.Int
|
||||
excessBlobGas := block.ExcessBlobGas()
|
||||
if excessBlobGas != nil {
|
||||
blobGasPrice = eip4844.CalcBlobFee(*excessBlobGas)
|
||||
}
|
||||
transactions := block.Transactions()
|
||||
var err error
|
||||
|
||||
// Derive any missing fields
|
||||
if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, block.Time(), block.BaseFee(), blobGasPrice, transactions); err != nil {
|
||||
if err := receipts.DeriveFields(sdi.chainConfig, blockHash, height, block.BaseFee(), transactions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Generate the block iplds
|
||||
txNodes, rctNodes, logNodes, wdNodes, err := ipld.FromBlockAndReceipts(block, receipts)
|
||||
txNodes, rctNodes, logNodes, err := ipld.FromBlockAndReceipts(block, receipts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating IPLD nodes from block and receipts: %w", err)
|
||||
}
|
||||
@ -148,18 +142,15 @@ func (sdi *StateDiffIndexer) PushBlock(block *types.Block, receipts types.Receip
|
||||
}
|
||||
metrics2.IndexerMetrics.UncleProcessingTimer.Update(time.Since(t))
|
||||
t = time.Now()
|
||||
|
||||
err = sdi.processObjects(batch, processArgs{
|
||||
// Publish and index receipts and txs
|
||||
err = sdi.processReceiptsAndTxs(batch, processArgs{
|
||||
headerID: headerID,
|
||||
blockNumber: block.Number(),
|
||||
blockTime: block.Time(),
|
||||
receipts: receipts,
|
||||
withdrawals: block.Withdrawals(),
|
||||
txs: transactions,
|
||||
rctNodes: rctNodes,
|
||||
txNodes: txNodes,
|
||||
logNodes: logNodes,
|
||||
wdNodes: wdNodes,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -187,7 +178,7 @@ func (sdi *StateDiffIndexer) PushHeader(batch interfaces.Batch, header *types.He
|
||||
return "", fmt.Errorf("sql: batch is expected to be of type %T, got %T", &BatchTx{}, batch)
|
||||
}
|
||||
// Process the header
|
||||
headerNode, err := ipld.EncodeHeader(header)
|
||||
headerNode, err := ipld.NewEthHeader(header)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -210,7 +201,6 @@ func (sdi *StateDiffIndexer) PushHeader(batch interfaces.Batch, header *types.He
|
||||
Timestamp: header.Time,
|
||||
Coinbase: header.Coinbase.String(),
|
||||
Canonical: true,
|
||||
WithdrawalsRoot: shared.MaybeStringHash(header.WithdrawalsHash),
|
||||
})
|
||||
}
|
||||
|
||||
@ -258,20 +248,17 @@ func (sdi *StateDiffIndexer) processUncles(tx *BatchTx, headerID string, blockNu
|
||||
type processArgs struct {
|
||||
headerID string
|
||||
blockNumber *big.Int
|
||||
blockTime uint64
|
||||
receipts types.Receipts
|
||||
txs types.Transactions
|
||||
withdrawals types.Withdrawals
|
||||
rctNodes []ipld.IPLD
|
||||
txNodes []ipld.IPLD
|
||||
logNodes [][]ipld.IPLD
|
||||
wdNodes []ipld.IPLD
|
||||
rctNodes []*ipld.EthReceipt
|
||||
txNodes []*ipld.EthTx
|
||||
logNodes [][]*ipld.EthLog
|
||||
}
|
||||
|
||||
// processObjects publishes and indexes receipt and transaction IPLDs in Postgres
|
||||
func (sdi *StateDiffIndexer) processObjects(tx *BatchTx, args processArgs) error {
|
||||
// processReceiptsAndTxs publishes and indexes receipt and transaction IPLDs in Postgres
|
||||
func (sdi *StateDiffIndexer) processReceiptsAndTxs(tx *BatchTx, args processArgs) error {
|
||||
// Process receipts and txs
|
||||
signer := types.MakeSigner(sdi.chainConfig, args.blockNumber, args.blockTime)
|
||||
signer := types.MakeSigner(sdi.chainConfig, args.blockNumber)
|
||||
for i, receipt := range args.receipts {
|
||||
txNode := args.txNodes[i]
|
||||
tx.cacheIPLD(txNode)
|
||||
@ -306,19 +293,6 @@ func (sdi *StateDiffIndexer) processObjects(tx *BatchTx, args processArgs) error
|
||||
return err
|
||||
}
|
||||
|
||||
if trx.Type() == types.BlobTxType {
|
||||
blobHashes := trx.BlobHashes()
|
||||
for i, hash := range blobHashes {
|
||||
if err := sdi.dbWriter.upsertBlobHash(tx.dbtx, models.BlobHashModel{
|
||||
TxHash: txID,
|
||||
Index: uint64(i),
|
||||
BlobHash: hash,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// this is the contract address if this receipt is for a contract creation tx
|
||||
contract := shared.HandleZeroAddr(receipt.ContractAddress)
|
||||
|
||||
@ -366,23 +340,7 @@ func (sdi *StateDiffIndexer) processObjects(tx *BatchTx, args processArgs) error
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Process withdrawals
|
||||
for i, withdrawal := range args.withdrawals {
|
||||
wdNode := args.wdNodes[i]
|
||||
tx.cacheIPLD(wdNode)
|
||||
wdModel := models.WithdrawalModel{
|
||||
BlockNumber: args.blockNumber.String(),
|
||||
HeaderID: args.headerID,
|
||||
CID: wdNode.Cid().String(),
|
||||
Index: withdrawal.Index,
|
||||
Validator: withdrawal.Validator,
|
||||
Address: withdrawal.Address.String(),
|
||||
Amount: withdrawal.Amount,
|
||||
}
|
||||
if err := sdi.dbWriter.upsertWithdrawalCID(tx.dbtx, wdModel); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -52,10 +52,8 @@ type Statements interface {
SetCanonicalHeaderStm() string
InsertUncleStm() string
InsertTxStm() string
InsertBlobHashStm() string
InsertRctStm() string
InsertLogStm() string
InsertWithdrawalStm() string
InsertStateStm() string
InsertStorageStm() string
InsertIPLDStm() string
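The Statements interface is how the SQL writer looks up the insert text for each table. A hedged, in-package sketch of how one of these methods pairs with a transaction follows; it mirrors the tx.Exec(..., w.db.InsertWithdrawalStm(), ...) call that appears later in this diff, but the helper itself is illustrative and not part of the change.

package sql

import (
	"context"

	"github.com/cerc-io/plugeth-statediff/indexer/models"
)

// insertWithdrawal (sketch): pair a Statements implementation with a Tx,
// mirroring the writer's withdrawal path shown further down in this diff.
func insertWithdrawal(ctx context.Context, tx Tx, db Statements, wd models.WithdrawalModel) error {
	_, err := tx.Exec(ctx, db.InsertWithdrawalStm(),
		wd.BlockNumber, wd.HeaderID, wd.CID, wd.Index, wd.Validator, wd.Address, wd.Amount)
	return err
}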
@ -3,6 +3,7 @@ package sql
import (
"context"
"reflect"
"sync"
"time"

"github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
@ -15,6 +16,7 @@ const copyFromCheckLimit = 100
type DelayedTx struct {
cache []interface{}
db Database
sync.RWMutex
}
type cachedStmt struct {
sql string
@ -27,6 +29,8 @@ type copyFrom struct {
rows [][]interface{}
}

type result int64

func (cf *copyFrom) appendRows(rows [][]interface{}) {
cf.rows = append(cf.rows, rows...)
}
@ -44,6 +48,8 @@ func (tx *DelayedTx) QueryRow(ctx context.Context, sql string, args ...interface
}

func (tx *DelayedTx) findPrevCopyFrom(tableName []string, columnNames []string, limit int) (*copyFrom, int) {
tx.RLock()
defer tx.RUnlock()
for pos, count := len(tx.cache)-1, 0; pos >= 0 && count < limit; pos, count = pos-1, count+1 {
prevCopy, ok := tx.cache[pos].(*copyFrom)
if ok && prevCopy.matches(tableName, columnNames) {
@ -59,15 +65,19 @@ func (tx *DelayedTx) CopyFrom(ctx context.Context, tableName []string, columnNam
"current", len(prevCopy.rows), "new", len(rows), "distance", distance)
prevCopy.appendRows(rows)
} else {
tx.Lock()
tx.cache = append(tx.cache, &copyFrom{tableName, columnNames, rows})
tx.Unlock()
}

return 0, nil
}

func (tx *DelayedTx) Exec(ctx context.Context, sql string, args ...interface{}) (Result, error) {
tx.Lock()
tx.cache = append(tx.cache, cachedStmt{sql, args})
return nil, nil
defer tx.Unlock()
return result(0), nil
}

func (tx *DelayedTx) Commit(ctx context.Context) error {
@ -85,6 +95,8 @@ func (tx *DelayedTx) Commit(ctx context.Context) error {
rollback(ctx, base)
}
}()
tx.Lock()
defer tx.Unlock()
for _, item := range tx.cache {
switch item := item.(type) {
case *copyFrom:
@ -105,6 +117,13 @@ func (tx *DelayedTx) Commit(ctx context.Context) error {
}

func (tx *DelayedTx) Rollback(ctx context.Context) error {
tx.Lock()
defer tx.Unlock()
tx.cache = nil
return nil
}

// RowsAffected satisfies sql.Result
func (r result) RowsAffected() (int64, error) {
return int64(r), nil
}
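The hunks above embed a sync.RWMutex in DelayedTx so its statement cache can be appended to from several goroutines while Commit or Rollback drains it. A minimal self-contained sketch of that pattern is below; it is not the real DelayedTx (whose Database, Result, and copyFrom types live elsewhere in the package), just the locking scheme in isolation.

package main

import (
	"fmt"
	"sync"
)

// delayedTx caches statements and replays them on commit, guarding the
// cache with an RWMutex the same way the diff above guards DelayedTx.
type delayedTx struct {
	sync.RWMutex
	cache []string
}

func (tx *delayedTx) Exec(stmt string) {
	tx.Lock()
	defer tx.Unlock()
	tx.cache = append(tx.cache, stmt)
}

func (tx *delayedTx) Commit() int {
	tx.Lock()
	defer tx.Unlock()
	n := len(tx.cache)
	tx.cache = nil // replay against the real database would happen here
	return n
}

func main() {
	tx := &delayedTx{}
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			tx.Exec(fmt.Sprintf("INSERT ... -- worker %d", i))
		}(i)
	}
	wg.Wait()
	fmt.Println("replayed", tx.Commit(), "cached statements")
}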
|
@ -96,14 +96,6 @@ func TestPGXIndexer(t *testing.T) {
|
||||
test.DoTestPublishAndIndexReceiptIPLDs(t, db)
|
||||
})
|
||||
|
||||
t.Run("Publish and index withdrawal IPLDs in a single tx", func(t *testing.T) {
|
||||
setupPGX(t)
|
||||
defer tearDown(t)
|
||||
defer checkTxClosure(t, 1, 0, 1)
|
||||
|
||||
test.DoTestPublishAndIndexWithdrawalIPLDs(t, db)
|
||||
})
|
||||
|
||||
t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) {
|
||||
setupPGX(t)
|
||||
defer tearDown(t)
|
||||
|
@ -18,7 +18,6 @@ package postgres
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/shared/schema"
|
||||
@ -44,9 +43,7 @@ type DB struct {
|
||||
|
||||
// MaxHeaderStm satisfies the sql.Statements interface
|
||||
func (db *DB) MaxHeaderStm() string {
|
||||
return fmt.Sprintf("SELECT %s FROM %s ORDER BY block_number DESC LIMIT 1",
|
||||
strings.Join(schema.TableHeader.ColumnNames(), ","),
|
||||
schema.TableHeader.Name)
|
||||
return fmt.Sprintf("SELECT block_number, block_hash, parent_hash, cid, td, node_ids, reward, state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, coinbase FROM %s ORDER BY block_number DESC LIMIT 1", schema.TableHeader.Name)
|
||||
}
|
||||
|
||||
// ExistsHeaderStm satisfies the sql.Statements interface
|
||||
@ -62,7 +59,7 @@ func (db *DB) DetectGapsStm() string {
|
||||
// InsertHeaderStm satisfies the sql.Statements interface
|
||||
// Stm == Statement
|
||||
func (db *DB) InsertHeaderStm() string {
|
||||
return schema.TableHeader.PreparedInsert(db.upsert)
|
||||
return schema.TableHeader.ToInsertStatement(db.upsert)
|
||||
}
|
||||
|
||||
// SetCanonicalHeaderStm satisfies the sql.Statements interface
|
||||
@ -73,47 +70,37 @@ func (db *DB) SetCanonicalHeaderStm() string {
|
||||
|
||||
// InsertUncleStm satisfies the sql.Statements interface
|
||||
func (db *DB) InsertUncleStm() string {
|
||||
return schema.TableUncle.PreparedInsert(db.upsert)
|
||||
return schema.TableUncle.ToInsertStatement(db.upsert)
|
||||
}
|
||||
|
||||
// InsertTxStm satisfies the sql.Statements interface
|
||||
func (db *DB) InsertTxStm() string {
|
||||
return schema.TableTransaction.PreparedInsert(db.upsert)
|
||||
}
|
||||
|
||||
// InsertBlobHashStm satisfies the sql.Statements interface
|
||||
func (db *DB) InsertBlobHashStm() string {
|
||||
return schema.TableBlobHash.PreparedInsert(db.upsert)
|
||||
return schema.TableTransaction.ToInsertStatement(db.upsert)
|
||||
}
|
||||
|
||||
// InsertRctStm satisfies the sql.Statements interface
|
||||
func (db *DB) InsertRctStm() string {
|
||||
return schema.TableReceipt.PreparedInsert(db.upsert)
|
||||
return schema.TableReceipt.ToInsertStatement(db.upsert)
|
||||
}
|
||||
|
||||
// InsertLogStm satisfies the sql.Statements interface
|
||||
func (db *DB) InsertLogStm() string {
|
||||
return schema.TableLog.PreparedInsert(db.upsert)
|
||||
}
|
||||
|
||||
// InsertLogStm satisfies the sql.Statements interface
|
||||
func (db *DB) InsertWithdrawalStm() string {
|
||||
return schema.TableWithdrawal.PreparedInsert(db.upsert)
|
||||
return schema.TableLog.ToInsertStatement(db.upsert)
|
||||
}
|
||||
|
||||
// InsertStateStm satisfies the sql.Statements interface
|
||||
func (db *DB) InsertStateStm() string {
|
||||
return schema.TableStateNode.PreparedInsert(db.upsert)
|
||||
return schema.TableStateNode.ToInsertStatement(db.upsert)
|
||||
}
|
||||
|
||||
// InsertStorageStm satisfies the sql.Statements interface
|
||||
func (db *DB) InsertStorageStm() string {
|
||||
return schema.TableStorageNode.PreparedInsert(db.upsert)
|
||||
return schema.TableStorageNode.ToInsertStatement(db.upsert)
|
||||
}
|
||||
|
||||
// InsertIPLDStm satisfies the sql.Statements interface
|
||||
func (db *DB) InsertIPLDStm() string {
|
||||
return schema.TableIPLDBlock.PreparedInsert(db.upsert)
|
||||
return schema.TableIPLDBlock.ToInsertStatement(db.upsert)
|
||||
}
|
||||
|
||||
// InsertIPLDsStm satisfies the sql.Statements interface
|
||||
|
@ -37,6 +37,7 @@ type PGXDriver struct {
|
||||
ctx context.Context
|
||||
pool *pgxpool.Pool
|
||||
nodeInfo node.Info
|
||||
nodeID string
|
||||
config Config
|
||||
}
|
||||
|
||||
@ -49,25 +50,21 @@ func ConnectPGX(ctx context.Context, config Config) (*pgxpool.Pool, error) {
|
||||
return pgxpool.ConnectConfig(ctx, pgConf)
|
||||
}
|
||||
|
||||
// ConnectPGXDriver returns a new pgx driver
|
||||
// NewPGXDriver returns a new pgx driver
|
||||
// it initializes the connection pool and creates the node info table
|
||||
func ConnectPGXDriver(ctx context.Context, config Config, node node.Info) (*PGXDriver, error) {
|
||||
func NewPGXDriver(ctx context.Context, config Config, node node.Info) (*PGXDriver, error) {
|
||||
dbPool, err := ConnectPGX(ctx, config)
|
||||
if err != nil {
|
||||
return nil, ErrDBConnectionFailed(err)
|
||||
}
|
||||
pg := NewPGXDriver(ctx, dbPool, config)
|
||||
nodeErr := pg.createNode(node)
|
||||
pg := &PGXDriver{ctx: ctx, pool: dbPool, nodeInfo: node, config: config}
|
||||
nodeErr := pg.createNode()
|
||||
if nodeErr != nil {
|
||||
return &PGXDriver{}, ErrUnableToSetNode(nodeErr)
|
||||
}
|
||||
return pg, nil
|
||||
}
|
||||
|
||||
func NewPGXDriver(ctx context.Context, pool *pgxpool.Pool, config Config) *PGXDriver {
|
||||
return &PGXDriver{ctx: ctx, pool: pool, config: config}
|
||||
}
|
||||
|
||||
// MakeConfig creates a pgxpool.Config from the provided Config
|
||||
func MakeConfig(config Config) (*pgxpool.Config, error) {
|
||||
conf, err := pgxpool.ParseConfig("")
|
||||
@ -105,19 +102,19 @@ func MakeConfig(config Config) (*pgxpool.Config, error) {
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
func (pgx *PGXDriver) createNode(nodeInfo node.Info) error {
|
||||
func (pgx *PGXDriver) createNode() error {
|
||||
_, err := pgx.pool.Exec(
|
||||
pgx.ctx,
|
||||
createNodeStm,
|
||||
nodeInfo.GenesisBlock,
|
||||
nodeInfo.NetworkID,
|
||||
nodeInfo.ID,
|
||||
nodeInfo.ClientName,
|
||||
nodeInfo.ChainID)
|
||||
pgx.nodeInfo.GenesisBlock,
|
||||
pgx.nodeInfo.NetworkID,
|
||||
pgx.nodeInfo.ID,
|
||||
pgx.nodeInfo.ClientName,
|
||||
pgx.nodeInfo.ChainID)
|
||||
if err != nil {
|
||||
return ErrUnableToSetNode(err)
|
||||
}
|
||||
pgx.nodeInfo = nodeInfo
|
||||
pgx.nodeID = pgx.nodeInfo.ID
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -158,7 +155,7 @@ func (pgx *PGXDriver) Stats() metrics.DbStats {
|
||||
|
||||
// NodeID satisfies sql.Database
|
||||
func (pgx *PGXDriver) NodeID() string {
|
||||
return pgx.nodeInfo.ID
|
||||
return pgx.nodeID
|
||||
}
|
||||
|
||||
// Close satisfies sql.Database/io.Closer
|
||||
|
@ -94,7 +94,7 @@ func TestPostgresPGX(t *testing.T) {
|
||||
|
||||
t.Run("throws error when can't connect to the database", func(t *testing.T) {
|
||||
goodInfo := node.Info{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"}
|
||||
_, err := postgres.ConnectPGXDriver(ctx, postgres.Config{}, goodInfo)
|
||||
_, err := postgres.NewPGXDriver(ctx, postgres.Config{}, goodInfo)
|
||||
if err == nil {
|
||||
t.Fatal("Expected an error")
|
||||
}
|
||||
@ -106,7 +106,7 @@ func TestPostgresPGX(t *testing.T) {
|
||||
badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100))
|
||||
badInfo := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"}
|
||||
|
||||
_, err := postgres.ConnectPGXDriver(ctx, pgConfig, badInfo)
|
||||
_, err := postgres.NewPGXDriver(ctx, pgConfig, badInfo)
|
||||
if err == nil {
|
||||
t.Fatal("Expected an error")
|
||||
}
|
||||
|
@ -34,6 +34,7 @@ type SQLXDriver struct {
|
||||
ctx context.Context
|
||||
db *sqlx.DB
|
||||
nodeInfo node.Info
|
||||
nodeID string
|
||||
}
|
||||
|
||||
// ConnectSQLX initializes and returns a SQLX connection pool for postgres
|
||||
@ -52,36 +53,32 @@ func ConnectSQLX(ctx context.Context, config Config) (*sqlx.DB, error) {
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// ConnectSQLXDriver returns a new sqlx driver for Postgres
|
||||
// NewSQLXDriver returns a new sqlx driver for Postgres
|
||||
// it initializes the connection pool and creates the node info table
|
||||
func ConnectSQLXDriver(ctx context.Context, config Config, node node.Info) (*SQLXDriver, error) {
|
||||
func NewSQLXDriver(ctx context.Context, config Config, node node.Info) (*SQLXDriver, error) {
|
||||
db, err := ConnectSQLX(ctx, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
driver := NewSQLXDriver(ctx, db)
|
||||
if err := driver.createNode(node); err != nil {
|
||||
driver := &SQLXDriver{ctx: ctx, db: db, nodeInfo: node}
|
||||
if err := driver.createNode(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return driver, nil
|
||||
}
|
||||
|
||||
func NewSQLXDriver(ctx context.Context, db *sqlx.DB) *SQLXDriver {
|
||||
return &SQLXDriver{ctx: ctx, db: db}
|
||||
}
|
||||
|
||||
func (driver *SQLXDriver) createNode(nodeInfo node.Info) error {
|
||||
func (driver *SQLXDriver) createNode() error {
|
||||
_, err := driver.db.Exec(
|
||||
createNodeStm,
|
||||
nodeInfo.GenesisBlock,
|
||||
nodeInfo.NetworkID,
|
||||
nodeInfo.ID,
|
||||
nodeInfo.ClientName,
|
||||
nodeInfo.ChainID)
|
||||
driver.nodeInfo.GenesisBlock,
|
||||
driver.nodeInfo.NetworkID,
|
||||
driver.nodeInfo.ID,
|
||||
driver.nodeInfo.ClientName,
|
||||
driver.nodeInfo.ChainID)
|
||||
if err != nil {
|
||||
return ErrUnableToSetNode(err)
|
||||
}
|
||||
driver.nodeInfo = nodeInfo
|
||||
driver.nodeID = driver.nodeInfo.ID
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -121,7 +118,7 @@ func (driver *SQLXDriver) Stats() metrics.DbStats {
|
||||
|
||||
// NodeID satisfies sql.Database
|
||||
func (driver *SQLXDriver) NodeID() string {
|
||||
return driver.nodeInfo.ID
|
||||
return driver.nodeID
|
||||
}
|
||||
|
||||
// Close satisfies sql.Database/io.Closer
|
||||
|
@ -97,7 +97,7 @@ func TestPostgresSQLX(t *testing.T) {
|
||||
|
||||
t.Run("throws error when can't connect to the database", func(t *testing.T) {
|
||||
goodInfo := node.Info{GenesisBlock: "GENESIS", NetworkID: "1", ID: "x123", ClientName: "geth"}
|
||||
_, err := postgres.ConnectSQLXDriver(ctx, postgres.Config{}, goodInfo)
|
||||
_, err := postgres.NewSQLXDriver(ctx, postgres.Config{}, goodInfo)
|
||||
if err == nil {
|
||||
t.Fatal("Expected an error")
|
||||
}
|
||||
@ -109,7 +109,7 @@ func TestPostgresSQLX(t *testing.T) {
|
||||
badHash := fmt.Sprintf("x %s", strings.Repeat("1", 100))
|
||||
badInfo := node.Info{GenesisBlock: badHash, NetworkID: "1", ID: "x123", ClientName: "geth"}
|
||||
|
||||
_, err := postgres.ConnectSQLXDriver(ctx, pgConfig, badInfo)
|
||||
_, err := postgres.NewSQLXDriver(ctx, pgConfig, badInfo)
|
||||
if err == nil {
|
||||
t.Fatal("Expected an error")
|
||||
}
|
||||
|
@ -30,7 +30,7 @@ func SetupSQLXDB() (sql.Database, error) {
|
||||
return nil, err
|
||||
}
|
||||
conf.MaxIdle = 0
|
||||
driver, err := ConnectSQLXDriver(context.Background(), conf, node.Info{})
|
||||
driver, err := NewSQLXDriver(context.Background(), conf, node.Info{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -39,7 +39,7 @@ func SetupSQLXDB() (sql.Database, error) {
|
||||
|
||||
// SetupPGXDB is used to setup a pgx db for tests
|
||||
func SetupPGXDB(config Config) (sql.Database, error) {
|
||||
driver, err := ConnectPGXDriver(context.Background(), config, node.Info{})
|
||||
driver, err := NewPGXDriver(context.Background(), config, node.Info{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -82,14 +82,6 @@ func TestSQLXIndexer(t *testing.T) {
|
||||
test.DoTestPublishAndIndexReceiptIPLDs(t, db)
|
||||
})
|
||||
|
||||
t.Run("Publish and index withdrawal IPLDs in a single tx", func(t *testing.T) {
|
||||
setupSQLX(t)
|
||||
defer tearDown(t)
|
||||
defer checkTxClosure(t, 0, 0, 0)
|
||||
|
||||
test.DoTestPublishAndIndexWithdrawalIPLDs(t, db)
|
||||
})
|
||||
|
||||
t.Run("Publish and index state IPLDs in a single tx", func(t *testing.T) {
|
||||
setupSQLX(t)
|
||||
defer tearDown(t)
|
||||
|
@ -95,7 +95,6 @@ func (w *Writer) maxHeader() (*models.HeaderModel, error) {
|
||||
&model.Timestamp,
|
||||
&model.Coinbase,
|
||||
&model.Canonical,
|
||||
&model.WithdrawalsRoot,
|
||||
)
|
||||
model.BlockNumber = strconv.FormatUint(number, 10)
|
||||
model.TotalDifficulty = strconv.FormatUint(td, 10)
|
||||
@ -126,7 +125,6 @@ func (w *Writer) upsertHeaderCID(tx Tx, header models.HeaderModel) error {
|
||||
header.Timestamp,
|
||||
header.Coinbase,
|
||||
header.Canonical,
|
||||
header.WithdrawalsRoot,
|
||||
)
|
||||
if err != nil {
|
||||
return insertError{"eth.header_cids", err, w.db.InsertHeaderStm(), header}
|
||||
@ -209,30 +207,6 @@ func (w *Writer) upsertTransactionCID(tx Tx, transaction models.TxModel) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
INSERT INTO eth.blob_hashes (tx_hash, index, blob_hash) VALUES ($1, $2, $3)
|
||||
ON CONFLICT (tx_hash, index) DO NOTHING
|
||||
*/
|
||||
func (w *Writer) upsertBlobHash(tx Tx, blobHash models.BlobHashModel) error {
|
||||
if w.useCopyForTx(tx) {
|
||||
rows := toRows(toRow(blobHash.TxHash, blobHash.Index, blobHash.BlobHash))
|
||||
_, err := tx.CopyFrom(w.db.Context(), schema.TableBlobHash.TableName(), schema.TableBlobHash.ColumnNames(), rows)
|
||||
if err != nil {
|
||||
return insertError{"eth.blob_hashes", err, "COPY", blobHash}
|
||||
}
|
||||
} else {
|
||||
_, err := tx.Exec(w.db.Context(), w.db.InsertBlobHashStm(),
|
||||
blobHash.TxHash,
|
||||
blobHash.Index,
|
||||
blobHash.BlobHash,
|
||||
)
|
||||
if err != nil {
|
||||
return insertError{"eth.blob_hashes", err, w.db.InsertBlobHashStm(), blobHash}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
INSERT INTO eth.receipt_cids (block_number, header_id, tx_id, cid, contract, post_state, post_status) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
||||
ON CONFLICT (tx_id, header_id, block_number) DO NOTHING
|
||||
@ -312,41 +286,6 @@ func (w *Writer) upsertLogCID(tx Tx, logs []*models.LogsModel) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Writer) upsertWithdrawalCID(tx Tx, withdrawal models.WithdrawalModel) error {
|
||||
if w.useCopyForTx(tx) {
|
||||
blockNum, err := strconv.ParseUint(withdrawal.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return insertError{"eth.withdrawal_cids", err, "COPY", withdrawal}
|
||||
}
|
||||
|
||||
_, err = tx.CopyFrom(w.db.Context(), schema.TableWithdrawal.TableName(), schema.TableWithdrawal.ColumnNames(),
|
||||
toRows(toRow(blockNum,
|
||||
withdrawal.HeaderID,
|
||||
withdrawal.CID,
|
||||
withdrawal.Index,
|
||||
withdrawal.Validator,
|
||||
withdrawal.Address,
|
||||
withdrawal.Amount)))
|
||||
if err != nil {
|
||||
return insertError{"eth.withdrawal_cids", err, "COPY", withdrawal}
|
||||
}
|
||||
} else {
|
||||
_, err := tx.Exec(w.db.Context(), w.db.InsertWithdrawalStm(),
|
||||
withdrawal.BlockNumber,
|
||||
withdrawal.HeaderID,
|
||||
withdrawal.CID,
|
||||
withdrawal.Index,
|
||||
withdrawal.Validator,
|
||||
withdrawal.Address,
|
||||
withdrawal.Amount)
|
||||
if err != nil {
|
||||
return insertError{"eth.withdrawal_cids", err, w.db.InsertWithdrawalStm(), withdrawal}
|
||||
}
|
||||
}
|
||||
metrics.IndexerMetrics.WithdrawalsCounter.Inc(1)
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
INSERT INTO eth.state_cids (block_number, header_id, state_leaf_key, cid, removed, diff, balance, nonce, code_hash, storage_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
|
||||
ON CONFLICT (header_id, state_leaf_key, block_number) DO NOTHING
|
||||
|
@ -1,102 +0,0 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2024 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipld
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
// EncodeHeader converts a *types.Header into an IPLD node
|
||||
func EncodeHeader(header *types.Header) (IPLD, error) {
|
||||
headerRLP, err := rlp.EncodeToBytes(header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := RawdataToCid(MEthHeader, headerRLP, mh.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &node{
|
||||
cid: c,
|
||||
rawdata: headerRLP,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// EncodeTx converts a *types.Transaction to an IPLD node
|
||||
func EncodeTx(tx *types.Transaction) (IPLD, error) {
|
||||
txRaw, err := tx.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := RawdataToCid(MEthTx, txRaw, mh.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &node{
|
||||
cid: c,
|
||||
rawdata: txRaw,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// EncodeReceipt converts a types.Receipt to an IPLD node
|
||||
func EncodeReceipt(receipt *types.Receipt) (IPLD, error) {
|
||||
rctRaw, err := receipt.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := RawdataToCid(MEthTxReceipt, rctRaw, mh.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &node{
|
||||
cid: c,
|
||||
rawdata: rctRaw,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// EncodeLog converts a Log to an IPLD node
|
||||
func EncodeLog(log *types.Log) (IPLD, error) {
|
||||
logRaw, err := rlp.EncodeToBytes(log)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := RawdataToCid(MEthLog, logRaw, mh.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &node{
|
||||
cid: c,
|
||||
rawdata: logRaw,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func EncodeWithdrawal(w *types.Withdrawal) (IPLD, error) {
|
||||
wRaw, err := rlp.EncodeToBytes(w)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := RawdataToCid(MEthWithdrawal, wRaw, mh.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &node{
|
||||
cid: c,
|
||||
rawdata: wRaw,
|
||||
}, nil
|
||||
}
|
60
indexer/ipld/eth_header.go
Normal file
@ -0,0 +1,60 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipld
|
||||
|
||||
import (
|
||||
"github.com/ipfs/go-cid"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// EthHeader (eth-block, codec 0x90) represents an ethereum block header
|
||||
type EthHeader struct {
|
||||
cid cid.Cid
|
||||
rawdata []byte
|
||||
}
|
||||
|
||||
// Static (compile time) check that EthHeader satisfies the node.Node interface.
|
||||
var _ IPLD = (*EthHeader)(nil)
|
||||
|
||||
// NewEthHeader converts a *types.Header into an EthHeader IPLD node
|
||||
func NewEthHeader(header *types.Header) (*EthHeader, error) {
|
||||
headerRLP, err := rlp.EncodeToBytes(header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := RawdataToCid(MEthHeader, headerRLP, mh.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &EthHeader{
|
||||
cid: c,
|
||||
rawdata: headerRLP,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// RawData returns the binary of the RLP encode of the block header.
|
||||
func (b *EthHeader) RawData() []byte {
|
||||
return b.rawdata
|
||||
}
|
||||
|
||||
// Cid returns the cid of the block header.
|
||||
func (b *EthHeader) Cid() cid.Cid {
|
||||
return b.cid
|
||||
}
|
43
indexer/ipld/eth_log.go
Normal file
@ -0,0 +1,43 @@
|
||||
package ipld
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ipfs/go-cid"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
// EthLog (eth-log, codec 0x9a) represents an ethereum receipt log
|
||||
type EthLog struct {
|
||||
rawData []byte
|
||||
cid cid.Cid
|
||||
}
|
||||
|
||||
// Static (compile time) check that EthLog satisfies the node.Node interface.
|
||||
var _ IPLD = (*EthLog)(nil)
|
||||
|
||||
// NewLog creates a new EthLog IPLD node
|
||||
func NewLog(log *types.Log) (*EthLog, error) {
|
||||
logRaw, err := rlp.EncodeToBytes(log)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := RawdataToCid(MEthLog, logRaw, mh.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &EthLog{
|
||||
cid: c,
|
||||
rawData: logRaw,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// RawData returns the binary of the RLP encode of the log.
|
||||
func (l *EthLog) RawData() []byte {
|
||||
return l.rawData
|
||||
}
|
||||
|
||||
// Cid returns the cid of the receipt log.
|
||||
func (l *EthLog) Cid() cid.Cid {
|
||||
return l.cid
|
||||
}
|
@ -22,29 +22,25 @@ import (
|
||||
|
||||
// FromBlockAndReceipts takes a block and processes it
|
||||
// to return it a set of IPLD nodes for further processing.
|
||||
func FromBlockAndReceipts(block *types.Block, receipts []*types.Receipt) ([]IPLD, []IPLD, [][]IPLD, []IPLD, error) {
|
||||
func FromBlockAndReceipts(block *types.Block, receipts []*types.Receipt) ([]*EthTx, []*EthReceipt, [][]*EthLog, error) {
|
||||
// Process the txs
|
||||
txNodes, err := processTransactions(block.Transactions())
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
withdrawalNodes, err := processWithdrawals(block.Withdrawals())
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
// Process the receipts and logs
|
||||
rctNodes, logNodes, err := processReceiptsAndLogs(receipts)
|
||||
|
||||
return txNodes, rctNodes, logNodes, withdrawalNodes, err
|
||||
return txNodes, rctNodes, logNodes, err
|
||||
}
|
||||
|
||||
// processTransactions will take the found transactions in a parsed block body
|
||||
// to return IPLD node slices for eth-tx
|
||||
func processTransactions(txs []*types.Transaction) ([]IPLD, error) {
|
||||
var ethTxNodes []IPLD
|
||||
func processTransactions(txs []*types.Transaction) ([]*EthTx, error) {
|
||||
var ethTxNodes []*EthTx
|
||||
for _, tx := range txs {
|
||||
ethTx, err := EncodeTx(tx)
|
||||
ethTx, err := NewEthTx(tx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -54,25 +50,12 @@ func processTransactions(txs []*types.Transaction) ([]IPLD, error) {
|
||||
return ethTxNodes, nil
|
||||
}
|
||||
|
||||
func processWithdrawals(withdrawals []*types.Withdrawal) ([]IPLD, error) {
|
||||
var withdrawalNodes []IPLD
|
||||
for _, withdrawal := range withdrawals {
|
||||
ethW, err := EncodeWithdrawal(withdrawal)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
withdrawalNodes = append(withdrawalNodes, ethW)
|
||||
}
|
||||
|
||||
return withdrawalNodes, nil
|
||||
}
|
||||
|
||||
// processReceiptsAndLogs will take in receipts
|
||||
// to return IPLD node slices for eth-rct and eth-log
|
||||
func processReceiptsAndLogs(rcts []*types.Receipt) ([]IPLD, [][]IPLD, error) {
|
||||
func processReceiptsAndLogs(rcts []*types.Receipt) ([]*EthReceipt, [][]*EthLog, error) {
|
||||
// Pre allocating memory.
|
||||
ethRctNodes := make([]IPLD, len(rcts))
|
||||
ethLogNodes := make([][]IPLD, len(rcts))
|
||||
ethRctNodes := make([]*EthReceipt, len(rcts))
|
||||
ethLogNodes := make([][]*EthLog, len(rcts))
|
||||
|
||||
for idx, rct := range rcts {
|
||||
logNodes, err := processLogs(rct.Logs)
|
||||
@ -80,7 +63,7 @@ func processReceiptsAndLogs(rcts []*types.Receipt) ([]IPLD, [][]IPLD, error) {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
ethRct, err := EncodeReceipt(rct)
|
||||
ethRct, err := NewReceipt(rct)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -92,10 +75,10 @@ func processReceiptsAndLogs(rcts []*types.Receipt) ([]IPLD, [][]IPLD, error) {
|
||||
return ethRctNodes, ethLogNodes, nil
|
||||
}
|
||||
|
||||
func processLogs(logs []*types.Log) ([]IPLD, error) {
|
||||
logNodes := make([]IPLD, len(logs))
|
||||
func processLogs(logs []*types.Log) ([]*EthLog, error) {
|
||||
logNodes := make([]*EthLog, len(logs))
|
||||
for idx, log := range logs {
|
||||
logNode, err := EncodeLog(log)
|
||||
logNode, err := NewLog(log)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -92,7 +92,7 @@ func loadBlockData(t *testing.T) []testCase {
|
||||
func TestFromBlockAndReceipts(t *testing.T) {
|
||||
testCases := loadBlockData(t)
|
||||
for _, tc := range testCases {
|
||||
_, _, _, _, err := FromBlockAndReceipts(tc.block, tc.receipts)
|
||||
_, _, _, err := FromBlockAndReceipts(tc.block, tc.receipts)
|
||||
if err != nil {
|
||||
t.Fatalf("error generating IPLDs from block and receipts, err %v, kind %s, block hash %s", err, tc.kind, tc.block.Hash())
|
||||
}
|
||||
|
58
indexer/ipld/eth_receipt.go
Normal file
@ -0,0 +1,58 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipld
|
||||
|
||||
import (
|
||||
"github.com/ipfs/go-cid"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
type EthReceipt struct {
|
||||
rawdata []byte
|
||||
cid cid.Cid
|
||||
}
|
||||
|
||||
// Static (compile time) check that EthReceipt satisfies the node.Node interface.
|
||||
var _ IPLD = (*EthReceipt)(nil)
|
||||
|
||||
// NewReceipt converts a *types.Receipt to an EthReceipt IPLD node
|
||||
func NewReceipt(receipt *types.Receipt) (*EthReceipt, error) {
|
||||
rctRaw, err := receipt.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := RawdataToCid(MEthTxReceipt, rctRaw, mh.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &EthReceipt{
|
||||
cid: c,
|
||||
rawdata: rctRaw,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// RawData returns the binary of the RLP encode of the receipt.
|
||||
func (r *EthReceipt) RawData() []byte {
|
||||
return r.rawdata
|
||||
}
|
||||
|
||||
// Cid returns the cid of the receipt.
|
||||
func (r *EthReceipt) Cid() cid.Cid {
|
||||
return r.cid
|
||||
}
|
59
indexer/ipld/eth_tx.go
Normal file
@ -0,0 +1,59 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipld
|
||||
|
||||
import (
|
||||
"github.com/ipfs/go-cid"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
// EthTx (eth-tx codec 0x93) represents an ethereum transaction
|
||||
type EthTx struct {
|
||||
cid cid.Cid
|
||||
rawdata []byte
|
||||
}
|
||||
|
||||
// Static (compile time) check that EthTx satisfies the node.Node interface.
|
||||
var _ IPLD = (*EthTx)(nil)
|
||||
|
||||
// NewEthTx converts a *types.Transaction to an EthTx IPLD node
|
||||
func NewEthTx(tx *types.Transaction) (*EthTx, error) {
|
||||
txRaw, err := tx.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := RawdataToCid(MEthTx, txRaw, mh.KECCAK_256)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &EthTx{
|
||||
cid: c,
|
||||
rawdata: txRaw,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// RawData returns the binary of the RLP encode of the transaction.
|
||||
func (t *EthTx) RawData() []byte {
|
||||
return t.rawdata
|
||||
}
|
||||
|
||||
// Cid returns the cid of the transaction.
|
||||
func (t *EthTx) Cid() cid.Cid {
|
||||
return t.cid
|
||||
}
|
@ -2,25 +2,7 @@ package ipld

import "github.com/ipfs/go-cid"

// Check that node satisfies the IPLD Node interface.
var _ IPLD = (*node)(nil)

type node struct {
cid cid.Cid
rawdata []byte
}

type IPLD interface {
Cid() cid.Cid
RawData() []byte
}

// RawData returns the RLP encoded bytes of the node.
func (b node) RawData() []byte {
return b.rawdata
}

// Cid returns the CID of the node.
func (b node) Cid() cid.Cid {
return b.cid
}
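Both sides of this diff expose the same two-method IPLD interface, so consumers are indifferent to whether nodes come from the EncodeHeader/EncodeTx helpers shown earlier or from the EthHeader/EthTx types added above. A short in-package sketch of such a consumer follows; the helper name is illustrative and not part of the change.

package ipld

import "fmt"

// describe works against the IPLD interface alone, whichever constructor
// produced the nodes.
func describe(nodes ...IPLD) {
	for _, n := range nodes {
		fmt.Printf("%s: %d bytes\n", n.Cid(), len(n.RawData()))
	}
}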
@ -37,7 +37,6 @@ const (
MEthStorageTrie = 0x98
MEthLogTrie = 0x99
MEthLog = 0x9a
MEthWithdrawal = 0x9b // TODO add to multicodec registry
)

// RawdataToCid takes the desired codec and a slice of bytes
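These codec constants feed RawdataToCid, which wraps a keccak-256 multihash of the raw RLP in a CID. A sketch using the withdrawal codec the same way EncodeWithdrawal does in the encode.go file shown earlier follows; note MEthWithdrawal exists only on the side of the diff that defines it, and the helper itself is illustrative.

package ipld

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
	mh "github.com/multiformats/go-multihash"
)

// withdrawalCidSketch mirrors EncodeWithdrawal: RLP-encode the withdrawal,
// then wrap the bytes in a CID under the MEthWithdrawal codec.
func withdrawalCidSketch(w *types.Withdrawal) (string, error) {
	raw, err := rlp.EncodeToBytes(w)
	if err != nil {
		return "", err
	}
	c, err := RawdataToCid(MEthWithdrawal, raw, mh.KECCAK_256)
	if err != nil {
		return "", err
	}
	return c.String(), nil
}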
@ -19,8 +19,8 @@ package mocks
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
@ -28,7 +28,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"github.com/holiman/uint256"
|
||||
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/ipld"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/shared"
|
||||
@ -39,18 +38,14 @@ import (
|
||||
|
||||
// Test variables
|
||||
var (
|
||||
// RNG for deterministically generated keys
|
||||
rng = rand.New(rand.NewSource(0))
|
||||
|
||||
// block data
|
||||
TestChainConfig = params.MainnetChainConfig
|
||||
BlockNumber = TestChainConfig.LondonBlock
|
||||
BlockTime = *TestChainConfig.CancunTime
|
||||
|
||||
// canonical block at London height
|
||||
// includes 5 transactions: 3 Legacy + 1 EIP-2930 + 1 EIP-1559
|
||||
MockHeader = types.Header{
|
||||
Time: BlockTime,
|
||||
Time: 0,
|
||||
Number: new(big.Int).Set(BlockNumber),
|
||||
Root: common.HexToHash("0x0"),
|
||||
TxHash: common.HexToHash("0x0"),
|
||||
@ -60,26 +55,21 @@ var (
|
||||
BaseFee: big.NewInt(params.InitialBaseFee),
|
||||
Coinbase: common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476777"),
|
||||
}
|
||||
MockTransactions, MockReceipts, SenderAddr = createTransactionsAndReceipts(TestChainConfig, BlockNumber, BlockTime)
|
||||
MockWithdrawals = types.Withdrawals{
|
||||
{Index: 0, Validator: 1, Address: Address, Amount: 1000000000},
|
||||
{Index: 1, Validator: 5, Address: AnotherAddress, Amount: 2000000000},
|
||||
}
|
||||
MockBlock = types.NewBlockWithWithdrawals(&MockHeader, MockTransactions, nil, MockReceipts, MockWithdrawals, trie.NewEmpty(nil))
|
||||
MockTransactions, MockReceipts, SenderAddr = createTransactionsAndReceipts(TestChainConfig, BlockNumber)
|
||||
MockBlock = types.NewBlock(&MockHeader, MockTransactions, nil, MockReceipts, trie.NewEmpty(nil))
|
||||
MockHeaderRlp, _ = rlp.EncodeToBytes(MockBlock.Header())
|
||||
|
||||
// non-canonical block at London height
|
||||
// includes 2nd and 5th transactions from the canonical block
|
||||
MockNonCanonicalHeader = MockHeader
|
||||
MockNonCanonicalBlockTransactions = types.Transactions{MockTransactions[1], MockTransactions[4]}
|
||||
MockNonCanonicalBlockReceipts = createNonCanonicalBlockReceipts(TestChainConfig, BlockNumber, BlockTime, MockNonCanonicalBlockTransactions)
|
||||
MockNonCanonicalBlock = types.NewBlockWithWithdrawals(&MockNonCanonicalHeader, MockNonCanonicalBlockTransactions, nil, MockNonCanonicalBlockReceipts, MockWithdrawals[:1], trie.NewEmpty(nil))
|
||||
MockNonCanonicalBlockReceipts = createNonCanonicalBlockReceipts(TestChainConfig, BlockNumber, MockNonCanonicalBlockTransactions)
|
||||
MockNonCanonicalBlock = types.NewBlock(&MockNonCanonicalHeader, MockNonCanonicalBlockTransactions, nil, MockNonCanonicalBlockReceipts, trie.NewEmpty(nil))
|
||||
MockNonCanonicalHeaderRlp, _ = rlp.EncodeToBytes(MockNonCanonicalBlock.Header())
|
||||
|
||||
// non-canonical block at London height + 1
|
||||
// includes 3rd and 5th transactions from the canonical block
|
||||
Block2Number = big.NewInt(BlockNumber.Int64() + 1)
|
||||
Block2Time = BlockTime + 1
|
||||
MockNonCanonicalHeader2 = types.Header{
|
||||
Time: 0,
|
||||
Number: new(big.Int).Set(Block2Number),
|
||||
@ -92,8 +82,8 @@ var (
|
||||
Coinbase: common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476777"),
|
||||
}
|
||||
MockNonCanonicalBlock2Transactions = types.Transactions{MockTransactions[2], MockTransactions[4]}
|
||||
MockNonCanonicalBlock2Receipts = createNonCanonicalBlockReceipts(TestChainConfig, Block2Number, BlockTime, MockNonCanonicalBlock2Transactions)
|
||||
MockNonCanonicalBlock2 = types.NewBlockWithWithdrawals(&MockNonCanonicalHeader2, MockNonCanonicalBlock2Transactions, nil, MockNonCanonicalBlock2Receipts, types.Withdrawals{}, trie.NewEmpty(nil))
|
||||
MockNonCanonicalBlock2Receipts = createNonCanonicalBlockReceipts(TestChainConfig, Block2Number, MockNonCanonicalBlock2Transactions)
|
||||
MockNonCanonicalBlock2 = types.NewBlock(&MockNonCanonicalHeader2, MockNonCanonicalBlock2Transactions, nil, MockNonCanonicalBlock2Receipts, trie.NewEmpty(nil))
|
||||
MockNonCanonicalHeader2Rlp, _ = rlp.EncodeToBytes(MockNonCanonicalBlock2.Header())
|
||||
|
||||
Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
|
||||
@ -168,7 +158,7 @@ var (
|
||||
ContractLeafKey = test_helpers.AddressToLeafKey(ContractAddress)
|
||||
ContractAccount = &types.StateAccount{
|
||||
Nonce: nonce1,
|
||||
Balance: uint256.NewInt(0),
|
||||
Balance: big.NewInt(0),
|
||||
CodeHash: ContractCodeHash.Bytes(),
|
||||
Root: common.HexToHash(ContractRoot),
|
||||
}
|
||||
@ -192,7 +182,7 @@ var (
|
||||
AccountCodeHash = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
|
||||
AccountLeafKey = test_helpers.Account2LeafKey
|
||||
RemovedLeafKey = test_helpers.Account1LeafKey
|
||||
Balance = uint256.MustFromDecimal("106387458790507306766")
|
||||
Balance, _ = new(big.Int).SetString("106387458790507306766", 10)
|
||||
Account = &types.StateAccount{
|
||||
Nonce: nonce0,
|
||||
Balance: Balance,
|
||||
@ -355,10 +345,7 @@ func NewLegacyData(config *params.ChainConfig) *LegacyData {
|
||||
|
||||
mockTransactions, mockReceipts, senderAddr := createLegacyTransactionsAndReceipts(config, blockNumber)
|
||||
mockBlock := types.NewBlock(&mockHeader, mockTransactions, nil, mockReceipts, trie.NewEmpty(nil))
|
||||
mockHeaderRlp, err := rlp.EncodeToBytes(mockBlock.Header())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
mockHeaderRlp, _ := rlp.EncodeToBytes(mockBlock.Header())
|
||||
contractAddress := crypto.CreateAddress(senderAddr, mockTransactions[2].Nonce())
|
||||
|
||||
return &LegacyData{
|
||||
@ -394,11 +381,9 @@ func createLegacyTransactionsAndReceipts(config *params.ChainConfig, blockNumber
|
||||
trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), []byte{})
|
||||
trx3 := types.NewContractCreation(2, big.NewInt(1500), 75, big.NewInt(150), MockContractByteCode)
|
||||
|
||||
// For legacy data, block time is not relevant
|
||||
blockTime := uint64(0)
|
||||
transactionSigner := types.MakeSigner(config, blockNumber, blockTime)
|
||||
transactionSigner := types.MakeSigner(config, blockNumber)
|
||||
mockCurve := elliptic.P256()
|
||||
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rng)
|
||||
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
|
||||
if err != nil {
|
||||
log.Crit(err.Error())
|
||||
}
|
||||
@ -434,15 +419,13 @@ func createLegacyTransactionsAndReceipts(config *params.ChainConfig, blockNumber
|
||||
return types.Transactions{signedTrx1, signedTrx2, signedTrx3}, types.Receipts{mockReceipt1, mockReceipt2, mockReceipt3}, senderAddr
|
||||
}
|
||||
|
||||
// createTransactionsAndReceipts generates signed mock transactions and mock receipts with mock logs, and returns the address of the sender with them.
|
||||
func createTransactionsAndReceipts(config *params.ChainConfig, blockNumber *big.Int, blockTime uint64) (types.Transactions, types.Receipts, common.Address) {
|
||||
const txCount = 6
|
||||
// createTransactionsAndReceipts is a helper function to generate signed mock transactions and mock receipts with mock logs
|
||||
func createTransactionsAndReceipts(config *params.ChainConfig, blockNumber *big.Int) (types.Transactions, types.Receipts, common.Address) {
|
||||
// make transactions
|
||||
txs := make(types.Transactions, txCount)
|
||||
txs[0] = types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), []byte{})
|
||||
txs[1] = types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), []byte{})
|
||||
txs[2] = types.NewContractCreation(2, big.NewInt(1500), 75, big.NewInt(150), MockContractByteCode)
|
||||
txs[3] = types.NewTx(&types.AccessListTx{
|
||||
trx1 := types.NewTransaction(0, Address, big.NewInt(1000), 50, big.NewInt(100), []byte{})
|
||||
trx2 := types.NewTransaction(1, AnotherAddress, big.NewInt(2000), 100, big.NewInt(200), []byte{})
|
||||
trx3 := types.NewContractCreation(2, big.NewInt(1500), 75, big.NewInt(150), MockContractByteCode)
|
||||
trx4 := types.NewTx(&types.AccessListTx{
|
||||
ChainID: config.ChainID,
|
||||
Nonce: 0,
|
||||
GasPrice: big.NewInt(100),
|
||||
@ -455,7 +438,7 @@ func createTransactionsAndReceipts(config *params.ChainConfig, blockNumber *big.
|
||||
AccessListEntry2,
|
||||
},
|
||||
})
|
||||
txs[4] = types.NewTx(&types.DynamicFeeTx{
|
||||
trx5 := types.NewTx(&types.DynamicFeeTx{
|
||||
ChainID: config.ChainID,
|
||||
Nonce: 0,
|
||||
GasTipCap: big.NewInt(100),
|
||||
@ -469,84 +452,74 @@ func createTransactionsAndReceipts(config *params.ChainConfig, blockNumber *big.
|
||||
AccessListEntry2,
|
||||
},
|
||||
})
|
||||
txs[5] = types.NewTx(&types.BlobTx{
|
||||
ChainID: uint256.MustFromBig(config.ChainID),
|
||||
Nonce: 0,
|
||||
GasTipCap: uint256.NewInt(100),
|
||||
GasFeeCap: uint256.NewInt(100),
|
||||
Gas: 50,
|
||||
To: AnotherAddress,
|
||||
Value: uint256.NewInt(0),
|
||||
BlobFeeCap: uint256.NewInt(1e6),
|
||||
BlobHashes: []common.Hash{
|
||||
common.HexToHash("0x0100000000000000000000000000000000000000000000000000000000000001"),
|
||||
common.HexToHash("0x0100000000000000000000000000000000000000000000000000000000000002"),
|
||||
},
|
||||
})
|
||||
|
||||
transactionSigner := types.MakeSigner(config, blockNumber, blockTime)
|
||||
transactionSigner := types.MakeSigner(config, blockNumber)
|
||||
mockCurve := elliptic.P256()
|
||||
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rng)
|
||||
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
|
||||
if err != nil {
|
||||
log.Crit(err.Error())
|
||||
}
|
||||
var signedTxs types.Transactions
|
||||
for _, tx := range txs {
|
||||
signed, err := types.SignTx(tx, transactionSigner, mockPrvKey)
|
||||
signedTrx1, err := types.SignTx(trx1, transactionSigner, mockPrvKey)
|
||||
if err != nil {
|
||||
log.Crit(err.Error())
|
||||
}
|
||||
signedTxs = append(signedTxs, signed)
|
||||
signedTrx2, err := types.SignTx(trx2, transactionSigner, mockPrvKey)
|
||||
if err != nil {
|
||||
log.Crit(err.Error())
|
||||
}
|
||||
senderAddr, err := types.Sender(transactionSigner, signedTxs[0]) // same for all txs
|
||||
signedTrx3, err := types.SignTx(trx3, transactionSigner, mockPrvKey)
|
||||
if err != nil {
|
||||
log.Crit(err.Error())
|
||||
}
|
||||
signedTrx4, err := types.SignTx(trx4, transactionSigner, mockPrvKey)
|
||||
if err != nil {
|
||||
log.Crit(err.Error())
|
||||
}
|
||||
signedTrx5, err := types.SignTx(trx5, transactionSigner, mockPrvKey)
|
||||
if err != nil {
|
||||
log.Crit(err.Error())
|
||||
}
|
||||
|
||||
senderAddr, err := types.Sender(transactionSigner, signedTrx1) // same for both trx
|
||||
if err != nil {
|
||||
log.Crit(err.Error())
|
||||
}
|
||||
|
||||
// make receipts
|
||||
receipts := make(types.Receipts, txCount)
|
||||
receipts[0] = types.NewReceipt(nil, false, 50)
|
||||
receipts[0].Logs = []*types.Log{MockLog1}
|
||||
receipts[0].TxHash = signedTxs[0].Hash()
|
||||
receipts[1] = types.NewReceipt(common.HexToHash("0x1").Bytes(), false, 100)
|
||||
receipts[1].Logs = []*types.Log{MockLog2, ShortLog1}
|
||||
receipts[1].TxHash = signedTxs[1].Hash()
|
||||
receipts[2] = types.NewReceipt(common.HexToHash("0x2").Bytes(), false, 75)
|
||||
receipts[2].Logs = []*types.Log{}
|
||||
receipts[2].TxHash = signedTxs[2].Hash()
|
||||
receipts[3] = &types.Receipt{
|
||||
mockReceipt1 := types.NewReceipt(nil, false, 50)
|
||||
mockReceipt1.Logs = []*types.Log{MockLog1}
|
||||
mockReceipt1.TxHash = signedTrx1.Hash()
|
||||
mockReceipt2 := types.NewReceipt(common.HexToHash("0x1").Bytes(), false, 100)
|
||||
mockReceipt2.Logs = []*types.Log{MockLog2, ShortLog1}
|
||||
mockReceipt2.TxHash = signedTrx2.Hash()
|
||||
mockReceipt3 := types.NewReceipt(common.HexToHash("0x2").Bytes(), false, 75)
|
||||
mockReceipt3.Logs = []*types.Log{}
|
||||
mockReceipt3.TxHash = signedTrx3.Hash()
|
||||
mockReceipt4 := &types.Receipt{
|
||||
Type: types.AccessListTxType,
|
||||
PostState: common.HexToHash("0x3").Bytes(),
|
||||
Status: types.ReceiptStatusSuccessful,
|
||||
CumulativeGasUsed: 175,
|
||||
Logs: []*types.Log{MockLog3, MockLog4, ShortLog2},
|
||||
TxHash: signedTxs[3].Hash(),
|
||||
TxHash: signedTrx4.Hash(),
|
||||
}
|
||||
receipts[4] = &types.Receipt{
|
||||
mockReceipt5 := &types.Receipt{
|
||||
Type: types.DynamicFeeTxType,
|
||||
PostState: common.HexToHash("0x3").Bytes(),
|
||||
Status: types.ReceiptStatusSuccessful,
|
||||
CumulativeGasUsed: 175,
|
||||
Logs: []*types.Log{},
|
||||
TxHash: signedTxs[4].Hash(),
|
||||
}
|
||||
receipts[5] = &types.Receipt{
|
||||
Type: types.BlobTxType,
|
||||
PostState: common.HexToHash("0x3").Bytes(),
|
||||
Status: types.ReceiptStatusSuccessful,
|
||||
CumulativeGasUsed: 175,
|
||||
Logs: []*types.Log{},
|
||||
TxHash: signedTxs[5].Hash(),
|
||||
TxHash: signedTrx5.Hash(),
|
||||
}
|
||||
|
||||
return signedTxs, receipts, senderAddr
|
||||
return types.Transactions{signedTrx1, signedTrx2, signedTrx3, signedTrx4, signedTrx5}, types.Receipts{mockReceipt1, mockReceipt2, mockReceipt3, mockReceipt4, mockReceipt5}, senderAddr
|
||||
}
|
||||
|
||||
// createNonCanonicalBlockReceipts is a helper function to generate mock receipts with mock logs for non-canonical blocks
|
||||
func createNonCanonicalBlockReceipts(config *params.ChainConfig, blockNumber *big.Int, blockTime uint64, transactions types.Transactions) types.Receipts {
|
||||
transactionSigner := types.MakeSigner(config, blockNumber, blockTime)
|
||||
func createNonCanonicalBlockReceipts(config *params.ChainConfig, blockNumber *big.Int, transactions types.Transactions) types.Receipts {
|
||||
transactionSigner := types.MakeSigner(config, blockNumber)
|
||||
mockCurve := elliptic.P256()
|
||||
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rng)
|
||||
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
|
||||
if err != nil {
|
||||
log.Crit(err.Error())
|
||||
}
|
||||
|
@ -16,10 +16,7 @@
|
||||
|
||||
package models
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/lib/pq"
|
||||
)
|
||||
import "github.com/lib/pq"
|
||||
|
||||
// IPLDModel is the db model for ipld.blocks
|
||||
type IPLDModel struct {
|
||||
@ -45,7 +42,6 @@ type HeaderModel struct {
|
||||
Timestamp uint64 `db:"timestamp"`
|
||||
Coinbase string `db:"coinbase"`
|
||||
Canonical bool `db:"canonical"`
|
||||
WithdrawalsRoot string `db:"withdrawals_root"`
|
||||
}
|
||||
|
||||
// UncleModel is the db model for eth.uncle_cids
|
||||
@ -109,7 +105,7 @@ type StorageNodeModel struct {
|
||||
Value []byte `db:"val"`
|
||||
}
|
||||
|
||||
// LogsModel is the db model for eth.log_cids
|
||||
// LogsModel is the db model for eth.logs
|
||||
type LogsModel struct {
|
||||
BlockNumber string `db:"block_number"`
|
||||
HeaderID string `db:"header_id"`
|
||||
@ -122,21 +118,3 @@ type LogsModel struct {
|
||||
Topic2 string `db:"topic2"`
|
||||
Topic3 string `db:"topic3"`
|
||||
}
|
||||
|
||||
// WithdrawalModel is the db model for eth.withdrawal_cids
type WithdrawalModel struct {
	BlockNumber string `db:"block_number"`
	HeaderID    string `db:"header_id"`
	CID         string `db:"cid"`
	Index       uint64 `db:"index"`
	Validator   uint64 `db:"validator"`
	Address     string `db:"address"`
	Amount      uint64 `db:"amount"`
}

// BlobHashModel is the DB model for eth.blob_hashes
type BlobHashModel struct {
	TxHash   string      `db:"tx_hash"`
	Index    uint64      `db:"index"`
	BlobHash common.Hash `db:"blob_hash"`
}
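For illustration only (not part of this diff), a sketch of how a WithdrawalModel might be populated from a consensus-layer withdrawal; the block number, header ID, and CID values below are placeholders:

package main

import (
	"fmt"

	"github.com/cerc-io/plugeth-statediff/indexer/models"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	wd := types.Withdrawal{Index: 0, Validator: 1, Address: common.HexToAddress("0x01"), Amount: 1000000000}
	model := models.WithdrawalModel{
		BlockNumber: "1",                    // placeholder block number
		HeaderID:    common.Hash{}.String(), // placeholder header hash
		CID:         "placeholder-cid",      // placeholder IPLD CID
		Index:       wd.Index,
		Validator:   wd.Validator,
		Address:     wd.Address.String(),
		Amount:      wd.Amount,
	}
	fmt.Printf("%+v\n", model)
}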
|
||||
|
@ -35,12 +35,3 @@ func HandleZeroAddr(to common.Address) string {
|
||||
}
|
||||
return to.String()
|
||||
}
|
||||
|
||||
// MaybeStringHash returns the string representation of the given hash.
// When passed nil, it returns an empty string.
func MaybeStringHash(hash *common.Hash) string {
	if hash == nil {
		return ""
	}
	return hash.String()
}
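A minimal usage sketch of this helper (values are placeholders, not from this changeset); it lets callers stringify an optional hash, such as a pre-Shanghai header's nil WithdrawalsHash, without a nil check at every call site:

package main

import (
	"fmt"

	"github.com/cerc-io/plugeth-statediff/indexer/shared"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	var missing *common.Hash
	fmt.Printf("%q\n", shared.MaybeStringHash(missing)) // ""

	root := common.HexToHash("0x01")
	fmt.Println(shared.MaybeStringHash(&root)) // full 0x-prefixed 32-byte hex string
}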
|
||||
|
@ -16,25 +16,6 @@
|
||||
|
||||
package schema
|
||||
|
||||
var EthTables = []*Table{
|
||||
&TableIPLDBlock,
|
||||
&TableNodeInfo,
|
||||
&TableHeader,
|
||||
&TableStateNode,
|
||||
&TableStorageNode,
|
||||
&TableUncle,
|
||||
&TableTransaction,
|
||||
&TableReceipt,
|
||||
&TableLog,
|
||||
&TableWithdrawal,
|
||||
&TableBlobHash,
|
||||
}
|
||||
|
||||
var AllTables = append(
|
||||
EthTables,
|
||||
&TableWatchedAddresses,
|
||||
)
|
||||
|
||||
var TableIPLDBlock = Table{
|
||||
Name: `ipld.blocks`,
|
||||
Columns: []Column{
|
||||
@ -71,10 +52,9 @@ var TableHeader = Table{
|
||||
{Name: "receipt_root", Type: Dvarchar},
|
||||
{Name: "uncles_hash", Type: Dvarchar},
|
||||
{Name: "bloom", Type: Dbytea},
|
||||
{Name: "timestamp", Type: Dbigint},
|
||||
{Name: "timestamp", Type: Dnumeric},
|
||||
{Name: "coinbase", Type: Dvarchar},
|
||||
{Name: "canonical", Type: Dboolean},
|
||||
{Name: "withdrawals_root", Type: Dvarchar},
|
||||
},
|
||||
UpsertClause: OnConflict("block_number", "block_hash").Set(
|
||||
"parent_hash",
|
||||
@ -90,7 +70,6 @@ var TableHeader = Table{
|
||||
"timestamp",
|
||||
"coinbase",
|
||||
"canonical",
|
||||
"withdrawals_root",
|
||||
)}
|
||||
|
||||
var TableStateNode = Table{
|
||||
@ -186,29 +165,6 @@ var TableLog = Table{
|
||||
UpsertClause: OnConflict("block_number", "header_id", "rct_id", "index"),
|
||||
}
|
||||
|
||||
var TableWithdrawal = Table{
|
||||
Name: "eth.withdrawal_cids",
|
||||
Columns: []Column{
|
||||
{Name: "block_number", Type: Dbigint},
|
||||
{Name: "header_id", Type: Dvarchar},
|
||||
{Name: "cid", Type: Dtext},
|
||||
{Name: "index", Type: Dinteger},
|
||||
{Name: "validator", Type: Dinteger},
|
||||
{Name: "address", Type: Dvarchar},
|
||||
{Name: "amount", Type: Dinteger},
|
||||
},
|
||||
UpsertClause: OnConflict("block_number", "header_id", "index"),
|
||||
}
|
||||
|
||||
var TableBlobHash = Table{
|
||||
Name: "eth.blob_hashes",
|
||||
Columns: []Column{
|
||||
{Name: "tx_hash", Type: Dvarchar},
|
||||
{Name: "index", Type: Dinteger},
|
||||
{Name: "blob_hash", Type: Dbytea},
|
||||
},
|
||||
}
|
||||
|
||||
var TableWatchedAddresses = Table{
|
||||
Name: "eth_meta.watched_addresses",
|
||||
Columns: []Column{
|
||||
|
@ -53,6 +53,34 @@ type Table struct {
|
||||
UpsertClause ConflictClause
|
||||
}
|
||||
|
||||
type colfmt = func(interface{}) string
|
||||
|
||||
func (tbl *Table) ToCsvRow(args ...interface{}) []string {
|
||||
var row []string
|
||||
for i, col := range tbl.Columns {
|
||||
value := col.Type.formatter()(args[i])
|
||||
|
||||
if col.Array {
|
||||
valueList := funk.Map(args[i], col.Type.formatter()).([]string)
|
||||
value = fmt.Sprintf("{%s}", strings.Join(valueList, ","))
|
||||
}
|
||||
|
||||
row = append(row, value)
|
||||
}
|
||||
return row
|
||||
}
|
||||
|
||||
func (tbl *Table) VarcharColumns() []string {
|
||||
columns := funk.Filter(tbl.Columns, func(col Column) bool {
|
||||
return col.Type == Dvarchar
|
||||
}).([]Column)
|
||||
|
||||
columnNames := funk.Map(columns, func(col Column) string {
|
||||
return col.Name
|
||||
}).([]string)
|
||||
return columnNames
|
||||
}
|
||||
|
||||
func OnConflict(target ...string) ConflictClause {
|
||||
return ConflictClause{Target: target}
|
||||
}
|
||||
@ -61,6 +89,35 @@ func (c ConflictClause) Set(fields ...string) ConflictClause {
|
||||
return c
|
||||
}
|
||||
|
||||
// ToInsertStatement returns a Postgres-compatible SQL insert statement for the table
|
||||
// using positional placeholders
|
||||
func (tbl *Table) ToInsertStatement(upsert bool) string {
|
||||
var colnames, placeholders []string
|
||||
for i, col := range tbl.Columns {
|
||||
colnames = append(colnames, col.Name)
|
||||
placeholders = append(placeholders, fmt.Sprintf("$%d", i+1))
|
||||
}
|
||||
suffix := fmt.Sprintf("ON CONFLICT (%s)", strings.Join(tbl.UpsertClause.Target, ", "))
|
||||
if upsert && len(tbl.UpsertClause.Update) != 0 {
|
||||
var update_placeholders []string
|
||||
for _, name := range tbl.UpsertClause.Update {
|
||||
i := funk.IndexOf(tbl.Columns, func(col Column) bool { return col.Name == name })
|
||||
update_placeholders = append(update_placeholders, fmt.Sprintf("$%d", i+1))
|
||||
}
|
||||
suffix += fmt.Sprintf(
|
||||
" DO UPDATE SET (%s) = (%s)",
|
||||
strings.Join(tbl.UpsertClause.Update, ", "), strings.Join(update_placeholders, ", "),
|
||||
)
|
||||
} else {
|
||||
suffix += " DO NOTHING"
|
||||
}
|
||||
|
||||
return fmt.Sprintf(
|
||||
"INSERT INTO %s (%s) VALUES (%s) %s",
|
||||
tbl.Name, strings.Join(colnames, ", "), strings.Join(placeholders, ", "), suffix,
|
||||
)
|
||||
}
|
||||
|
||||
// TableName returns a pgx-compatible table name.
|
||||
func (tbl *Table) TableName() []string {
|
||||
return strings.Split(tbl.Name, ".")
|
||||
@ -75,45 +132,11 @@ func (tbl *Table) ColumnNames() []string {
|
||||
return names
|
||||
}
|
||||
|
||||
// PreparedInsert returns a pgx/sqlx-compatible SQL prepared insert statement for the table
// using positional placeholders.
// If upsert is true, include an ON CONFLICT clause handling column updates.
func (tbl *Table) PreparedInsert(upsert bool) string {
	var colnames, placeholders []string
	for i, col := range tbl.Columns {
		colnames = append(colnames, col.Name)
		placeholders = append(placeholders, fmt.Sprintf("$%d", i+1))
	}
	suffix := " ON CONFLICT"
	if len(tbl.UpsertClause.Target) > 0 {
		suffix += fmt.Sprintf(" (%s)", strings.Join(tbl.UpsertClause.Target, ", "))
	}
	if upsert && len(tbl.UpsertClause.Update) != 0 {
		var update_placeholders []string
		for _, name := range tbl.UpsertClause.Update {
			update_placeholders = append(update_placeholders, "EXCLUDED."+name)
		}
		suffix += fmt.Sprintf(
			" DO UPDATE SET (%s) = ROW(%s)",
			strings.Join(tbl.UpsertClause.Update, ", "), strings.Join(update_placeholders, ", "),
		)
	} else {
		suffix += " DO NOTHING"
	}

	return fmt.Sprintf(
		"INSERT INTO %s (%s) VALUES (%s)",
		tbl.Name, strings.Join(colnames, ", "), strings.Join(placeholders, ", "),
	) + suffix
}
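For context, a hedged sketch of how the generated statement might be fed to sqlx; the table definition, DSN, and values below are hypothetical, and only the schema package API is taken from this diff:

package main

import (
	"log"

	"github.com/cerc-io/plugeth-statediff/indexer/shared/schema"
	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

func main() {
	// Hypothetical table, not one defined in this repository.
	accounts := schema.Table{
		Name: "example.accounts",
		Columns: []schema.Column{
			{Name: "id", Type: schema.Dbigint},
			{Name: "owner", Type: schema.Dvarchar},
		},
		UpsertClause: schema.OnConflict("id").Set("owner"),
	}

	// PreparedInsert(true) yields (wrapped here for readability):
	//   INSERT INTO example.accounts (id, owner) VALUES ($1, $2)
	//   ON CONFLICT (id) DO UPDATE SET (owner) = ROW(EXCLUDED.owner)
	db, err := sqlx.Connect("postgres", "postgres://user:pass@localhost/example?sslmode=disable") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	stmt, err := db.Preparex(accounts.PreparedInsert(true))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := stmt.Exec(1, "alice"); err != nil {
		log.Fatal(err)
	}
}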
|
||||
|
||||
type colfmt = func(interface{}) string

func sprintf(f string) colfmt {
	return func(x interface{}) string { return fmt.Sprintf(f, x) }
}

func (typ colType) csvFormatter() colfmt {
func (typ colType) formatter() colfmt {
	switch typ {
	case Dinteger:
		return sprintf("%d")
|
||||
@ -134,61 +157,6 @@ func (typ colType) csvFormatter() colfmt {
|
||||
		return sprintf("%s")
	case Dtext:
		return sprintf("%s")
	default:
		panic("invalid column type")
	}
}

// ToCsvRow converts a list of values to a list of strings suitable for CSV output.
func (tbl *Table) ToCsvRow(args ...interface{}) []string {
	var row []string
	for i, col := range tbl.Columns {
		value := col.Type.csvFormatter()(args[i])

		if col.Array {
			valueList := funk.Map(args[i], col.Type.csvFormatter()).([]string)
			value = fmt.Sprintf("{%s}", strings.Join(valueList, ","))
		}

		row = append(row, value)
	}
	return row
}

// VarcharColumns returns the names of columns with type VARCHAR.
func (tbl *Table) VarcharColumns() []string {
	columns := funk.Filter(tbl.Columns, func(col Column) bool {
		return col.Type == Dvarchar
	}).([]Column)

	columnNames := funk.Map(columns, func(col Column) string {
		return col.Name
	}).([]string)
	return columnNames
}
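A small illustrative sketch (hypothetical table and values) of feeding ToCsvRow output to encoding/csv, which is one way such rows can be consumed:

package main

import (
	"encoding/csv"
	"os"

	"github.com/cerc-io/plugeth-statediff/indexer/shared/schema"
)

func main() {
	// Hypothetical table; argument order must match the column order.
	logs := schema.Table{
		Name: "example.logs",
		Columns: []schema.Column{
			{Name: "index", Type: schema.Dinteger},
			{Name: "topic0", Type: schema.Dtext},
		},
	}

	w := csv.NewWriter(os.Stdout)
	// Each column's formatter is applied in turn (%d for integers, %s for text),
	// so this writes the record: 0,0xdeadbeef
	if err := w.Write(logs.ToCsvRow(0, "0xdeadbeef")); err != nil {
		panic(err)
	}
	w.Flush()
}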
|
||||
|
||||
func formatSpec(typ colType) string {
	switch typ {
	case Dinteger:
		return "%d"
	case Dboolean:
		return "%t"
	case Dbytea:
		return `'\x%x'`
	default:
		return "'%s'"
	}
}

// FmtStringInsert returns a format string for creating a Postgres insert statement.
func (tbl *Table) FmtStringInsert() string {
	var colnames, placeholders []string
	for _, col := range tbl.Columns {
		colnames = append(colnames, col.Name)
		placeholders = append(placeholders, formatSpec(col.Type))
	}
	return fmt.Sprintf(
		"INSERT INTO %s (%s) VALUES (%s);",
		tbl.Name, strings.Join(colnames, ", "), strings.Join(placeholders, ", "),
	)
	panic("unreachable")
}
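And a hedged sketch of the format-string variant; the table here is hypothetical, but the produced format string follows directly from formatSpec above:

package main

import (
	"fmt"

	"github.com/cerc-io/plugeth-statediff/indexer/shared/schema"
)

func main() {
	nodes := schema.Table{
		Name: "example.nodes",
		Columns: []schema.Column{
			{Name: "key", Type: schema.Dtext},      // formats as '%s'
			{Name: "count", Type: schema.Dinteger}, // formats as %d
		},
	}

	// FmtStringInsert returns:
	//   INSERT INTO example.nodes (key, count) VALUES ('%s', %d);
	stmt := fmt.Sprintf(nodes.FmtStringInsert(), "abc", 42)
	fmt.Println(stmt) // INSERT INTO example.nodes (key, count) VALUES ('abc', 42);
}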
|
||||
|
@ -8,55 +8,47 @@ import (
|
||||
. "github.com/cerc-io/plugeth-statediff/indexer/shared/schema"
|
||||
)
|
||||
|
||||
var (
|
||||
testTable = Table{
|
||||
Name: "test_table",
|
||||
var testHeaderTable = Table{
|
||||
Name: "eth.header_cids",
|
||||
Columns: []Column{
|
||||
{Name: "id", Type: Dbigint},
|
||||
{Name: "name", Type: Dvarchar},
|
||||
{Name: "age", Type: Dinteger},
|
||||
{Name: "block_number", Type: Dbigint},
|
||||
{Name: "block_hash", Type: Dvarchar},
|
||||
{Name: "parent_hash", Type: Dvarchar},
|
||||
{Name: "cid", Type: Dtext},
|
||||
{Name: "td", Type: Dnumeric},
|
||||
{Name: "node_id", Type: Dvarchar},
|
||||
{Name: "reward", Type: Dnumeric},
|
||||
{Name: "state_root", Type: Dvarchar},
|
||||
{Name: "tx_root", Type: Dvarchar},
|
||||
{Name: "receipt_root", Type: Dvarchar},
|
||||
{Name: "uncle_root", Type: Dvarchar},
|
||||
{Name: "bloom", Type: Dbytea},
|
||||
{Name: "timestamp", Type: Dnumeric},
|
||||
{Name: "mh_key", Type: Dtext},
|
||||
{Name: "times_validated", Type: Dinteger},
|
||||
{Name: "coinbase", Type: Dvarchar},
|
||||
},
|
||||
}
|
||||
testTableWithConflictClause = Table{
|
||||
Name: "test_table_conflict",
|
||||
Columns: []Column{
|
||||
{Name: "id", Type: Dbigint},
|
||||
{Name: "name", Type: Dvarchar},
|
||||
{Name: "age", Type: Dinteger},
|
||||
},
|
||||
UpsertClause: OnConflict("id").Set("name", "age"),
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
expectedHeaderPreparedWithUpsert = "INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_ids, reward, state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, coinbase, canonical, withdrawals_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) ON CONFLICT (block_number, block_hash) DO UPDATE SET (parent_hash, cid, td, node_ids, reward, state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, coinbase, canonical, withdrawals_root) = ROW(EXCLUDED.parent_hash, EXCLUDED.cid, EXCLUDED.td, EXCLUDED.node_ids, EXCLUDED.reward, EXCLUDED.state_root, EXCLUDED.tx_root, EXCLUDED.receipt_root, EXCLUDED.uncles_hash, EXCLUDED.bloom, EXCLUDED.timestamp, EXCLUDED.coinbase, EXCLUDED.canonical, EXCLUDED.withdrawals_root)"
|
||||
|
||||
expectedHeaderPreparedWithoutUpsert = "INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_ids, reward, state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, coinbase, canonical, withdrawals_root) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) ON CONFLICT (block_number, block_hash) DO NOTHING"
|
||||
|
||||
expectedHeaderFmtString = `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_ids, reward, state_root, tx_root, receipt_root, uncles_hash, bloom, timestamp, coinbase, canonical, withdrawals_root) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '\x%x', '%s', '%s', %t, '%s');`
|
||||
)
|
||||
UpsertClause: OnConflict("block_hash", "block_number").Set(
|
||||
"parent_hash",
|
||||
"cid",
|
||||
"td",
|
||||
"node_id",
|
||||
"reward",
|
||||
"state_root",
|
||||
"tx_root",
|
||||
"receipt_root",
|
||||
"uncle_root",
|
||||
"bloom",
|
||||
"timestamp",
|
||||
"mh_key",
|
||||
"times_validated",
|
||||
"coinbase",
|
||||
),
|
||||
}
|
||||
|
||||
func TestTable(t *testing.T) {
|
||||
require.Equal(t,
|
||||
"INSERT INTO test_table (id, name, age) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING",
|
||||
testTable.PreparedInsert(true),
|
||||
)
|
||||
require.Equal(t,
|
||||
"INSERT INTO test_table (id, name, age) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING",
|
||||
testTable.PreparedInsert(false),
|
||||
)
|
||||
require.Equal(t, "INSERT INTO test_table (id, name, age) VALUES ('%s', '%s', %d);", testTable.FmtStringInsert())
|
||||
|
||||
require.Equal(t,
|
||||
"INSERT INTO test_table_conflict (id, name, age) VALUES ($1, $2, $3) ON CONFLICT (id) DO UPDATE SET (name, age) = ROW(EXCLUDED.name, EXCLUDED.age)",
|
||||
testTableWithConflictClause.PreparedInsert(true),
|
||||
)
|
||||
require.Equal(t,
|
||||
"INSERT INTO test_table_conflict (id, name, age) VALUES ($1, $2, $3) ON CONFLICT (id) DO NOTHING",
|
||||
testTableWithConflictClause.PreparedInsert(false),
|
||||
)
|
||||
|
||||
require.Equal(t, expectedHeaderPreparedWithUpsert, TableHeader.PreparedInsert(true))
|
||||
require.Equal(t, expectedHeaderPreparedWithoutUpsert, TableHeader.PreparedInsert(false))
|
||||
require.Equal(t, expectedHeaderFmtString, TableHeader.FmtStringInsert())
|
||||
headerUpsert := `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) ON CONFLICT (block_hash, block_number) DO UPDATE SET (parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) = ($3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)`
|
||||
headerNoUpsert := `INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated, coinbase) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) ON CONFLICT (block_hash, block_number) DO NOTHING`
|
||||
require.Equal(t, headerNoUpsert, testHeaderTable.ToInsertStatement(false))
|
||||
require.Equal(t, headerUpsert, testHeaderTable.ToInsertStatement(true))
|
||||
}
|
||||
|
@ -33,9 +33,10 @@ import (
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/mocks"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/models"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/shared"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/test_helpers"
|
||||
)
|
||||
|
||||
// SetupTestData indexes a single mock block along with its state nodes
|
||||
// SetupTestData indexes a single mock block along with it's state nodes
|
||||
func SetupTestData(t *testing.T, ind interfaces.StateDiffIndexer) {
|
||||
var tx interfaces.Batch
|
||||
tx, err = ind.PushBlock(
|
||||
@ -109,14 +110,16 @@ func DoTestPublishAndIndexTransactionIPLDs(t *testing.T, db sql.Database) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.Equal(t, len(txCIDs), len(trxs))
|
||||
for _, c := range txCIDs {
|
||||
require.Contains(t, trxs, c.String())
|
||||
}
|
||||
require.Equal(t, 5, len(trxs))
|
||||
expectTrue(t, test_helpers.ListContainsString(trxs, trx1CID.String()))
|
||||
expectTrue(t, test_helpers.ListContainsString(trxs, trx2CID.String()))
|
||||
expectTrue(t, test_helpers.ListContainsString(trxs, trx3CID.String()))
|
||||
expectTrue(t, test_helpers.ListContainsString(trxs, trx4CID.String()))
|
||||
expectTrue(t, test_helpers.ListContainsString(trxs, trx5CID.String()))
|
||||
|
||||
transactions := mocks.MockBlock.Transactions()
|
||||
type txResult struct {
|
||||
TxType int `db:"tx_type"`
|
||||
TxType uint8 `db:"tx_type"`
|
||||
Value string
|
||||
}
|
||||
for _, c := range trxs {
|
||||
@ -130,11 +133,9 @@ func DoTestPublishAndIndexTransactionIPLDs(t *testing.T, db sql.Database) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
txTypeAndValueStr := `SELECT tx_type, CAST(value as TEXT) FROM eth.transaction_cids WHERE cid = $1`
|
||||
txBlobHashQuery := `SELECT blob_hash FROM eth.blob_hashes WHERE tx_hash = $1`
|
||||
txBlobIndexQuery := `SELECT index FROM eth.blob_hashes WHERE tx_hash = $1`
|
||||
switch c {
|
||||
case txCIDs[0].String():
|
||||
require.Equal(t, encodedTxs[0], data)
|
||||
case trx1CID.String():
|
||||
require.Equal(t, tx1, data)
|
||||
txRes := new(txResult)
|
||||
err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value)
|
||||
if err != nil {
|
||||
@ -146,8 +147,8 @@ func DoTestPublishAndIndexTransactionIPLDs(t *testing.T, db sql.Database) {
|
||||
if txRes.Value != transactions[0].Value().String() {
|
||||
t.Fatalf("expected tx value %s got %s", transactions[0].Value().String(), txRes.Value)
|
||||
}
|
||||
case txCIDs[1].String():
|
||||
require.Equal(t, encodedTxs[1], data)
|
||||
case trx2CID.String():
|
||||
require.Equal(t, tx2, data)
|
||||
txRes := new(txResult)
|
||||
err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value)
|
||||
if err != nil {
|
||||
@ -159,8 +160,8 @@ func DoTestPublishAndIndexTransactionIPLDs(t *testing.T, db sql.Database) {
|
||||
if txRes.Value != transactions[1].Value().String() {
|
||||
t.Fatalf("expected tx value %s got %s", transactions[1].Value().String(), txRes.Value)
|
||||
}
|
||||
case txCIDs[2].String():
|
||||
require.Equal(t, encodedTxs[2], data)
|
||||
case trx3CID.String():
|
||||
require.Equal(t, tx3, data)
|
||||
txRes := new(txResult)
|
||||
err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value)
|
||||
if err != nil {
|
||||
@ -172,8 +173,8 @@ func DoTestPublishAndIndexTransactionIPLDs(t *testing.T, db sql.Database) {
|
||||
if txRes.Value != transactions[2].Value().String() {
|
||||
t.Fatalf("expected tx value %s got %s", transactions[2].Value().String(), txRes.Value)
|
||||
}
|
||||
case txCIDs[3].String():
|
||||
require.Equal(t, encodedTxs[3], data)
|
||||
case trx4CID.String():
|
||||
require.Equal(t, tx4, data)
|
||||
txRes := new(txResult)
|
||||
err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value)
|
||||
if err != nil {
|
||||
@ -185,8 +186,8 @@ func DoTestPublishAndIndexTransactionIPLDs(t *testing.T, db sql.Database) {
|
||||
if txRes.Value != transactions[3].Value().String() {
|
||||
t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value)
|
||||
}
|
||||
case txCIDs[4].String():
|
||||
require.Equal(t, encodedTxs[4], data)
|
||||
case trx5CID.String():
|
||||
require.Equal(t, tx5, data)
|
||||
txRes := new(txResult)
|
||||
err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value)
|
||||
if err != nil {
|
||||
@ -198,28 +199,6 @@ func DoTestPublishAndIndexTransactionIPLDs(t *testing.T, db sql.Database) {
|
||||
if txRes.Value != transactions[4].Value().String() {
|
||||
t.Fatalf("expected tx value %s got %s", transactions[4].Value().String(), txRes.Value)
|
||||
}
|
||||
case txCIDs[5].String():
|
||||
require.Equal(t, encodedTxs[5], data)
|
||||
var txRes txResult
|
||||
err = db.QueryRow(context.Background(), txTypeAndValueStr, c).Scan(&txRes.TxType, &txRes.Value)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.Equal(t, types.BlobTxType, txRes.TxType)
|
||||
require.Equal(t, transactions[5].Value().String(), txRes.Value)
|
||||
|
||||
var txBlobHashes []common.Hash
|
||||
var txBlobIndices []uint64
|
||||
err = db.Select(context.Background(), &txBlobHashes, txBlobHashQuery, transactions[5].Hash().String())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.Equal(t, transactions[5].BlobHashes(), txBlobHashes)
|
||||
err = db.Select(context.Background(), &txBlobIndices, txBlobIndexQuery, transactions[5].Hash().String())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.Equal(t, []uint64{0, 1}, txBlobIndices)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -277,10 +256,12 @@ func DoTestPublishAndIndexReceiptIPLDs(t *testing.T, db sql.Database) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.Equal(t, len(rctCIDs), len(rcts))
|
||||
for _, c := range rctCIDs {
|
||||
require.Contains(t, rcts, c.String())
|
||||
}
|
||||
require.Equal(t, 5, len(rcts))
|
||||
expectTrue(t, test_helpers.ListContainsString(rcts, rct1CID.String()))
|
||||
expectTrue(t, test_helpers.ListContainsString(rcts, rct2CID.String()))
|
||||
expectTrue(t, test_helpers.ListContainsString(rcts, rct3CID.String()))
|
||||
expectTrue(t, test_helpers.ListContainsString(rcts, rct4CID.String()))
|
||||
expectTrue(t, test_helpers.ListContainsString(rcts, rct5CID.String()))
|
||||
|
||||
for idx, c := range rcts {
|
||||
result := make([]models.IPLDModel, 0)
|
||||
@ -309,8 +290,8 @@ func DoTestPublishAndIndexReceiptIPLDs(t *testing.T, db sql.Database) {
|
||||
|
||||
postStatePgStr := `SELECT post_state FROM eth.receipt_cids WHERE cid = $1`
|
||||
switch c {
|
||||
case rctCIDs[0].String():
|
||||
require.Equal(t, encodedRcts[0], data)
|
||||
case rct1CID.String():
|
||||
require.Equal(t, rct1, data)
|
||||
var postStatus uint64
|
||||
pgStr = `SELECT post_status FROM eth.receipt_cids WHERE cid = $1`
|
||||
err = db.Get(context.Background(), &postStatus, pgStr, c)
|
||||
@ -318,81 +299,38 @@ func DoTestPublishAndIndexReceiptIPLDs(t *testing.T, db sql.Database) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.Equal(t, mocks.ExpectedPostStatus, postStatus)
|
||||
case rctCIDs[1].String():
|
||||
require.Equal(t, encodedRcts[1], data)
|
||||
case rct2CID.String():
|
||||
require.Equal(t, rct2, data)
|
||||
var postState string
|
||||
err = db.Get(context.Background(), &postState, postStatePgStr, c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.Equal(t, mocks.ExpectedPostState1, postState)
|
||||
case rctCIDs[2].String():
|
||||
require.Equal(t, encodedRcts[2], data)
|
||||
case rct3CID.String():
|
||||
require.Equal(t, rct3, data)
|
||||
var postState string
|
||||
err = db.Get(context.Background(), &postState, postStatePgStr, c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.Equal(t, mocks.ExpectedPostState2, postState)
|
||||
case rctCIDs[3].String():
|
||||
require.Equal(t, encodedRcts[3], data)
|
||||
case rct4CID.String():
|
||||
require.Equal(t, rct4, data)
|
||||
var postState string
|
||||
err = db.Get(context.Background(), &postState, postStatePgStr, c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.Equal(t, mocks.ExpectedPostState3, postState)
|
||||
case rctCIDs[4].String():
|
||||
require.Equal(t, encodedRcts[4], data)
|
||||
case rct5CID.String():
|
||||
require.Equal(t, rct5, data)
|
||||
var postState string
|
||||
err = db.Get(context.Background(), &postState, postStatePgStr, c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.Equal(t, mocks.ExpectedPostState3, postState)
|
||||
case rctCIDs[5].String():
|
||||
require.Equal(t, encodedRcts[5], data)
|
||||
var postState string
|
||||
err = db.Get(context.Background(), &postState, postStatePgStr, c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.Equal(t, mocks.ExpectedPostState3, postState)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func DoTestPublishAndIndexWithdrawalIPLDs(t *testing.T, db sql.Database) {
|
||||
// check that withdrawals were properly indexed and published
|
||||
wds := make([]string, 0)
|
||||
pgStr := `SELECT withdrawal_cids.cid FROM eth.withdrawal_cids
|
||||
INNER JOIN eth.header_cids ON (withdrawal_cids.header_id = header_cids.block_hash)
|
||||
WHERE header_cids.block_number = $1
|
||||
ORDER BY withdrawal_cids.index`
|
||||
err = db.Select(context.Background(), &wds, pgStr, mocks.BlockNumber.Uint64())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.Equal(t, 2, len(wds))
|
||||
require.Contains(t, wds, wd1CID.String())
|
||||
require.Contains(t, wds, wd2CID.String())
|
||||
|
||||
for _, c := range wds {
|
||||
dc, err := cid.Decode(c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var data []byte
|
||||
err = db.Get(context.Background(), &data, ipfsPgGet, dc.String(), mocks.BlockNumber.Uint64())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
switch c {
|
||||
case wd1CID.String():
|
||||
require.Equal(t, wd1, data)
|
||||
case wd2CID.String():
|
||||
require.Equal(t, wd2, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -656,7 +594,7 @@ func SetupTestDataNonCanonical(t *testing.T, ind interfaces.StateDiffIndexer) {
|
||||
func TestPublishAndIndexHeaderNonCanonical(t *testing.T, db sql.Database) {
|
||||
// check indexed headers
|
||||
pgStr := `SELECT CAST(block_number as TEXT), block_hash, cid, cast(td AS TEXT), cast(reward AS TEXT),
|
||||
tx_root, receipt_root, uncles_hash, coinbase, withdrawals_root
|
||||
tx_root, receipt_root, uncles_hash, coinbase
|
||||
FROM eth.header_cids
|
||||
ORDER BY block_number`
|
||||
headerRes := make([]models.HeaderModel, 0)
|
||||
@ -678,7 +616,6 @@ func TestPublishAndIndexHeaderNonCanonical(t *testing.T, db sql.Database) {
|
||||
RctRoot: mockBlock.ReceiptHash().String(),
|
||||
UnclesHash: mockBlock.UncleHash().String(),
|
||||
Coinbase: mocks.MockHeader.Coinbase.String(),
|
||||
WithdrawalsRoot: shared.MaybeStringHash(mockBlock.Header().WithdrawalsHash),
|
||||
},
|
||||
{
|
||||
BlockNumber: mockNonCanonicalBlock.Number().String(),
|
||||
@ -689,7 +626,6 @@ func TestPublishAndIndexHeaderNonCanonical(t *testing.T, db sql.Database) {
|
||||
RctRoot: mockNonCanonicalBlock.ReceiptHash().String(),
|
||||
UnclesHash: mockNonCanonicalBlock.UncleHash().String(),
|
||||
Coinbase: mocks.MockNonCanonicalHeader.Coinbase.String(),
|
||||
WithdrawalsRoot: shared.MaybeStringHash(mockNonCanonicalBlock.Header().WithdrawalsHash),
|
||||
},
|
||||
{
|
||||
BlockNumber: mockNonCanonicalBlock2.Number().String(),
|
||||
@ -700,7 +636,6 @@ func TestPublishAndIndexHeaderNonCanonical(t *testing.T, db sql.Database) {
|
||||
RctRoot: mockNonCanonicalBlock2.ReceiptHash().String(),
|
||||
UnclesHash: mockNonCanonicalBlock2.UncleHash().String(),
|
||||
Coinbase: mocks.MockNonCanonicalHeader2.Coinbase.String(),
|
||||
WithdrawalsRoot: shared.MaybeStringHash(mockNonCanonicalBlock2.Header().WithdrawalsHash),
|
||||
},
|
||||
}
|
||||
expectedRes[0].Reward = shared.CalcEthBlockReward(mockBlock.Header(), mockBlock.Uncles(), mockBlock.Transactions(), mocks.MockReceipts).String()
|
||||
@ -751,19 +686,62 @@ func DoTestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database
|
||||
|
||||
// expected transactions in the canonical block
|
||||
mockBlockTxs := mocks.MockBlock.Transactions()
|
||||
expectedBlockTxs := make([]models.TxModel, len(mockBlockTxs))
|
||||
for i, tx := range mockBlockTxs {
|
||||
expectedBlockTxs[i] = models.TxModel{
|
||||
expectedBlockTxs := []models.TxModel{
|
||||
{
|
||||
BlockNumber: mockBlock.Number().String(),
|
||||
HeaderID: mockBlock.Hash().String(),
|
||||
TxHash: tx.Hash().String(),
|
||||
CID: txCIDs[i].String(),
|
||||
Dst: shared.HandleZeroAddrPointer(tx.To()),
|
||||
TxHash: mockBlockTxs[0].Hash().String(),
|
||||
CID: trx1CID.String(),
|
||||
Dst: shared.HandleZeroAddrPointer(mockBlockTxs[0].To()),
|
||||
Src: mocks.SenderAddr.String(),
|
||||
Index: int64(i),
|
||||
Type: tx.Type(),
|
||||
Value: tx.Value().String(),
|
||||
}
|
||||
Index: 0,
|
||||
Type: mockBlockTxs[0].Type(),
|
||||
Value: mockBlockTxs[0].Value().String(),
|
||||
},
|
||||
{
|
||||
BlockNumber: mockBlock.Number().String(),
|
||||
HeaderID: mockBlock.Hash().String(),
|
||||
TxHash: mockBlockTxs[1].Hash().String(),
|
||||
CID: trx2CID.String(),
|
||||
Dst: shared.HandleZeroAddrPointer(mockBlockTxs[1].To()),
|
||||
Src: mocks.SenderAddr.String(),
|
||||
Index: 1,
|
||||
Type: mockBlockTxs[1].Type(),
|
||||
Value: mockBlockTxs[1].Value().String(),
|
||||
},
|
||||
{
|
||||
BlockNumber: mockBlock.Number().String(),
|
||||
HeaderID: mockBlock.Hash().String(),
|
||||
TxHash: mockBlockTxs[2].Hash().String(),
|
||||
CID: trx3CID.String(),
|
||||
Dst: shared.HandleZeroAddrPointer(mockBlockTxs[2].To()),
|
||||
Src: mocks.SenderAddr.String(),
|
||||
Index: 2,
|
||||
Type: mockBlockTxs[2].Type(),
|
||||
Value: mockBlockTxs[2].Value().String(),
|
||||
},
|
||||
{
|
||||
BlockNumber: mockBlock.Number().String(),
|
||||
HeaderID: mockBlock.Hash().String(),
|
||||
TxHash: mockBlockTxs[3].Hash().String(),
|
||||
CID: trx4CID.String(),
|
||||
Dst: shared.HandleZeroAddrPointer(mockBlockTxs[3].To()),
|
||||
Src: mocks.SenderAddr.String(),
|
||||
Index: 3,
|
||||
Type: mockBlockTxs[3].Type(),
|
||||
Value: mockBlockTxs[3].Value().String(),
|
||||
},
|
||||
{
|
||||
BlockNumber: mockBlock.Number().String(),
|
||||
HeaderID: mockBlock.Hash().String(),
|
||||
TxHash: mockBlockTxs[4].Hash().String(),
|
||||
CID: trx5CID.String(),
|
||||
Dst: shared.HandleZeroAddrPointer(mockBlockTxs[4].To()),
|
||||
Src: mocks.SenderAddr.String(),
|
||||
Index: 4,
|
||||
Type: mockBlockTxs[4].Type(),
|
||||
Value: mockBlockTxs[4].Value().String(),
|
||||
},
|
||||
}
|
||||
|
||||
// expected transactions in the non-canonical block at London height
|
||||
@ -773,7 +751,7 @@ func DoTestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database
|
||||
BlockNumber: mockNonCanonicalBlock.Number().String(),
|
||||
HeaderID: mockNonCanonicalBlock.Hash().String(),
|
||||
TxHash: mockNonCanonicalBlockTxs[0].Hash().String(),
|
||||
CID: txCIDs[1].String(),
|
||||
CID: trx2CID.String(),
|
||||
Dst: mockNonCanonicalBlockTxs[0].To().String(),
|
||||
Src: mocks.SenderAddr.String(),
|
||||
Index: 0,
|
||||
@ -784,7 +762,7 @@ func DoTestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database
|
||||
BlockNumber: mockNonCanonicalBlock.Number().String(),
|
||||
HeaderID: mockNonCanonicalBlock.Hash().String(),
|
||||
TxHash: mockNonCanonicalBlockTxs[1].Hash().String(),
|
||||
CID: txCIDs[4].String(),
|
||||
CID: trx5CID.String(),
|
||||
Dst: mockNonCanonicalBlockTxs[1].To().String(),
|
||||
Src: mocks.SenderAddr.String(),
|
||||
Index: 1,
|
||||
@ -800,7 +778,7 @@ func DoTestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database
|
||||
BlockNumber: mockNonCanonicalBlock2.Number().String(),
|
||||
HeaderID: mockNonCanonicalBlock2.Hash().String(),
|
||||
TxHash: mockNonCanonicalBlock2Txs[0].Hash().String(),
|
||||
CID: txCIDs[2].String(),
|
||||
CID: trx3CID.String(),
|
||||
Dst: "",
|
||||
Src: mocks.SenderAddr.String(),
|
||||
Index: 0,
|
||||
@ -811,7 +789,7 @@ func DoTestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database
|
||||
BlockNumber: mockNonCanonicalBlock2.Number().String(),
|
||||
HeaderID: mockNonCanonicalBlock2.Hash().String(),
|
||||
TxHash: mockNonCanonicalBlock2Txs[1].Hash().String(),
|
||||
CID: txCIDs[4].String(),
|
||||
CID: trx5CID.String(),
|
||||
Dst: mockNonCanonicalBlock2Txs[1].To().String(),
|
||||
Src: mocks.SenderAddr.String(),
|
||||
Index: 1,
|
||||
@ -848,12 +826,14 @@ func DoTestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database
|
||||
// check indexed IPLD blocks
|
||||
var data []byte
|
||||
|
||||
txCIDs := []cid.Cid{trx1CID, trx2CID, trx3CID, trx4CID, trx5CID}
|
||||
txRLPs := [][]byte{tx1, tx2, tx3, tx4, tx5}
|
||||
for i, txCID := range txCIDs {
|
||||
err = db.Get(context.Background(), &data, ipfsPgGet, txCID.String(), mocks.BlockNumber.Uint64())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.Equal(t, encodedTxs[i], data)
|
||||
require.Equal(t, txRLPs[i], data)
|
||||
}
|
||||
}
|
||||
|
||||
@ -869,10 +849,11 @@ func DoTestPublishAndIndexReceiptsNonCanonical(t *testing.T, db sql.Database) {
|
||||
}
|
||||
|
||||
// expected receipts in the canonical block
|
||||
rctCids := []cid.Cid{rct1CID, rct2CID, rct3CID, rct4CID, rct5CID}
|
||||
expectedBlockRctsMap := make(map[string]models.ReceiptModel, len(mocks.MockReceipts))
|
||||
for i, mockBlockRct := range mocks.MockReceipts {
|
||||
rctModel := createRctModel(mockBlockRct, rctCIDs[i], mockBlock.Number().String())
|
||||
expectedBlockRctsMap[rctCIDs[i].String()] = rctModel
|
||||
rctModel := createRctModel(mockBlockRct, rctCids[i], mockBlock.Number().String())
|
||||
expectedBlockRctsMap[rctCids[i].String()] = rctModel
|
||||
}
|
||||
|
||||
// expected receipts in the non-canonical block at London height
|
||||
@ -927,8 +908,10 @@ func DoTestPublishAndIndexReceiptsNonCanonical(t *testing.T, db sql.Database) {
|
||||
// check indexed rct IPLD blocks
|
||||
var data []byte
|
||||
|
||||
rctRLPs := append(encodedRcts, nonCanonicalBlockRct1, nonCanonicalBlockRct2)
|
||||
for i, rctCid := range append(rctCIDs, nonCanonicalBlockRctCids...) {
|
||||
rctRLPs := [][]byte{
|
||||
rct1, rct2, rct3, rct4, rct5, nonCanonicalBlockRct1, nonCanonicalBlockRct2,
|
||||
}
|
||||
for i, rctCid := range append(rctCids, nonCanonicalBlockRctCids...) {
|
||||
err = db.Get(context.Background(), &data, ipfsPgGet, rctCid.String(), mocks.BlockNumber.Uint64())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -37,14 +37,13 @@ var (
|
||||
WHERE key = $1 AND block_number = $2`
|
||||
watchedAddressesPgGet = `SELECT *
|
||||
FROM eth_meta.watched_addresses`
|
||||
encodedTxs, encodedRcts [][]byte
|
||||
wd1, wd2 []byte
|
||||
tx1, tx2, tx3, tx4, tx5, rct1, rct2, rct3, rct4, rct5 []byte
|
||||
nonCanonicalBlockRct1, nonCanonicalBlockRct2 []byte
|
||||
nonCanonicalBlock2Rct1, nonCanonicalBlock2Rct2 []byte
|
||||
mockBlock, mockNonCanonicalBlock, mockNonCanonicalBlock2 *types.Block
|
||||
headerCID, mockNonCanonicalHeaderCID, mockNonCanonicalHeader2CID cid.Cid
|
||||
txCIDs, rctCIDs []cid.Cid
|
||||
wd1CID, wd2CID cid.Cid
|
||||
trx1CID, trx2CID, trx3CID, trx4CID, trx5CID cid.Cid
|
||||
rct1CID, rct2CID, rct3CID, rct4CID, rct5CID cid.Cid
|
||||
nonCanonicalBlockRct1CID, nonCanonicalBlockRct2CID cid.Cid
|
||||
nonCanonicalBlock2Rct1CID, nonCanonicalBlock2Rct2CID cid.Cid
|
||||
state1CID, state2CID, storageCID cid.Cid
|
||||
@ -63,36 +62,56 @@ func init() {
|
||||
mockNonCanonicalBlock2 = mocks.MockNonCanonicalBlock2
|
||||
nonCanonicalBlock2Rcts := mocks.MockNonCanonicalBlock2Receipts
|
||||
|
||||
// encode mock txs and receipts
|
||||
// encode mock receipts
|
||||
buf := new(bytes.Buffer)
|
||||
encodedTxs = make([][]byte, len(txs))
|
||||
encodedRcts = make([][]byte, len(rcts))
|
||||
|
||||
for i := 0; i < len(txs); i++ {
|
||||
txs.EncodeIndex(i, buf)
|
||||
tx := make([]byte, buf.Len())
|
||||
copy(tx, buf.Bytes())
|
||||
buf.Reset()
|
||||
encodedTxs[i] = tx
|
||||
}
|
||||
|
||||
for i := 0; i < len(rcts); i++ {
|
||||
rcts.EncodeIndex(i, buf)
|
||||
rct := make([]byte, buf.Len())
|
||||
copy(rct, buf.Bytes())
|
||||
buf.Reset()
|
||||
encodedRcts[i] = rct
|
||||
}
|
||||
|
||||
// encode mock withdrawals
|
||||
mocks.MockWithdrawals.EncodeIndex(0, buf)
|
||||
wd1 = make([]byte, buf.Len())
|
||||
copy(wd1, buf.Bytes())
|
||||
txs.EncodeIndex(0, buf)
|
||||
tx1 = make([]byte, buf.Len())
|
||||
copy(tx1, buf.Bytes())
|
||||
buf.Reset()
|
||||
|
||||
mocks.MockWithdrawals.EncodeIndex(1, buf)
|
||||
wd2 = make([]byte, buf.Len())
|
||||
copy(wd2, buf.Bytes())
|
||||
txs.EncodeIndex(1, buf)
|
||||
tx2 = make([]byte, buf.Len())
|
||||
copy(tx2, buf.Bytes())
|
||||
buf.Reset()
|
||||
|
||||
txs.EncodeIndex(2, buf)
|
||||
tx3 = make([]byte, buf.Len())
|
||||
copy(tx3, buf.Bytes())
|
||||
buf.Reset()
|
||||
|
||||
txs.EncodeIndex(3, buf)
|
||||
tx4 = make([]byte, buf.Len())
|
||||
copy(tx4, buf.Bytes())
|
||||
buf.Reset()
|
||||
|
||||
txs.EncodeIndex(4, buf)
|
||||
tx5 = make([]byte, buf.Len())
|
||||
copy(tx5, buf.Bytes())
|
||||
buf.Reset()
|
||||
|
||||
rcts.EncodeIndex(0, buf)
|
||||
rct1 = make([]byte, buf.Len())
|
||||
copy(rct1, buf.Bytes())
|
||||
buf.Reset()
|
||||
|
||||
rcts.EncodeIndex(1, buf)
|
||||
rct2 = make([]byte, buf.Len())
|
||||
copy(rct2, buf.Bytes())
|
||||
buf.Reset()
|
||||
|
||||
rcts.EncodeIndex(2, buf)
|
||||
rct3 = make([]byte, buf.Len())
|
||||
copy(rct3, buf.Bytes())
|
||||
buf.Reset()
|
||||
|
||||
rcts.EncodeIndex(3, buf)
|
||||
rct4 = make([]byte, buf.Len())
|
||||
copy(rct4, buf.Bytes())
|
||||
buf.Reset()
|
||||
|
||||
rcts.EncodeIndex(4, buf)
|
||||
rct5 = make([]byte, buf.Len())
|
||||
copy(rct5, buf.Bytes())
|
||||
buf.Reset()
|
||||
|
||||
// encode mock receipts for non-canonical blocks
|
||||
@ -119,23 +138,19 @@ func init() {
|
||||
headerCID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockHeaderRlp, multihash.KECCAK_256)
|
||||
mockNonCanonicalHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockNonCanonicalHeaderRlp, multihash.KECCAK_256)
|
||||
mockNonCanonicalHeader2CID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockNonCanonicalHeader2Rlp, multihash.KECCAK_256)
|
||||
|
||||
for i := 0; i < len(txs); i++ {
|
||||
tx, _ := ipld.RawdataToCid(ipld.MEthTx, encodedTxs[i], multihash.KECCAK_256)
|
||||
txCIDs = append(txCIDs, tx)
|
||||
}
|
||||
|
||||
trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx1, multihash.KECCAK_256)
|
||||
trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx2, multihash.KECCAK_256)
|
||||
trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx3, multihash.KECCAK_256)
|
||||
trx4CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx4, multihash.KECCAK_256)
|
||||
trx5CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx5, multihash.KECCAK_256)
|
||||
state1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.ContractLeafNode, multihash.KECCAK_256)
|
||||
state2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.AccountLeafNode, multihash.KECCAK_256)
|
||||
storageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, mocks.StorageLeafNode, multihash.KECCAK_256)
|
||||
|
||||
for i := 0; i < len(rcts); i++ {
|
||||
rct, _ := ipld.RawdataToCid(ipld.MEthTxReceipt, encodedRcts[i], multihash.KECCAK_256)
|
||||
rctCIDs = append(rctCIDs, rct)
|
||||
}
|
||||
|
||||
wd1CID, _ = ipld.RawdataToCid(ipld.MEthWithdrawal, wd1, multihash.KECCAK_256)
|
||||
wd2CID, _ = ipld.RawdataToCid(ipld.MEthWithdrawal, wd2, multihash.KECCAK_256)
|
||||
rct1CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, rct1, multihash.KECCAK_256)
|
||||
rct2CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, rct2, multihash.KECCAK_256)
|
||||
rct3CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, rct3, multihash.KECCAK_256)
|
||||
rct4CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, rct4, multihash.KECCAK_256)
|
||||
rct5CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, rct5, multihash.KECCAK_256)
|
||||
|
||||
// create raw receipts for non-canonical blocks
|
||||
nonCanonicalBlockRct1CID, _ = ipld.RawdataToCid(ipld.MEthTxReceipt, nonCanonicalBlockRct1, multihash.KECCAK_256)
|
||||
|
@ -19,16 +19,22 @@ package test_helpers
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/database/sql/postgres"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/shared/schema"
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
// ListContainsString used to check if a list of strings contains a particular string
|
||||
func ListContainsString(sss []string, s string) bool {
|
||||
for _, str := range sss {
|
||||
if s == str {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// DedupFile removes duplicates from the given file
|
||||
func DedupFile(filePath string) error {
|
||||
f, err := os.OpenFile(filePath, os.O_CREATE|os.O_RDONLY, os.ModePerm)
|
||||
@ -43,6 +49,9 @@ func DedupFile(filePath string) error {
|
||||
s := sc.Text()
|
||||
stmts[s] = struct{}{}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.Close()
|
||||
|
||||
@ -61,30 +70,30 @@ func DedupFile(filePath string) error {
|
||||
|
||||
// TearDownDB is used to tear down the watcher dbs after tests
|
||||
func TearDownDB(t *testing.T, db sql.Database) {
|
||||
err := ClearDB(db)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func ClearSqlxDB(sqlxdb *sqlx.DB) error {
|
||||
driver := postgres.NewSQLXDriver(context.Background(), sqlxdb)
|
||||
db := postgres.NewPostgresDB(driver, false)
|
||||
return ClearDB(db)
|
||||
}
|
||||
|
||||
func ClearDB(db sql.Database) error {
|
||||
ctx := context.Background()
|
||||
tx, err := db.Begin(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, tbl := range schema.AllTables {
|
||||
stm := fmt.Sprintf("TRUNCATE %s", tbl.Name)
|
||||
statements := []string{
|
||||
`TRUNCATE nodes`,
|
||||
`TRUNCATE ipld.blocks`,
|
||||
`TRUNCATE eth.header_cids`,
|
||||
`TRUNCATE eth.uncle_cids`,
|
||||
`TRUNCATE eth.transaction_cids`,
|
||||
`TRUNCATE eth.receipt_cids`,
|
||||
`TRUNCATE eth.state_cids`,
|
||||
`TRUNCATE eth.storage_cids`,
|
||||
`TRUNCATE eth.log_cids`,
|
||||
`TRUNCATE eth_meta.watched_addresses`,
|
||||
}
|
||||
for _, stm := range statements {
|
||||
if _, err = tx.Exec(ctx, stm); err != nil {
|
||||
return fmt.Errorf("error executing `%s`: %w", stm, err)
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
return tx.Commit(ctx)
|
||||
if err = tx.Commit(ctx); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
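A hypothetical sketch of how a test might use these teardown helpers; the setup function is a placeholder for whatever the suite uses to open its database handle:

package test_helpers_test

import (
	"testing"

	"github.com/cerc-io/plugeth-statediff/indexer/database/sql"
	"github.com/cerc-io/plugeth-statediff/indexer/test_helpers"
)

// setupTestDB is a placeholder; a real test would open a connection to the
// test database here.
func setupTestDB(t *testing.T) sql.Database {
	t.Helper()
	t.Skip("placeholder: wire up a test database")
	return nil
}

func TestExampleIndexing(t *testing.T) {
	db := setupTestDB(t)
	defer test_helpers.TearDownDB(t, db)
	// ... index mock data and assert on the resulting rows ...
}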
|
||||
|
@ -20,7 +20,7 @@ import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/cerc-io/eth-testing/chains/mainnet"
|
||||
"github.com/cerc-io/eth-testing/chaindata/mainnet"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
@ -30,8 +30,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/triedb"
|
||||
"github.com/holiman/uint256"
|
||||
|
||||
statediff "github.com/cerc-io/plugeth-statediff"
|
||||
"github.com/cerc-io/plugeth-statediff/indexer/ipld"
|
||||
@ -53,7 +51,7 @@ var (
|
||||
// block 1 data
|
||||
block1CoinbaseAccount = &types.StateAccount{
|
||||
Nonce: 0,
|
||||
Balance: uint256.NewInt(5000000000000000000),
|
||||
Balance: big.NewInt(5000000000000000000),
|
||||
CodeHash: test_helpers.NullCodeHash.Bytes(),
|
||||
Root: test_helpers.EmptyContractRoot,
|
||||
}
|
||||
@ -126,7 +124,7 @@ var (
|
||||
// block 2 data
|
||||
block2CoinbaseAccount = &types.StateAccount{
|
||||
Nonce: 0,
|
||||
Balance: uint256.NewInt(5000000000000000000),
|
||||
Balance: big.NewInt(5000000000000000000),
|
||||
CodeHash: test_helpers.NullCodeHash.Bytes(),
|
||||
Root: test_helpers.EmptyContractRoot,
|
||||
}
|
||||
@ -139,7 +137,7 @@ var (
|
||||
block2MovedPremineBalance, _ = new(big.Int).SetString("4000000000000000000000", 10)
|
||||
block2MovedPremineAccount = &types.StateAccount{
|
||||
Nonce: 0,
|
||||
Balance: uint256.MustFromBig(block2MovedPremineBalance),
|
||||
Balance: block2MovedPremineBalance,
|
||||
CodeHash: test_helpers.NullCodeHash.Bytes(),
|
||||
Root: test_helpers.EmptyContractRoot,
|
||||
}
|
||||
@ -231,10 +229,10 @@ var (
|
||||
|
||||
// block3 data
|
||||
// path 060e0f
|
||||
block3CoinbaseBalance, _ = new(big.Int).SetString("5156250000000000000", 10)
|
||||
blcok3CoinbaseBalance, _ = new(big.Int).SetString("5156250000000000000", 10)
|
||||
block3CoinbaseAccount = &types.StateAccount{
|
||||
Nonce: 0,
|
||||
Balance: uint256.MustFromBig(block3CoinbaseBalance),
|
||||
Balance: blcok3CoinbaseBalance,
|
||||
CodeHash: test_helpers.NullCodeHash.Bytes(),
|
||||
Root: test_helpers.EmptyContractRoot,
|
||||
}
|
||||
@ -248,7 +246,7 @@ var (
|
||||
block3MovedPremineBalance1, _ = new(big.Int).SetString("3750000000000000000", 10)
|
||||
block3MovedPremineAccount1 = &types.StateAccount{
|
||||
Nonce: 0,
|
||||
Balance: uint256.MustFromBig(block3MovedPremineBalance1),
|
||||
Balance: block3MovedPremineBalance1,
|
||||
CodeHash: test_helpers.NullCodeHash.Bytes(),
|
||||
Root: test_helpers.EmptyContractRoot,
|
||||
}
|
||||
@ -262,7 +260,7 @@ var (
|
||||
block3MovedPremineBalance2, _ = new(big.Int).SetString("1999944000000000000000", 10)
|
||||
block3MovedPremineAccount2 = &types.StateAccount{
|
||||
Nonce: 0,
|
||||
Balance: uint256.MustFromBig(block3MovedPremineBalance2),
|
||||
Balance: block3MovedPremineBalance2,
|
||||
CodeHash: test_helpers.NullCodeHash.Bytes(),
|
||||
Root: test_helpers.EmptyContractRoot,
|
||||
}
|
||||
@ -422,7 +420,7 @@ var (
|
||||
|
||||
func init() {
|
||||
db = rawdb.NewMemoryDatabase()
|
||||
genesisBlock = core.DefaultGenesisBlock().MustCommit(db, triedb.NewDatabase(db, nil))
|
||||
genesisBlock = core.DefaultGenesisBlock().MustCommit(db)
|
||||
|
||||
blocks := mainnet.GetBlocks()
|
||||
block0 = blocks[0]
|
||||
|
@ -28,10 +28,10 @@ func countStateDiffBegin(block *types.Block, logger log.Logger) time.Time {

  defaultStatediffMetrics.underway.Inc(1)
  logger.Debug("writeStateDiff BEGIN",
    "underway", defaultStatediffMetrics.underway.Snapshot().Count(),
    "succeeded", defaultStatediffMetrics.succeeded.Snapshot().Count(),
    "failed", defaultStatediffMetrics.failed.Snapshot().Count(),
    "total_time", defaultStatediffMetrics.totalProcessingTime.Snapshot().Value(),
    "underway", defaultStatediffMetrics.underway.Count(),
    "succeeded", defaultStatediffMetrics.succeeded.Count(),
    "failed", defaultStatediffMetrics.failed.Count(),
    "total_time", defaultStatediffMetrics.totalProcessingTime.Value(),
  )

  return start
@ -51,10 +51,10 @@ func countStateDiffEnd(start time.Time, logger log.Logger, err *error) time.Dura
  logger.Debug("writeStateDiff END",
    "duration", duration,
    "error", failed,
    "underway", defaultStatediffMetrics.underway.Snapshot().Count(),
    "succeeded", defaultStatediffMetrics.succeeded.Snapshot().Count(),
    "failed", defaultStatediffMetrics.failed.Snapshot().Count(),
    "total_time", defaultStatediffMetrics.totalProcessingTime.Snapshot().Value(),
    "underway", defaultStatediffMetrics.underway.Count(),
    "succeeded", defaultStatediffMetrics.succeeded.Count(),
    "failed", defaultStatediffMetrics.failed.Count(),
    "total_time", defaultStatediffMetrics.totalProcessingTime.Value(),
  )

  return duration
@ -68,8 +68,8 @@ func countApiRequestBegin(methodName string, blockHashOrNumber interface{}) (tim
  defaultStatediffMetrics.apiRequestsUnderway.Inc(1)

  logger.Debug("statediff API BEGIN",
    "underway", defaultStatediffMetrics.apiRequestsUnderway.Snapshot().Count(),
    "requests", defaultStatediffMetrics.apiRequests.Snapshot().Count(),
    "underway", defaultStatediffMetrics.apiRequestsUnderway.Count(),
    "requests", defaultStatediffMetrics.apiRequests.Count(),
  )

  return start, logger
@ -82,8 +82,8 @@ func countApiRequestEnd(start time.Time, logger log.Logger, err error) time.Dura
  logger.Debug("statediff API END",
    "duration", duration,
    "error", err != nil,
    "underway", defaultStatediffMetrics.apiRequestsUnderway.Snapshot().Count(),
    "requests", defaultStatediffMetrics.apiRequests.Snapshot().Count(),
    "underway", defaultStatediffMetrics.apiRequestsUnderway.Count(),
    "requests", defaultStatediffMetrics.apiRequests.Count(),
  )

  return duration
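These hunks only change how metric values are read: one side reads counters and gauges through an immutable Snapshot() (the newer go-ethereum metrics API), the other calls Count()/Value() on the metric directly (the older API). A small sketch of the snapshot-style read, assuming a recent go-ethereum/metrics package (not part of this diff):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	c := metrics.NewCounter()
	c.Inc(1)
	// Reads go through a point-in-time snapshot rather than the live counter,
	// so a batch of related readings stays mutually consistent.
	fmt.Println(c.Snapshot().Count())
}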
32 scripts/integration-setup.sh Executable file
@ -0,0 +1,32 @@
#!/bin/bash
# Builds and deploys a stack with only what we need.
# This script assumes we are running in the project root.

set -e

cluster="${1:-test}"
laconic_so="${LACONIC_SO:-laconic-so} --stack fixturenet-plugeth-tx --verbose"

CONFIG_DIR=$(readlink -f "${CONFIG_DIR:-$(mktemp -d)}")

# Point stack-orchestrator to the multi-project root
export CERC_REPO_BASE_DIR="${CERC_REPO_BASE_DIR:-$(git rev-parse --show-toplevel)/..}"

# v5 migrations only go up to version 18
echo CERC_STATEDIFF_DB_GOOSE_MIN_VER=18 >> $CONFIG_DIR/stack.env

set -x

if [[ -z $SKIP_BUILD ]]; then
  $laconic_so setup-repositories \
    --exclude github.com/dboreham/foundry,github.com/cerc-io/tx-spammer,github.com/cerc-io/ipld-eth-server,git.vdb.to/cerc-io/plugeth,git.vdb.to/cerc-io/plugeth-statediff \
    --branches-file ./test/stack-refs.txt

  $laconic_so build-containers \
    --exclude cerc/ipld-eth-server,cerc/keycloak,cerc/tx-spammer,cerc/foundry,cerc/plugeth,cerc/plugeth-statediff
fi

$laconic_so deploy \
  --exclude foundry,keycloak,tx-spammer,ipld-eth-server \
  --env-file $CONFIG_DIR/stack.env \
  --cluster "$cluster" up
@ -1,34 +0,0 @@
#!/bin/bash

set -ex

# Note: stack path should be absolute, otherwise SO looks for it in packaged stacks
stack_dir=$(readlink -f "${1:-../fixturenet-eth-stacks/stack-orchestrator/stacks/fixturenet-plugeth}")

[[ -d "$stack_dir" ]]

CONFIG_DIR=$(readlink -f "${CONFIG_DIR:-$(mktemp -d)}")
# By default assume we are running in the project root.
export CERC_REPO_BASE_DIR="${CERC_REPO_BASE_DIR:-$(git rev-parse --show-toplevel)/..}"

laconic_so="laconic-so --verbose --stack $stack_dir"

# Don't run geth/plugeth in the debugger, it will swallow error backtraces
echo CERC_REMOTE_DEBUG=false >> $CONFIG_DIR/stack.env


if [[ -z $SKIP_BUILD ]]; then
  $laconic_so setup-repositories \
    --exclude git.vdb.to/cerc-io/plugeth-statediff
  # Assume the tested image has been built separately
  $laconic_so build-containers \
    --exclude cerc/plugeth-statediff
fi

if ! $laconic_so deploy \
  --env-file $CONFIG_DIR/stack.env \
  --cluster test up
then
  $laconic_so deploy --cluster test logs
  exit 1
fi
@ -245,7 +245,7 @@ func (sds *Service) WriteLoop(chainEventCh chan core.ChainEvent) {
    select {
    case event := <-chainEventCh:
      // First process metrics for chain events, then forward to workers
      lastHeight := uint64(defaultStatediffMetrics.lastEventHeight.Snapshot().Value())
      lastHeight := uint64(defaultStatediffMetrics.lastEventHeight.Value())
      if lastHeight == 0 {
        lastHeight = initialPos.indexerBlockNumber
      }
@ -825,7 +825,7 @@ func (sds *Service) writeStateDiff(block *types.Block, parentRoot common.Hash, p
  }
  defer tx.RollbackOnFailure(err)

  // TODO: review necessity of locking here
  // TODO: review/remove the need to sync here
  var nodeMtx, ipldMtx sync.Mutex
  nodeSink := func(node types2.StateLeafNode) error {
    defer metrics.UpdateDuration(time.Now(), metrics.IndexerMetrics.OutputTimer)
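The writeStateDiff hunk shows the node and IPLD sinks guarded by mutexes so concurrent builder workers can share them. A generic sketch of that pattern, with illustrative names that are not taken from this diff:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		mtx   sync.Mutex
		nodes []string
	)
	// sink may be called from multiple goroutines; the mutex serializes appends.
	sink := func(n string) error {
		mtx.Lock()
		defer mtx.Unlock()
		nodes = append(nodes, n)
		return nil
	}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			_ = sink(fmt.Sprintf("node-%d", i))
		}(i)
	}
	wg.Wait()
	fmt.Println(len(nodes)) // 4
}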
@ -3,7 +3,7 @@ services:
    restart: on-failure
    depends_on:
      - ipld-eth-db
    image: git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:v5.4.0-alpha
    image: git.vdb.to/cerc-io/ipld-eth-db/ipld-eth-db:v5.0.5-alpha
    environment:
      DATABASE_USER: "vdbm"
      DATABASE_NAME: "cerc_testing"
1 test/stack-refs.txt Normal file
@ -0,0 +1 @@
github.com/cerc-io/ipld-eth-db v5.0.5-alpha
@ -1,20 +0,0 @@
version: "1.2"
name: fixturenet-plugeth-tx
description: "Plugeth Ethereum Fixturenet for testing plugeth-statediff"
repos:
  - git.vdb.to/cerc-io/plugeth@v1.13.14-cerc-2
  - git.vdb.to/cerc-io/plugeth-statediff
  - git.vdb.to/cerc-io/lighthouse
  - git.vdb.to/cerc-io/ipld-eth-db@v5.4.0-alpha
containers:
  - cerc/plugeth-statediff
  - cerc/plugeth
  - cerc/fixturenet-eth-genesis
  - cerc/fixturenet-plugeth-plugeth
  - cerc/lighthouse
  - cerc/lighthouse-cli
  - cerc/fixturenet-eth-lighthouse
  - cerc/ipld-eth-db
pods:
  - fixturenet-plugeth
  - ipld-eth-db
@ -2,7 +2,6 @@ package test_helpers

import (
  "bytes"
  "context"
  "fmt"
  "math/big"
  "math/rand"
@ -94,7 +93,6 @@ func RunStateSnapshot(
  tr := tracker.New(recoveryFile, subtries)
  defer tr.CloseAndSave()
  return builder.WriteStateSnapshot(
    context.Background(),
    test.StateRoot, params, stateAppender, ipldAppender, tr,
  )
}
@ -1,30 +0,0 @@
package chaingen

import (
  "strings"

  "github.com/ethereum/go-ethereum/accounts/abi"
  "github.com/ethereum/go-ethereum/common"
)

type ContractSpec struct {
  DeploymentCode []byte
  ABI abi.ABI
}

func ParseContract(abiStr, binStr string) (*ContractSpec, error) {
  parsedABI, err := abi.JSON(strings.NewReader(abiStr))
  if err != nil {
    return nil, err
  }
  data := common.Hex2Bytes(binStr)
  return &ContractSpec{data, parsedABI}, nil
}

func MustParseContract(abiStr, binStr string) *ContractSpec {
  spec, err := ParseContract(abiStr, binStr)
  if err != nil {
    panic(err)
  }
  return spec
}
@ -1,207 +0,0 @@
package chaingen

import (
  "crypto/ecdsa"
  "errors"
  "math/big"

  "github.com/ethereum/go-ethereum/common"
  "github.com/ethereum/go-ethereum/consensus/ethash"
  "github.com/ethereum/go-ethereum/core"
  "github.com/ethereum/go-ethereum/core/types"
  "github.com/ethereum/go-ethereum/core/vm"
  "github.com/ethereum/go-ethereum/crypto"
  "github.com/ethereum/go-ethereum/crypto/kzg4844"
  "github.com/ethereum/go-ethereum/ethdb"
  "github.com/ethereum/go-ethereum/params"
  "github.com/holiman/uint256"
)

const secondsPerBlock = 12

type GenContext struct {
  ChainConfig *params.ChainConfig
  GenFuncs []func(int, *core.BlockGen)
  DB ethdb.Database

  Keys map[common.Address]*ecdsa.PrivateKey
  Contracts map[string]*ContractSpec
  Genesis *types.Block

  block *core.BlockGen // cache the current block for my methods' use
  deployed map[common.Address]string // names of deployed contracts keyed by deployer
  time uint64 // time at current block, in seconds
}

func NewGenContext(chainConfig *params.ChainConfig, db ethdb.Database) *GenContext {
  return &GenContext{
    ChainConfig: chainConfig,
    DB: db,
    Keys: make(map[common.Address]*ecdsa.PrivateKey),
    Contracts: make(map[string]*ContractSpec),

    deployed: make(map[common.Address]string),
  }
}

func (gen *GenContext) AddFunction(fn func(int, *core.BlockGen)) {
  gen.GenFuncs = append(gen.GenFuncs, fn)
}

func (gen *GenContext) AddOwnedAccount(key *ecdsa.PrivateKey) common.Address {
  addr := crypto.PubkeyToAddress(key.PublicKey)
  gen.Keys[addr] = key
  return addr
}

func (gen *GenContext) AddContract(name string, spec *ContractSpec) {
  gen.Contracts[name] = spec
}

func (gen *GenContext) generate(i int, block *core.BlockGen) {
  gen.block = block
  for _, fn := range gen.GenFuncs {
    fn(i, block)
  }
  gen.time += secondsPerBlock
}

// MakeChain creates a chain of n blocks starting at and including the genesis block.
// the returned hash chain is ordered head->parent.
func (gen *GenContext) MakeChain(n int) ([]*types.Block, []types.Receipts, *core.BlockChain) {
  blocks, receipts := core.GenerateChain(
    gen.ChainConfig, gen.Genesis, ethash.NewFaker(), gen.DB, n, gen.generate,
  )
  chain, err := core.NewBlockChain(gen.DB, nil, nil, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
  if err != nil {
    panic(err)
  }
  return append([]*types.Block{gen.Genesis}, blocks...), receipts, chain
}

func (gen *GenContext) CreateSendTx(from common.Address, to common.Address, amount *big.Int) (*types.Transaction, error) {
  return gen.createTx(from, &to, amount, params.TxGas, nil)
}

func (gen *GenContext) CreateContractTx(from common.Address, contractName string) (*types.Transaction, error) {
  contract := gen.Contracts[contractName]
  if contract == nil {
    return nil, errors.New("No contract with name " + contractName)
  }
  return gen.createTx(from, nil, big.NewInt(0), 1000000, contract.DeploymentCode)
}

func (gen *GenContext) CreateCallTx(from common.Address, to common.Address, methodName string, args ...interface{}) (*types.Transaction, error) {
  contractName, ok := gen.deployed[to]
  if !ok {
    return nil, errors.New("No contract deployed at address " + to.String())
  }
  contract := gen.Contracts[contractName]
  if contract == nil {
    return nil, errors.New("No contract with name " + contractName)
  }

  packed, err := contract.ABI.Pack(methodName, args...)
  if err != nil {
    panic(err)
  }
  return gen.createTx(from, &to, big.NewInt(0), 100000, packed)
}

func (gen *GenContext) DeployContract(from common.Address, contractName string) (common.Address, error) {
  tx, err := gen.CreateContractTx(from, contractName)
  if err != nil {
    return common.Address{}, err
  }
  addr := crypto.CreateAddress(from, gen.block.TxNonce(from))
  gen.deployed[addr] = contractName
  gen.block.AddTx(tx)
  return addr, nil
}

func (gen *GenContext) createTx(from common.Address, to *common.Address, amount *big.Int, gasLimit uint64, data []byte) (*types.Transaction, error) {
  signer := types.MakeSigner(gen.ChainConfig, gen.block.Number(), gen.time)
  nonce := gen.block.TxNonce(from)
  priv, ok := gen.Keys[from]
  if !ok {
    return nil, errors.New("No private key for sender address" + from.String())
  }

  var tx *types.Transaction
  if gen.ChainConfig.IsLondon(gen.block.Number()) {
    tx = types.NewTx(&types.DynamicFeeTx{
      ChainID: gen.ChainConfig.ChainID,
      Nonce: nonce,
      To: to,
      Gas: gasLimit,
      GasTipCap: big.NewInt(50),
      GasFeeCap: big.NewInt(1000000000),
      Value: amount,
      Data: data,
    })
  } else {
    tx = types.NewTx(&types.LegacyTx{
      Nonce: nonce,
      To: to,
      Value: amount,
      Gas: gasLimit,
      Data: data,
    })
  }
  return types.SignTx(tx, signer, priv)
}

func (gen *GenContext) createBlobTx(
  from common.Address,
  to common.Address,
  amount *uint256.Int,
  gasLimit uint64,
  blobData []byte,
) (*types.Transaction, error) {
  signer := types.MakeSigner(gen.ChainConfig, gen.block.Number(), gen.time)
  nonce := gen.block.TxNonce(from)
  priv, ok := gen.Keys[from]
  if !ok {
    return nil, errors.New("No private key for sender address" + from.String())
  }

  if !gen.ChainConfig.IsCancun(gen.block.Number(), gen.time) {
    return nil, errors.New("blob tx is only supported from Cancun fork")
  }

  sidecar := MakeSidecar([][]byte{blobData})
  tx := types.NewTx(&types.BlobTx{
    ChainID: uint256.MustFromBig(gen.ChainConfig.ChainID),
    Nonce: nonce,
    To: to,
    Gas: gasLimit,
    GasTipCap: uint256.NewInt(50),
    GasFeeCap: uint256.NewInt(1000000000),
    Value: amount,
    BlobFeeCap: uint256.NewInt(1000000),
    BlobHashes: sidecar.BlobHashes(),
    Sidecar: sidecar,
  })
  return types.SignTx(tx, signer, priv)
}

// From go-ethereum/cmd/devp2p/internal/ethtest/chain.go
func MakeSidecar(data [][]byte) *types.BlobTxSidecar {
  var (
    blobs = make([]kzg4844.Blob, len(data))
    commitments []kzg4844.Commitment
    proofs []kzg4844.Proof
  )
  for i := range blobs {
    copy(blobs[i][:], data[i])
    c, _ := kzg4844.BlobToCommitment(blobs[i])
    p, _ := kzg4844.ComputeBlobProof(blobs[i], c)
    commitments = append(commitments, c)
    proofs = append(proofs, p)
  }
  return &types.BlobTxSidecar{
    Blobs: blobs,
    Commitments: commitments,
    Proofs: proofs,
  }
}
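For reference, the removed GenContext helper was used roughly as follows. This is a reconstruction sketch, not code from the repository: the chaingen import path is an assumption, and genesis setup and account funding are deliberately elided.

package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"

	// Assumed import path for the removed package.
	"github.com/cerc-io/plugeth-statediff/test_helpers/chaingen"
)

func main() {
	gen := chaingen.NewGenContext(params.TestChainConfig, rawdb.NewMemoryDatabase())

	key, _ := crypto.GenerateKey()
	sender := gen.AddOwnedAccount(key)

	// Registered functions run once per generated block.
	gen.AddFunction(func(i int, block *core.BlockGen) {
		if tx, err := gen.CreateSendTx(sender, sender, big.NewInt(1)); err == nil {
			block.AddTx(tx)
		}
	})

	// gen.Genesis must be set to a committed genesis block (with the sender
	// funded) before calling gen.MakeChain(n); that setup is omitted here.
	// blocks, receipts, chain := gen.MakeChain(3)
}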
@ -17,16 +17,17 @@
package test_helpers

import (
  "math/big"

  "github.com/ethereum/go-ethereum/params"
  "github.com/holiman/uint256"
)

var (
  BalanceChange1000 = int64(1000)
  BalanceChange10000 = int64(10000)
  BalanceChangeBIG = uint256.MustFromDecimal("2000000000000000000000000000000000000000000")
  BalanceChangeBIG, _ = big.NewInt(0).SetString("2000000000000000000000000000000000000000000", 10)
  BalanceChange1Ether = int64(params.Ether)
  Block1Account1Balance = uint256.NewInt(uint64(BalanceChange10000))
  Block1Account1Balance = big.NewInt(BalanceChange10000)
  Block1bAccount1Balance = BalanceChangeBIG
  GasFees = int64(params.GWei) * int64(params.TxGas)
  GasFees2 = int64(params.TxGas) * int64(params.InitialBaseFee)
@ -27,28 +27,27 @@ import (
  "github.com/ethereum/go-ethereum/crypto"
  "github.com/ethereum/go-ethereum/ethdb"
  "github.com/ethereum/go-ethereum/params"
  "github.com/ethereum/go-ethereum/triedb"

  "github.com/cerc-io/plugeth-statediff/utils"
)

func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance, baseFee *big.Int, initialGasLimit uint64) *types.Block {
  alloc := map[common.Address]types.Account{addr: {Balance: balance}}
  alloc := map[common.Address]core.GenesisAccount{
    addr: core.GenesisAccount{Balance: balance}}
  g := core.Genesis{
    Config: TestChainConfig,
    Alloc: alloc,
    BaseFee: baseFee,
  }
  if initialGasLimit != 0 {
    g.GasLimit = initialGasLimit
  }
  return g.MustCommit(db, triedb.NewDatabase(db, nil))
  return g.MustCommit(db)
}

// MakeChain creates a chain of n blocks starting at and including parent.
// the returned hash chain is ordered head->parent.
func MakeChain(n int, parent *types.Block, chainGen func(int, *core.BlockGen)) ([]*types.Block, *core.BlockChain) {
  config := TestChainConfig
  config := params.TestChainConfig
  blocks, _ := core.GenerateChain(config, parent, ethash.NewFaker(), Testdb, n, chainGen)
  chain, _ := core.NewBlockChain(Testdb, nil, nil, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
  return blocks, chain
@ -149,7 +148,7 @@ func TestChainGenWithInternalLeafNode(i int, block *core.BlockGen) {
  switch i {
  case 0:
    // In block 1, the test bank sends account #1 some ether.
    tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(TestBankAddress), Account1Addr, BalanceChangeBIG.ToBig(), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, TestBankKey)
    tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(TestBankAddress), Account1Addr, BalanceChangeBIG, params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, TestBankKey)
    block.AddTx(tx)
  case 1:
    // In block 2 Account1Addr creates a test contract.
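The GenesisBlockForTesting hunk switches between two geth API generations: genesis allocations as core.GenesisAccount with MustCommit(db), versus types.Account with MustCommit taking a trie database. A small sketch of the newer form, assuming a recent go-ethereum (not part of this diff):

package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/triedb"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	addr := common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")

	// Allocations use types.Account; MustCommit takes an explicit trie database.
	g := core.Genesis{
		Config:  params.TestChainConfig,
		Alloc:   map[common.Address]types.Account{addr: {Balance: big.NewInt(int64(params.Ether))}},
		BaseFee: big.NewInt(params.InitialBaseFee),
	}
	block := g.MustCommit(db, triedb.NewDatabase(db, nil))
	_ = block
}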
@ -1,100 +0,0 @@
package test_helpers

import (
  "context"
  "fmt"
  "math/big"

  "github.com/cerc-io/plugeth-statediff"
  "github.com/cerc-io/plugeth-statediff/adapt"
  "github.com/cerc-io/plugeth-statediff/indexer"
  "github.com/cerc-io/plugeth-statediff/indexer/interfaces"
  "github.com/cerc-io/plugeth-statediff/indexer/node"
  "github.com/ethereum/go-ethereum/common"
  "github.com/ethereum/go-ethereum/core/state"
  "github.com/ethereum/go-ethereum/core/types"
  "github.com/ethereum/go-ethereum/params"
)

type IndexChainParams struct {
  Blocks []*types.Block
  Receipts []types.Receipts
  StateCache state.Database

  StateDiffParams statediff.Params
  TotalDifficulty *big.Int
  // Whether to skip indexing state nodes (state_cids, storage_cids)
  SkipStateNodes bool
  // Whether to skip indexing IPLD blocks
  SkipIPLDs bool
}

func NewIndexer(ctx context.Context, chainConfig *params.ChainConfig, genHash common.Hash, dbconfig interfaces.Config) (interfaces.StateDiffIndexer, error) {
  testInfo := node.Info{
    GenesisBlock: genHash.String(),
    NetworkID: "1",
    ID: "1",
    ClientName: "geth",
    ChainID: chainConfig.ChainID.Uint64(),
  }
  _, indexer, err := indexer.NewStateDiffIndexer(ctx, chainConfig, testInfo, dbconfig, true)
  return indexer, err
}

func IndexChain(indexer interfaces.StateDiffIndexer, params IndexChainParams) error {
  builder := statediff.NewBuilder(adapt.GethStateView(params.StateCache))
  // iterate over the blocks, generating statediff payloads, and transforming the data into Postgres
  for i, block := range params.Blocks {
    var args statediff.Args
    var rcts types.Receipts
    if i == 0 {
      args = statediff.Args{
        OldStateRoot: common.Hash{},
        NewStateRoot: block.Root(),
        BlockNumber: block.Number(),
        BlockHash: block.Hash(),
      }
    } else {
      args = statediff.Args{
        OldStateRoot: params.Blocks[i-1].Root(),
        NewStateRoot: block.Root(),
        BlockNumber: block.Number(),
        BlockHash: block.Hash(),
      }
      rcts = params.Receipts[i-1]
    }

    diff, err := builder.BuildStateDiffObject(args, params.StateDiffParams)
    if err != nil {
      return fmt.Errorf("failed to build diff (block %d): %w", block.Number(), err)
    }
    tx, err := indexer.PushBlock(block, rcts, params.TotalDifficulty)
    if err != nil {
      return fmt.Errorf("failed to index block (block %d): %w", block.Number(), err)
    }
    defer tx.RollbackOnFailure(err)

    if !params.SkipStateNodes {
      for _, node := range diff.Nodes {
        if err = indexer.PushStateNode(tx, node, block.Hash().String()); err != nil {
          if err != nil {
            return fmt.Errorf("failed to index state node: %w", err)
          }
        }
      }
    }
    if !params.SkipIPLDs {
      for _, ipld := range diff.IPLDs {
        if err := indexer.PushIPLD(tx, ipld); err != nil {
          if err != nil {
            return fmt.Errorf("failed to index IPLD: %w", err)
          }
        }
      }
    }
    if err = tx.Submit(); err != nil {
      return fmt.Errorf("failed to commit diff: %w", err)
    }
  }
  return nil
}
@ -52,7 +52,6 @@ var (
  StorageValue = utils.Hex2Bytes("0x03")
  NullHash = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")

  TestChainConfig = &*params.TestChainConfig
  Testdb = rawdb.NewMemoryDatabase()
  TestBankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
  TestBankAddress = crypto.PubkeyToAddress(TestBankKey.PublicKey) //0x71562b71999873DB5b286dF957af199Ec94617F7
@ -6,9 +6,9 @@ import (
  "github.com/cerc-io/plugeth-statediff/utils/log"
)

// QuietLogs discards the geth logs and sets the plugin test log level to "warning"
// The geth sync logs are noisy, so during some tests it helps to silence them.
// QuietLogs silences the geth logs and sets the plugin test log level to "warning"
// The geth sync logs are noisy, so it can be nice to silence them.
func QuietLogs() {
  geth_log.SetDefault(geth_log.New(geth_log.DiscardHandler()))
  geth_log.Root().SetHandler(geth_log.DiscardHandler())
  log.TestLogger.SetLevel(2)
}
@ -3,13 +3,12 @@ package utils_test
import (
  "testing"

  "github.com/cerc-io/eth-testing/chains/mainnet"
  "github.com/cerc-io/eth-testing/chaindata/mainnet"
  "github.com/ethereum/go-ethereum/consensus/ethash"
  "github.com/ethereum/go-ethereum/core"
  "github.com/ethereum/go-ethereum/core/rawdb"
  "github.com/ethereum/go-ethereum/core/vm"
  "github.com/ethereum/go-ethereum/trie"
  "github.com/ethereum/go-ethereum/triedb"
  "github.com/stretchr/testify/assert"
  "github.com/stretchr/testify/require"

@ -44,26 +43,16 @@ var (

func TestSymmetricDifferenceIterator(t *testing.T) {
  t.Run("with no difference", func(t *testing.T) {
    db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)
    db := trie.NewDatabase(rawdb.NewMemoryDatabase())
    triea := trie.NewEmpty(db)

    ita, err := triea.NodeIterator(nil)
    assert.NoError(t, err)
    itb, err := triea.NodeIterator(nil)
    assert.NoError(t, err)
    di := utils.NewSymmetricDifferenceIterator(ita, itb)
    di := utils.NewSymmetricDifferenceIterator(triea.NodeIterator(nil), triea.NodeIterator(nil))
    for di.Next(true) {
      t.Errorf("iterator should not yield any elements")
    }
    assert.Equal(t, 0, di.Count())

    triea.MustUpdate([]byte("foo"), []byte("bar"))
    ita, err = triea.NodeIterator(nil)
    assert.NoError(t, err)
    itb, err = triea.NodeIterator(nil)
    assert.NoError(t, err)

    di = utils.NewSymmetricDifferenceIterator(ita, itb)
    di = utils.NewSymmetricDifferenceIterator(triea.NodeIterator(nil), triea.NodeIterator(nil))
    for di.Next(true) {
      t.Errorf("iterator should not yield any elements")
    }
@ -71,11 +60,7 @@ func TestSymmetricDifferenceIterator(t *testing.T) {
    assert.Equal(t, 2, di.Count())

    trieb := trie.NewEmpty(db)
    ita, err = triea.NodeIterator([]byte("jars"))
    assert.NoError(t, err)
    itb, err = trieb.NodeIterator(nil)
    assert.NoError(t, err)
    di = utils.NewSymmetricDifferenceIterator(ita, itb)
    di = utils.NewSymmetricDifferenceIterator(triea.NodeIterator([]byte("jars")), trieb.NodeIterator(nil))
    for di.Next(true) {
      t.Errorf("iterator should not yield any elements")
    }
@ -90,18 +75,14 @@ func TestSymmetricDifferenceIterator(t *testing.T) {
  })

  t.Run("small difference", func(t *testing.T) {
    dba := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)
    dba := trie.NewDatabase(rawdb.NewMemoryDatabase())
    triea := trie.NewEmpty(dba)

    dbb := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)
    dbb := trie.NewDatabase(rawdb.NewMemoryDatabase())
    trieb := trie.NewEmpty(dbb)
    trieb.MustUpdate([]byte("foo"), []byte("bar"))

    ita, err := triea.NodeIterator(nil)
    assert.NoError(t, err)
    itb, err := trieb.NodeIterator(nil)
    assert.NoError(t, err)
    di := utils.NewSymmetricDifferenceIterator(ita, itb)
    di := utils.NewSymmetricDifferenceIterator(triea.NodeIterator(nil), trieb.NodeIterator(nil))
    leaves := 0
    for di.Next(true) {
      if di.Leaf() {
@ -115,11 +96,7 @@ func TestSymmetricDifferenceIterator(t *testing.T) {
    assert.Equal(t, 2, di.Count())

    trieb.MustUpdate([]byte("quux"), []byte("bars"))
    ita, err = triea.NodeIterator(nil)
    assert.NoError(t, err)
    itb, err = trieb.NodeIterator([]byte("quux"))
    assert.NoError(t, err)
    di = utils.NewSymmetricDifferenceIterator(ita, itb)
    di = utils.NewSymmetricDifferenceIterator(triea.NodeIterator(nil), trieb.NodeIterator([]byte("quux")))
    leaves = 0
    for di.Next(true) {
      if di.Leaf() {
@ -133,12 +110,12 @@ func TestSymmetricDifferenceIterator(t *testing.T) {
    assert.Equal(t, 1, di.Count())
  })

  dba := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)
  dba := trie.NewDatabase(rawdb.NewMemoryDatabase())
  triea := trie.NewEmpty(dba)
  for _, val := range testdata1 {
    triea.MustUpdate([]byte(val.k), []byte(val.v))
  }
  dbb := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)
  dbb := trie.NewDatabase(rawdb.NewMemoryDatabase())
  trieb := trie.NewEmpty(dbb)
  for _, val := range testdata2 {
    trieb.MustUpdate([]byte(val.k), []byte(val.v))
@ -147,11 +124,7 @@ func TestSymmetricDifferenceIterator(t *testing.T) {
  onlyA := make(map[string]string)
  onlyB := make(map[string]string)
  var deletions, creations []string
  ita, err := triea.NodeIterator(nil)
  assert.NoError(t, err)
  itb, err := trieb.NodeIterator(nil)
  assert.NoError(t, err)
  it := utils.NewSymmetricDifferenceIterator(ita, itb)
  it := utils.NewSymmetricDifferenceIterator(triea.NodeIterator(nil), trieb.NodeIterator(nil))
  for it.Next(true) {
    if !it.Leaf() {
      continue
@ -204,7 +177,7 @@ func TestCompareDifferenceIterators(t *testing.T) {
  test_helpers.QuietLogs()

  db := rawdb.NewMemoryDatabase()
  core.DefaultGenesisBlock().MustCommit(db, triedb.NewDatabase(db, nil))
  core.DefaultGenesisBlock().MustCommit(db)
  blocks := mainnet.GetBlocks()
  chain, _ := core.NewBlockChain(db, nil, nil, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
  _, err := chain.InsertChain(blocks[1:])
@ -223,28 +196,16 @@ func TestCompareDifferenceIterators(t *testing.T) {
  // collect the paths of nodes exclusive to A and B separately, then make sure the symmetric
  // iterator produces the same sets
  var pathsA, pathsB [][]byte
  ita, err := treeA.NodeIterator(nil)
  assert.NoError(t, err)
  itb, err := treeB.NodeIterator(nil)
  assert.NoError(t, err)
  itBonly, _ := trie.NewDifferenceIterator(ita, itb)
  itBonly, _ := trie.NewDifferenceIterator(treeA.NodeIterator(nil), treeB.NodeIterator(nil))
  for itBonly.Next(true) {
    pathsB = append(pathsB, itBonly.Path())
  }
  ita, err = treeA.NodeIterator(nil)
  assert.NoError(t, err)
  itb, err = treeB.NodeIterator(nil)
  assert.NoError(t, err)
  itAonly, _ := trie.NewDifferenceIterator(itb, ita)
  itAonly, _ := trie.NewDifferenceIterator(treeB.NodeIterator(nil), treeA.NodeIterator(nil))
  for itAonly.Next(true) {
    pathsA = append(pathsA, itAonly.Path())
  }

  ita, err = treeA.NodeIterator(nil)
  assert.NoError(t, err)
  itb, err = treeB.NodeIterator(nil)
  assert.NoError(t, err)
  itSym := utils.NewSymmetricDifferenceIterator(ita, itb)
  itSym := utils.NewSymmetricDifferenceIterator(treeA.NodeIterator(nil), treeB.NodeIterator(nil))
  var idxA, idxB int
  for itSym.Next(true) {
    if itSym.FromA() {
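Most of the churn in this test file comes from two go-ethereum API changes: Trie.NodeIterator now returns (NodeIterator, error), and trie databases are built via the triedb package. A minimal sketch of the newer call shape, assuming a recent go-ethereum (not part of this diff):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/triedb"
)

func main() {
	db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)
	tr := trie.NewEmpty(db)
	tr.MustUpdate([]byte("foo"), []byte("bar"))

	it, err := tr.NodeIterator(nil) // older geth returned the iterator without an error
	if err != nil {
		panic(err)
	}
	for it.Next(true) {
		if it.Leaf() {
			fmt.Printf("leaf %x = %x\n", it.LeafKey(), it.LeafBlob())
		}
	}
}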
@ -51,6 +51,8 @@ func ChainConfig(chainID uint64) (*params.ChainConfig, error) {
  switch chainID {
  case 1:
    return params.MainnetChainConfig, nil
  case 4:
    return params.RinkebyChainConfig, nil
  case 5:
    return params.GoerliChainConfig, nil
  default: