forked from cerc-io/ipld-eth-server
command edits; external pkg for finding min deployment block; gofmt
parent 8562abd180
commit a834e55b9f
@@ -22,7 +22,6 @@ import (
     syn "sync"
     "time"
 
-    "github.com/ethereum/go-ethereum/statediff"
     log "github.com/sirupsen/logrus"
     "github.com/spf13/cobra"
 
@@ -186,12 +185,11 @@ func composeAndExecute() {
         log.Debug("fetching storage diffs from geth pub sub")
         rpcClient, _ := getClients()
         stateDiffStreamer := streamer.NewStateDiffStreamer(rpcClient)
-        payloadChan := make(chan statediff.Payload)
-        storageFetcher := fetcher.NewGethRPCStorageFetcher(&stateDiffStreamer, payloadChan)
+        storageFetcher := fetcher.NewGethRPCStorageFetcher(stateDiffStreamer)
         sw := watcher.NewStorageWatcher(storageFetcher, &db)
         sw.AddTransformers(ethStorageInitializers)
         wg.Add(1)
-        go watchEthStorage(&sw, &wg)
+        go watchEthStorage(sw, &wg)
     default:
         log.Debug("fetching storage diffs from csv")
         tailer := fs.FileTailer{Path: storageDiffsPath}
@@ -199,7 +197,7 @@ func composeAndExecute() {
         sw := watcher.NewStorageWatcher(storageFetcher, &db)
         sw.AddTransformers(ethStorageInitializers)
         wg.Add(1)
-        go watchEthStorage(&sw, &wg)
+        go watchEthStorage(sw, &wg)
     }
 }
@@ -24,7 +24,6 @@ import (
     "time"
 
     "github.com/ethereum/go-ethereum/rpc"
-    "github.com/ethereum/go-ethereum/statediff"
     log "github.com/sirupsen/logrus"
     "github.com/spf13/cobra"
     "github.com/spf13/viper"
@@ -134,12 +133,11 @@ func execute() {
         log.Debug("fetching storage diffs from geth pub sub")
         rpcClient, _ := getClients()
         stateDiffStreamer := streamer.NewStateDiffStreamer(rpcClient)
-        payloadChan := make(chan statediff.Payload)
-        storageFetcher := fetcher.NewGethRPCStorageFetcher(&stateDiffStreamer, payloadChan)
+        storageFetcher := fetcher.NewGethRPCStorageFetcher(stateDiffStreamer)
         sw := watcher.NewStorageWatcher(storageFetcher, &db)
         sw.AddTransformers(ethStorageInitializers)
         wg.Add(1)
-        go watchEthStorage(&sw, &wg)
+        go watchEthStorage(sw, &wg)
     default:
         log.Debug("fetching storage diffs from csv")
         tailer := fs.FileTailer{Path: storageDiffsPath}
@@ -147,7 +145,7 @@ func execute() {
         sw := watcher.NewStorageWatcher(storageFetcher, &db)
         sw.AddTransformers(ethStorageInitializers)
         wg.Add(1)
-        go watchEthStorage(&sw, &wg)
+        go watchEthStorage(sw, &wg)
     }
 }
@@ -192,13 +190,14 @@ func watchEthStorage(w watcher.IStorageWatcher, wg *syn.WaitGroup) {
     LogWithCommand.Info("executing storage transformers")
     on := viper.GetBool("storageBackFill.on")
     if on {
-        go backFillStorage(w)
+        backFillStorage(w)
     }
     w.Execute(queueRecheckInterval, on)
 }
 
 func backFillStorage(w watcher.IStorageWatcher) {
     // configure archival rpc client
     // move this all into the storage watcher?
     archivalRPCPath := viper.GetString("storageBackFill.rpcPath")
     if archivalRPCPath == "" {
         LogWithCommand.Fatal(errors.New("storage backfill is turned on but no rpc path is provided"))
@@ -207,12 +206,12 @@ func backFillStorage(w watcher.IStorageWatcher) {
     if dialErr != nil {
         LogWithCommand.Fatal(dialErr)
     }
-    rpcClient := client.NewRpcClient(rawRPCClient, archivalRPCPath)
+    rpcClient := client.NewRPCClient(rawRPCClient, archivalRPCPath)
     // find min deployment block
-    minDeploymentBlock := viper.GetInt("storageBackFill.startingBlock")
+    minDeploymentBlock := constants.GetMinDeploymentBlock()
     stateDiffFetcher := fetcher.NewStateDiffFetcher(rpcClient)
-    backFiller := storage.NewStorageBackFiller(stateDiffFetcher)
-    w.BackFill(backFiller, minDeploymentBlock)
+    backFiller := storage.NewStorageBackFiller(stateDiffFetcher, uint64(minDeploymentBlock), storage.DefaultMaxBatchSize)
+    go w.BackFill(backFiller)
 }
 
 func watchEthContract(w *watcher.ContractWatcher, wg *syn.WaitGroup) {
@@ -205,8 +205,6 @@ To do so, add the following fields to the config file.
 [storageBackFill]
     on = false
     rpcPath = ""
-    startingBlock = 0
 ```
 - `on` is set to `true` to turn the backfill process on
 - `rpcPath` is the websocket or ipc path to the modified archival geth node that exposes the `StateDiffAt` rpc endpoint we can use to backfill storage diffs
-- `startingBlock` is the block height at which we want to begin the backfill process; the height of the earliest contract deployment
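For context, a minimal sketch of how these settings end up being consumed by the commands changed above. Only the `viper` lookups mirror lines from the diff; the config path is a hypothetical placeholder.

```go
// Illustrative only: reads the [storageBackFill] settings the README describes.
// The config file path is a placeholder, not a path from the repository.
package main

import (
	"errors"
	"log"

	"github.com/spf13/viper"
)

func main() {
	viper.SetConfigFile("environments/example.toml") // hypothetical path
	if err := viper.ReadInConfig(); err != nil {
		log.Fatal(err)
	}
	// `on` toggles the storage backfill process
	if viper.GetBool("storageBackFill.on") {
		// `rpcPath` must point at an archival geth node exposing StateDiffAt
		if viper.GetString("storageBackFill.rpcPath") == "" {
			log.Fatal(errors.New("storage backfill is turned on but no rpc path is provided"))
		}
		// Note: `startingBlock` is gone; the starting height is now derived
		// from the configured contracts' deployment blocks (see the new
		// constants package below).
	}
}
```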
libraries/shared/constants/external.go (new file, 90 lines)
@@ -0,0 +1,90 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package constants

import (
    "fmt"
    "math"

    log "github.com/sirupsen/logrus"
    "github.com/spf13/viper"
)

var initialized = false

func initConfig() {
    if initialized {
        return
    }

    if err := viper.ReadInConfig(); err == nil {
        log.Info("Using config file:", viper.ConfigFileUsed())
    } else {
        panic(fmt.Sprintf("Could not find environment file: %v", err))
    }
    initialized = true
}

// GetMinDeploymentBlock gets the minimum deployment block for multiple contracts from config
func GetMinDeploymentBlock() uint64 {
    initConfig()
    contractNames := getContractNames()
    if len(contractNames) < 1 {
        log.Fatalf("No contracts supplied")
    }
    minBlock := uint64(math.MaxUint64)
    for _, c := range contractNames {
        deployed := getDeploymentBlock(c)
        if deployed < minBlock {
            minBlock = deployed
        }
    }
    return minBlock
}

func getContractNames() []string {
    initConfig()
    transformerNames := viper.GetStringSlice("exporter.transformerNames")
    contractNames := make([]string, 0)
    for _, transformerName := range transformerNames {
        configKey := "exporter." + transformerName + ".contracts"
        names := viper.GetStringSlice(configKey)
        for _, name := range names {
            contractNames = appendNoDuplicates(transformerNames, name)
        }
    }
    return contractNames
}

func appendNoDuplicates(strSlice []string, str string) []string {
    for _, strInSlice := range strSlice {
        if strInSlice == str {
            return strSlice
        }
    }
    return append(strSlice, str)
}

func getDeploymentBlock(contractName string) uint64 {
    configKey := "contract." + contractName + ".deployed"
    value := viper.GetInt64(configKey)
    if value < 0 {
        log.Infof("No deployment block configured for contract \"%v\", defaulting to 0.", contractName)
        return 0
    }
    return uint64(value)
}
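As a usage sketch: the config path and contract names below are hypothetical, but they show the shape this package reads, namely `exporter.transformerNames`, `exporter.<transformer>.contracts`, and `contract.<name>.deployed`.

```go
// Hypothetical usage of constants.GetMinDeploymentBlock; the config path and
// contract names are placeholders, not values from the repository.
package main

import (
	"fmt"

	"github.com/spf13/viper"

	"github.com/vulcanize/vulcanizedb/libraries/shared/constants"
)

func main() {
	// environments/example.toml (illustrative):
	//   [exporter]
	//   transformerNames = ["transformerA"]
	//   [exporter.transformerA]
	//   contracts = ["CONTRACT_ONE", "CONTRACT_TWO"]
	//   [contract.CONTRACT_ONE]
	//   deployed = 8000000
	//   [contract.CONTRACT_TWO]
	//   deployed = 7500000
	viper.SetConfigFile("environments/example.toml")

	// initConfig (called internally) reads the file via viper.ReadInConfig;
	// the function then returns the smallest configured `deployed` height.
	fmt.Println(constants.GetMinDeploymentBlock())
}
```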
@@ -19,9 +19,10 @@ package event
 import (
     "database/sql/driver"
     "fmt"
-    "github.com/vulcanize/vulcanizedb/utils"
     "strings"
 
+    "github.com/vulcanize/vulcanizedb/utils"
+
     "github.com/sirupsen/logrus"
     "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
 )

@@ -18,6 +18,8 @@ package event_test
 
 import (
     "fmt"
+    "math/big"
+
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
     "github.com/vulcanize/vulcanizedb/libraries/shared/factories/event"
@@ -26,7 +28,6 @@ import (
     "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
     "github.com/vulcanize/vulcanizedb/pkg/fakes"
     "github.com/vulcanize/vulcanizedb/test_config"
-    "math/big"
 )
 
 var _ = Describe("Repository", func() {
@@ -18,6 +18,7 @@ package fetcher
 
 import (
     "fmt"
 
     "github.com/ethereum/go-ethereum/statediff"
+
     "github.com/vulcanize/vulcanizedb/pkg/eth/client"
@@ -30,8 +30,8 @@
 )
 
 const (
-    DefaultMaxBatchSize   uint64 = 5000
-    defaultMaxBatchNumber int64  = 100
+    DefaultMaxBatchSize   uint64 = 1000
+    defaultMaxBatchNumber int64  = 10
 )
 
 // BackFiller is the backfilling interface
@@ -64,6 +64,7 @@ func (bf *backFiller) BackFill(endingBlock uint64, backFill chan utils.StorageDiff
     if endingBlock < bf.startingBlock {
         return errors.New("backfill: ending block number needs to be greater than starting block number")
     }
+    logrus.Infof("going to fill in gap from %d to %d", bf.startingBlock, endingBlock)
     // break the range up into bins of smaller ranges
     length := endingBlock - bf.startingBlock + 1
     numberOfBins := length / bf.batchSize
@@ -88,7 +89,8 @@ func (bf *backFiller) BackFill(endingBlock uint64, backFill chan utils.StorageDiff
     // int64 for atomic incrementing and decrementing to track the number of active processing goroutines we have
     var activeCount int64
     // channel for processing goroutines to signal when they are done
-    processingDone := make(chan bool)
+    processingDone := make(chan [2]uint64)
+    forwardDone := make(chan bool)
 
     // for each block range bin spin up a goroutine to batch fetch and process state diffs for that range
     go func() {
@@ -97,8 +99,7 @@ func (bf *backFiller) BackFill(endingBlock uint64, backFill chan utils.StorageDiff
             // wait for one to finish before starting the next
             if atomic.AddInt64(&activeCount, 1) > defaultMaxBatchNumber {
                 // this blocks until a process signals it has finished
-                // immediately forwards the signal to the normal listener so that it keeps the correct count
-                processingDone <- <-processingDone
+                <-forwardDone
             }
             go func(blockHeights []uint64) {
                 payloads, fetchErr := bf.fetcher.FetchStateDiffsAt(blockHeights)
@@ -131,7 +132,7 @@ func (bf *backFiller) BackFill(endingBlock uint64, backFill chan utils.StorageDiff
                     }
                 }
                 // when this goroutine is done, send out a signal
-                processingDone <- true
+                processingDone <- [2]uint64{blockHeights[0], blockHeights[len(blockHeights)-1]}
             }(blockHeights)
         }
     }()
@@ -143,8 +144,14 @@ func (bf *backFiller) BackFill(endingBlock uint64, backFill chan utils.StorageDiff
     goroutinesFinished := 0
     for {
         select {
-        case <-processingDone:
+        case doneWithHeights := <-processingDone:
             atomic.AddInt64(&activeCount, -1)
+            select {
+            // if we are waiting for a process to finish, signal that one has
+            case forwardDone <- true:
+            default:
+            }
+            logrus.Infof("finished fetching gap sub-bin from %d to %d", doneWithHeights[0], doneWithHeights[1])
             goroutinesFinished++
             if goroutinesFinished == int(numberOfBins) {
                 done <- true
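With the tightened constants, at most 10 batches of 1,000 blocks are in flight at once. Below is a minimal, self-contained sketch of the signalling pattern these hunks introduce: completions now carry the processed block range, and a separate forwardDone channel unblocks the spawner when it is waiting for capacity. Names, limits, and the fake "work" are illustrative, not the repository's API.

```go
// Sketch of the bounded-concurrency signalling used by the reworked backfiller.
package main

import (
	"fmt"
	"sync/atomic"
)

const maxInFlight int64 = 2 // the real code uses defaultMaxBatchNumber (10)

func main() {
	bins := [][2]uint64{{0, 99}, {100, 199}, {200, 299}, {300, 399}}

	var active int64
	processingDone := make(chan [2]uint64) // workers report the range they finished
	forwardDone := make(chan bool)         // collector nudges the spawner when capacity frees up

	// spawner: one goroutine per bin, but never more than maxInFlight at once
	go func() {
		for _, bin := range bins {
			if atomic.AddInt64(&active, 1) > maxInFlight {
				<-forwardDone // block until the collector reports a finished worker
			}
			go func(heights [2]uint64) {
				// the real code batch-fetches and processes statediffs here
				processingDone <- heights
			}(bin)
		}
	}()

	// collector: count completions and unblock the spawner if it is waiting
	finished := 0
	for finished < len(bins) {
		doneWith := <-processingDone
		atomic.AddInt64(&active, -1)
		select {
		case forwardDone <- true: // spawner was waiting for capacity
		default: // nobody waiting; drop the signal
		}
		fmt.Printf("finished sub-bin %d to %d\n", doneWith[0], doneWith[1])
		finished++
	}
}
```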
@@ -17,9 +17,10 @@
 package utils
 
 import (
+    "math/big"
+
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/crypto"
-    "math/big"
 )
 
 const (
@@ -1,13 +1,14 @@
 package test_data
 
 import (
+    "math/rand"
+
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
     . "github.com/onsi/gomega"
     "github.com/vulcanize/vulcanizedb/pkg/core"
     "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
     "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
-    "math/rand"
 )
 
 // Create a header sync log to reference in an event, returning inserted header sync log
@@ -20,6 +20,7 @@ import (
     "database/sql"
     "errors"
     "fmt"
+    "math"
     "strings"
 
     "github.com/ethereum/go-ethereum/common"
@@ -105,7 +106,7 @@ func (tr *Transformer) Init() error {
     tr.sortedMethodIds = make(map[string][]string) // Map to sort method column ids by contract, for post fetch method polling
     tr.eventIds = make([]string, 0) // Holds event column ids across all contract, for batch fetching of headers
     tr.eventFilters = make([]common.Hash, 0) // Holds topic0 hashes across all contracts, for batch fetching of logs
-    tr.Start = 100000000000
+    tr.Start = math.MaxInt64
 
     // Iterate through all internal contract addresses
     for contractAddr := range tr.Config.Addresses {
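Here the magic starting height is replaced with math.MaxInt64, which reads as a sentinel so that the loop over contract addresses can presumably keep the smallest deployment block it encounters. A tiny illustration of the idiom, with made-up heights:

```go
// Illustration of the MaxInt64-as-sentinel idiom; the heights are made up.
package main

import (
	"fmt"
	"math"
)

func main() {
	deploymentBlocks := []int64{8000000, 7500000, 9250000} // illustrative
	start := int64(math.MaxInt64)                          // sentinel: any real height is smaller
	for _, deployed := range deploymentBlocks {
		if deployed < start {
			start = deployed
		}
	}
	fmt.Println(start) // 7500000
}
```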