commit bf26198458

.gitignore (vendored, 1 change)
@@ -16,3 +16,4 @@ db/migrations/20*.sql
 plugins/*.so
 postgraphile/*.toml
 postgraphile/schema.graphql
+vulcanizedb.pem
@@ -39,8 +39,8 @@ var coldImportCmd = &cobra.Command{
 
 Geth must be synced over all of the desired blocks and must not be running in order to execute this command.`,
 	Run: func(cmd *cobra.Command, args []string) {
-		SubCommand = cmd.CalledAs()
-		LogWithCommand = *log.WithField("SubCommand", SubCommand)
+		subCommand = cmd.CalledAs()
+		logWithCommand = *log.WithField("SubCommand", subCommand)
 		coldImport()
 	},
 }
@@ -57,7 +57,7 @@ func coldImport() {
 	ethDBConfig := ethereum.CreateDatabaseConfig(ethereum.Level, levelDbPath)
 	ethDB, err := ethereum.CreateDatabase(ethDBConfig)
 	if err != nil {
-		LogWithCommand.Fatal("Error connecting to ethereum db: ", err)
+		logWithCommand.Fatal("Error connecting to ethereum db: ", err)
 	}
 	mostRecentBlockNumberInDb := ethDB.GetHeadBlockNumber()
 	if syncAll {
@@ -65,10 +65,10 @@ func coldImport() {
 		endingBlockNumber = mostRecentBlockNumberInDb
 	}
 	if endingBlockNumber < startingBlockNumber {
-		LogWithCommand.Fatal("Ending block number must be greater than starting block number for cold import.")
+		logWithCommand.Fatal("Ending block number must be greater than starting block number for cold import.")
 	}
 	if endingBlockNumber > mostRecentBlockNumberInDb {
-		LogWithCommand.Fatal("Ending block number is greater than most recent block in db: ", mostRecentBlockNumberInDb)
+		logWithCommand.Fatal("Ending block number is greater than most recent block in db: ", mostRecentBlockNumberInDb)
 	}
 
 	// init pg db
@@ -78,7 +78,7 @@ func coldImport() {
 	nodeBuilder := cold_import.NewColdImportNodeBuilder(reader, parser)
 	coldNode, err := nodeBuilder.GetNode(genesisBlock, levelDbPath)
 	if err != nil {
-		LogWithCommand.Fatal("Error getting node: ", err)
+		logWithCommand.Fatal("Error getting node: ", err)
 	}
 	pgDB := utils.LoadPostgres(databaseConfig, coldNode)
 
@@ -92,6 +92,6 @@ func coldImport() {
 	coldImporter := cold_import.NewColdImporter(ethDB, blockRepository, receiptRepository, blockConverter)
 	err = coldImporter.Execute(startingBlockNumber, endingBlockNumber, coldNode.ID)
 	if err != nil {
-		LogWithCommand.Fatal("Error executing cold import: ", err)
+		logWithCommand.Fatal("Error executing cold import: ", err)
 	}
 }
@@ -102,8 +102,8 @@ single config file or in separate command instances using different config files
 Specify config location when executing the command:
 ./vulcanizedb compose --config=./environments/config_name.toml`,
 	Run: func(cmd *cobra.Command, args []string) {
-		SubCommand = cmd.CalledAs()
-		LogWithCommand = *log.WithField("SubCommand", SubCommand)
+		subCommand = cmd.CalledAs()
+		logWithCommand = *log.WithField("SubCommand", subCommand)
 		compose()
 	},
 }
@@ -113,25 +113,25 @@ func compose() {
 	prepConfig()
 
 	// Generate code to build the plugin according to the config file
-	LogWithCommand.Info("generating plugin")
+	logWithCommand.Info("generating plugin")
 	generator, err := p2.NewGenerator(genConfig, databaseConfig)
 	if err != nil {
-		LogWithCommand.Debug("initializing plugin generator failed")
-		LogWithCommand.Fatal(err)
+		logWithCommand.Debug("initializing plugin generator failed")
+		logWithCommand.Fatal(err)
 	}
 	err = generator.GenerateExporterPlugin()
 	if err != nil {
-		LogWithCommand.Debug("generating plugin failed")
-		LogWithCommand.Fatal(err)
+		logWithCommand.Debug("generating plugin failed")
+		logWithCommand.Fatal(err)
 	}
 	// TODO: Embed versioning info in the .so files so we know which version of vulcanizedb to run them with
 	_, pluginPath, err := genConfig.GetPluginPaths()
 	if err != nil {
-		LogWithCommand.Debug("getting plugin path failed")
-		LogWithCommand.Fatal(err)
+		logWithCommand.Debug("getting plugin path failed")
+		logWithCommand.Fatal(err)
 	}
 	fmt.Printf("Composed plugin %s", pluginPath)
-	LogWithCommand.Info("plugin .so file output to ", pluginPath)
+	logWithCommand.Info("plugin .so file output to ", pluginPath)
 }
 
 func init() {
@@ -139,38 +139,38 @@ func init() {
 }
 
 func prepConfig() {
-	LogWithCommand.Info("configuring plugin")
+	logWithCommand.Info("configuring plugin")
 	names := viper.GetStringSlice("exporter.transformerNames")
 	transformers := make(map[string]config.Transformer)
 	for _, name := range names {
 		transformer := viper.GetStringMapString("exporter." + name)
 		p, pOK := transformer["path"]
 		if !pOK || p == "" {
-			LogWithCommand.Fatal(name, " transformer config is missing `path` value")
+			logWithCommand.Fatal(name, " transformer config is missing `path` value")
 		}
 		r, rOK := transformer["repository"]
 		if !rOK || r == "" {
-			LogWithCommand.Fatal(name, " transformer config is missing `repository` value")
+			logWithCommand.Fatal(name, " transformer config is missing `repository` value")
 		}
 		m, mOK := transformer["migrations"]
 		if !mOK || m == "" {
-			LogWithCommand.Fatal(name, " transformer config is missing `migrations` value")
+			logWithCommand.Fatal(name, " transformer config is missing `migrations` value")
 		}
 		mr, mrOK := transformer["rank"]
 		if !mrOK || mr == "" {
-			LogWithCommand.Fatal(name, " transformer config is missing `rank` value")
+			logWithCommand.Fatal(name, " transformer config is missing `rank` value")
 		}
 		rank, err := strconv.ParseUint(mr, 10, 64)
 		if err != nil {
-			LogWithCommand.Fatal(name, " migration `rank` can't be converted to an unsigned integer")
+			logWithCommand.Fatal(name, " migration `rank` can't be converted to an unsigned integer")
 		}
 		t, tOK := transformer["type"]
 		if !tOK {
-			LogWithCommand.Fatal(name, " transformer config is missing `type` value")
+			logWithCommand.Fatal(name, " transformer config is missing `type` value")
 		}
 		transformerType := config.GetTransformerType(t)
 		if transformerType == config.UnknownTransformerType {
-			LogWithCommand.Fatal(errors.New(`unknown transformer type in exporter config accepted types are "eth_event", "eth_storage"`))
+			logWithCommand.Fatal(errors.New(`unknown transformer type in exporter config accepted types are "eth_event", "eth_storage"`))
 		}
 
 		transformers[name] = config.Transformer{
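Note: the prepConfig hunk above builds its transformer set from the `exporter` namespace of the .toml config. For illustration only (the transformer name and values below are hypothetical, not part of this commit), a fragment satisfying those viper lookups could look like:

    [exporter]
    transformerNames = ["example_transformer"]
        [exporter.example_transformer]
        path = "transformers/example/initializer"
        repository = "github.com/account/example-plugin-repo"
        migrations = "db/migrations"
        rank = "0"
        type = "eth_event"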
@@ -107,8 +107,8 @@ single config file or in separate command instances using different config files
 Specify config location when executing the command:
 ./vulcanizedb composeAndExecute --config=./environments/config_name.toml`,
 	Run: func(cmd *cobra.Command, args []string) {
-		SubCommand = cmd.CalledAs()
-		LogWithCommand = *log.WithField("SubCommand", SubCommand)
+		subCommand = cmd.CalledAs()
+		logWithCommand = *log.WithField("SubCommand", subCommand)
 		composeAndExecute()
 	},
 }
@@ -118,44 +118,44 @@ func composeAndExecute() {
 	prepConfig()
 
 	// Generate code to build the plugin according to the config file
-	LogWithCommand.Info("generating plugin")
+	logWithCommand.Info("generating plugin")
 	generator, err := p2.NewGenerator(genConfig, databaseConfig)
 	if err != nil {
-		LogWithCommand.Fatal(err)
+		logWithCommand.Fatal(err)
 	}
 	err = generator.GenerateExporterPlugin()
 	if err != nil {
-		LogWithCommand.Debug("generating plugin failed")
-		LogWithCommand.Fatal(err)
+		logWithCommand.Debug("generating plugin failed")
+		logWithCommand.Fatal(err)
 	}
 
 	// Get the plugin path and load the plugin
 	_, pluginPath, err := genConfig.GetPluginPaths()
 	if err != nil {
-		LogWithCommand.Fatal(err)
+		logWithCommand.Fatal(err)
 	}
 	if !genConfig.Save {
 		defer helpers.ClearFiles(pluginPath)
 	}
-	LogWithCommand.Info("linking plugin ", pluginPath)
+	logWithCommand.Info("linking plugin ", pluginPath)
 	plug, err := plugin.Open(pluginPath)
 	if err != nil {
-		LogWithCommand.Debug("linking plugin failed")
-		LogWithCommand.Fatal(err)
+		logWithCommand.Debug("linking plugin failed")
+		logWithCommand.Fatal(err)
 	}
 
 	// Load the `Exporter` symbol from the plugin
-	LogWithCommand.Info("loading transformers from plugin")
+	logWithCommand.Info("loading transformers from plugin")
 	symExporter, err := plug.Lookup("Exporter")
 	if err != nil {
-		LogWithCommand.Debug("loading Exporter symbol failed")
-		LogWithCommand.Fatal(err)
+		logWithCommand.Debug("loading Exporter symbol failed")
+		logWithCommand.Fatal(err)
 	}
 
 	// Assert that the symbol is of type Exporter
 	exporter, ok := symExporter.(Exporter)
 	if !ok {
-		LogWithCommand.Debug("plugged-in symbol not of type Exporter")
+		logWithCommand.Debug("plugged-in symbol not of type Exporter")
 		os.Exit(1)
 	}
 
@@ -173,7 +173,7 @@ func composeAndExecute() {
 		ew := watcher.NewEventWatcher(&db, blockChain)
 		err := ew.AddTransformers(ethEventInitializers)
 		if err != nil {
-			LogWithCommand.Fatalf("failed to add event transformer initializers to watcher: %s", err.Error())
+			logWithCommand.Fatalf("failed to add event transformer initializers to watcher: %s", err.Error())
 		}
 		wg.Add(1)
 		go watchEthEvents(&ew, &wg)
@@ -79,8 +79,8 @@ Requires a .toml config file:
     piping = true
 `,
 	Run: func(cmd *cobra.Command, args []string) {
-		SubCommand = cmd.CalledAs()
-		LogWithCommand = *log.WithField("SubCommand", SubCommand)
+		subCommand = cmd.CalledAs()
+		logWithCommand = *log.WithField("SubCommand", subCommand)
 		contractWatcher()
 	},
 }
@@ -105,18 +105,18 @@ func contractWatcher() {
 	case "full":
 		t = ft.NewTransformer(con, blockChain, &db)
 	default:
-		LogWithCommand.Fatal("Invalid mode")
+		logWithCommand.Fatal("Invalid mode")
 	}
 
 	err := t.Init()
 	if err != nil {
-		LogWithCommand.Fatal(fmt.Sprintf("Failed to initialize transformer, err: %v ", err))
+		logWithCommand.Fatal(fmt.Sprintf("Failed to initialize transformer, err: %v ", err))
 	}
 
 	for range ticker.C {
 		err = t.Execute()
 		if err != nil {
-			LogWithCommand.Error("Execution error for transformer: ", t.GetConfig().Name, err)
+			logWithCommand.Error("Execution error for transformer: ", t.GetConfig().Name, err)
 		}
 	}
 }
@@ -60,8 +60,8 @@ must have been composed by the same version of vulcanizedb or else it will not b
 Specify config location when executing the command:
 ./vulcanizedb execute --config=./environments/config_name.toml`,
 	Run: func(cmd *cobra.Command, args []string) {
-		SubCommand = cmd.CalledAs()
-		LogWithCommand = *log.WithField("SubCommand", SubCommand)
+		subCommand = cmd.CalledAs()
+		logWithCommand = *log.WithField("SubCommand", subCommand)
 		execute()
 	},
 }
@@ -73,29 +73,29 @@ func execute() {
 	// Get the plugin path and load the plugin
 	_, pluginPath, err := genConfig.GetPluginPaths()
 	if err != nil {
-		LogWithCommand.Fatal(err)
+		logWithCommand.Fatal(err)
 	}
 
 	fmt.Printf("Executing plugin %s", pluginPath)
-	LogWithCommand.Info("linking plugin ", pluginPath)
+	logWithCommand.Info("linking plugin ", pluginPath)
 	plug, err := plugin.Open(pluginPath)
 	if err != nil {
-		LogWithCommand.Warn("linking plugin failed")
-		LogWithCommand.Fatal(err)
+		logWithCommand.Warn("linking plugin failed")
+		logWithCommand.Fatal(err)
 	}
 
 	// Load the `Exporter` symbol from the plugin
-	LogWithCommand.Info("loading transformers from plugin")
+	logWithCommand.Info("loading transformers from plugin")
 	symExporter, err := plug.Lookup("Exporter")
 	if err != nil {
-		LogWithCommand.Warn("loading Exporter symbol failed")
-		LogWithCommand.Fatal(err)
+		logWithCommand.Warn("loading Exporter symbol failed")
+		logWithCommand.Fatal(err)
 	}
 
 	// Assert that the symbol is of type Exporter
 	exporter, ok := symExporter.(Exporter)
 	if !ok {
-		LogWithCommand.Fatal("plugged-in symbol not of type Exporter")
+		logWithCommand.Fatal("plugged-in symbol not of type Exporter")
 	}
 
 	// Use the Exporters export method to load the EventTransformerInitializer, StorageTransformerInitializer, and ContractTransformerInitializer sets
@@ -112,7 +112,7 @@ func execute() {
 	ew := watcher.NewEventWatcher(&db, blockChain)
 	err = ew.AddTransformers(ethEventInitializers)
 	if err != nil {
-		LogWithCommand.Fatalf("failed to add event transformer initializers to watcher: %s", err.Error())
+		logWithCommand.Fatalf("failed to add event transformer initializers to watcher: %s", err.Error())
 	}
 	wg.Add(1)
 	go watchEthEvents(&ew, &wg)
@@ -162,7 +162,7 @@ type Exporter interface {
 func watchEthEvents(w *watcher.EventWatcher, wg *syn.WaitGroup) {
 	defer wg.Done()
 	// Execute over the EventTransformerInitializer set using the watcher
-	LogWithCommand.Info("executing event transformers")
+	logWithCommand.Info("executing event transformers")
 	var recheck constants.TransformerExecution
 	if recheckHeadersArg {
 		recheck = constants.HeaderRecheck
@@ -171,14 +171,14 @@ func watchEthEvents(w *watcher.EventWatcher, wg *syn.WaitGroup) {
 	}
 	err := w.Execute(recheck)
 	if err != nil {
-		LogWithCommand.Fatalf("error executing event watcher: %s", err.Error())
+		logWithCommand.Fatalf("error executing event watcher: %s", err.Error())
 	}
 }
 
 func watchEthStorage(w watcher.IStorageWatcher, wg *syn.WaitGroup) {
 	defer wg.Done()
 	// Execute over the StorageTransformerInitializer set using the storage watcher
-	LogWithCommand.Info("executing storage transformers")
+	logWithCommand.Info("executing storage transformers")
 	on := viper.GetBool("storageBackFill.on")
 	if on {
 		backFillStorage(w)
@@ -198,7 +198,7 @@ func backFillStorage(w watcher.IStorageWatcher) {
 func watchEthContract(w *watcher.ContractWatcher, wg *syn.WaitGroup) {
 	defer wg.Done()
 	// Execute over the ContractTransformerInitializer set using the contract watcher
-	LogWithCommand.Info("executing contract_watcher transformers")
+	logWithCommand.Info("executing contract_watcher transformers")
 	ticker := time.NewTicker(pollingInterval)
 	defer ticker.Stop()
 	for range ticker.C {
@@ -49,8 +49,8 @@ Expects ethereum node to be running and requires a .toml config:
     ipcPath = "/Users/user/Library/Ethereum/geth.ipc"
 `,
 	Run: func(cmd *cobra.Command, args []string) {
-		SubCommand = cmd.CalledAs()
-		LogWithCommand = *log.WithField("SubCommand", SubCommand)
+		subCommand = cmd.CalledAs()
+		logWithCommand = *log.WithField("SubCommand", subCommand)
 		fullSync()
 	},
 }
@@ -63,7 +63,7 @@ func init() {
 func backFillAllBlocks(blockchain core.BlockChain, blockRepository datastore.BlockRepository, missingBlocksPopulated chan int, startingBlockNumber int64) {
 	populated, err := history.PopulateMissingBlocks(blockchain, blockRepository, startingBlockNumber)
 	if err != nil {
-		LogWithCommand.Error("backfillAllBlocks: error in populateMissingBlocks: ", err)
+		logWithCommand.Error("backfillAllBlocks: error in populateMissingBlocks: ", err)
 	}
 	missingBlocksPopulated <- populated
 }
@@ -75,13 +75,13 @@ func fullSync() {
 	blockChain := getBlockChain()
 	lastBlock, err := blockChain.LastBlock()
 	if err != nil {
-		LogWithCommand.Error("fullSync: Error getting last block: ", err)
+		logWithCommand.Error("fullSync: Error getting last block: ", err)
 	}
 	if lastBlock.Int64() == 0 {
-		LogWithCommand.Fatal("geth initial: state sync not finished")
+		logWithCommand.Fatal("geth initial: state sync not finished")
 	}
 	if startingBlockNumber > lastBlock.Int64() {
-		LogWithCommand.Fatal("fullSync: starting block number > current block number")
+		logWithCommand.Fatal("fullSync: starting block number > current block number")
 	}
 
 	db := utils.LoadPostgres(databaseConfig, blockChain.Node())
@@ -95,9 +95,9 @@ func fullSync() {
 		case <-ticker.C:
 			window, err := validator.ValidateBlocks()
 			if err != nil {
-				LogWithCommand.Error("fullSync: error in validateBlocks: ", err)
+				logWithCommand.Error("fullSync: error in validateBlocks: ", err)
 			}
-			LogWithCommand.Debug(window.GetString())
+			logWithCommand.Debug(window.GetString())
 		case <-missingBlocksPopulated:
 			go backFillAllBlocks(blockChain, blockRepository, missingBlocksPopulated, startingBlockNumber)
 		}
@@ -50,8 +50,8 @@ Expects ethereum node to be running and requires a .toml config:
     ipcPath = "/Users/user/Library/Ethereum/geth.ipc"
 `,
 	Run: func(cmd *cobra.Command, args []string) {
-		SubCommand = cmd.CalledAs()
-		LogWithCommand = *log.WithField("SubCommand", SubCommand)
+		subCommand = cmd.CalledAs()
+		logWithCommand = *log.WithField("SubCommand", subCommand)
 		headerSync()
 	},
 }
@@ -66,7 +66,7 @@ func backFillAllHeaders(blockchain core.BlockChain, headerRepository datastore.H
 	if err != nil {
 		// TODO Lots of possible errors in the call stack above. If errors occur, we still put
 		// 0 in the channel, triggering another round
-		LogWithCommand.Error("backfillAllHeaders: Error populating headers: ", err)
+		logWithCommand.Error("backfillAllHeaders: Error populating headers: ", err)
 	}
 	missingBlocksPopulated <- populated
 }
@@ -88,9 +88,9 @@ func headerSync() {
 		case <-ticker.C:
 			window, err := validator.ValidateHeaders()
 			if err != nil {
-				LogWithCommand.Error("headerSync: ValidateHeaders failed: ", err)
+				logWithCommand.Error("headerSync: ValidateHeaders failed: ", err)
 			}
-			LogWithCommand.Debug(window.GetString())
+			logWithCommand.Debug(window.GetString())
 		case n := <-missingBlocksPopulated:
 			if n == 0 {
 				time.Sleep(3 * time.Second)
@@ -103,12 +103,12 @@ func headerSync() {
 func validateArgs(blockChain *eth.BlockChain) {
 	lastBlock, err := blockChain.LastBlock()
 	if err != nil {
-		LogWithCommand.Error("validateArgs: Error getting last block: ", err)
+		logWithCommand.Error("validateArgs: Error getting last block: ", err)
 	}
 	if lastBlock.Int64() == 0 {
-		LogWithCommand.Fatal("geth initial: state sync not finished")
+		logWithCommand.Fatal("geth initial: state sync not finished")
 	}
 	if startingBlockNumber > lastBlock.Int64() {
-		LogWithCommand.Fatal("starting block number > current block number")
+		logWithCommand.Fatal("starting block number > current block number")
 	}
 }
cmd/root.go (11 changes)
@@ -40,6 +40,7 @@ var (
 	cfgFile string
 	databaseConfig config.Database
 	genConfig config.Plugin
+	subscriptionConfig config.Subscription
 	ipc string
 	levelDbPath string
 	queueRecheckInterval time.Duration
@@ -48,8 +49,8 @@ var (
 	syncAll bool
 	endingBlockNumber int64
 	recheckHeadersArg bool
-	SubCommand string
-	LogWithCommand log.Entry
+	subCommand string
+	logWithCommand log.Entry
 	storageDiffsSource string
 )
 
@@ -169,7 +170,7 @@ func getClients() (client.RPCClient, *ethclient.Client) {
 	rawRPCClient, err := rpc.Dial(ipc)
 
 	if err != nil {
-		LogWithCommand.Fatal(err)
+		logWithCommand.Fatal(err)
 	}
 	rpcClient := client.NewRPCClient(rawRPCClient, ipc)
 	ethClient := ethclient.NewClient(rawRPCClient)
@@ -180,11 +181,11 @@ func getClients() (client.RPCClient, *ethclient.Client) {
 func getWSClient() core.RPCClient {
 	wsRPCpath := viper.GetString("client.wsPath")
 	if wsRPCpath == "" {
-		LogWithCommand.Fatal(errors.New("getWSClient() was called but no ws rpc path is provided"))
+		logWithCommand.Fatal(errors.New("getWSClient() was called but no ws rpc path is provided"))
 	}
 	wsRPCClient, dialErr := rpc.Dial(wsRPCpath)
 	if dialErr != nil {
-		LogWithCommand.Fatal(dialErr)
+		logWithCommand.Fatal(dialErr)
 	}
 	return client.NewRPCClient(wsRPCClient, wsRPCpath)
 }
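Note: getWSClient above reads its websocket endpoint from the client.wsPath key and fails if it is empty. For illustration only (the URL is hypothetical), a matching fragment:

    [client]
    wsPath = "ws://127.0.0.1:8546"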
cmd/screenAndServe.go (new file, 126 lines)
@@ -0,0 +1,126 @@
// Copyright © 2019 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"os"
	"path/filepath"
	syn "sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	"github.com/vulcanize/vulcanizedb/pkg/config"
	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
	"github.com/vulcanize/vulcanizedb/pkg/super_node"
	"github.com/vulcanize/vulcanizedb/utils"
)

// screenAndServeCmd represents the screenAndServe command
var screenAndServeCmd = &cobra.Command{
	Use:   "screenAndServe",
	Short: "Serve super-node data requests to requesting clients",
	Long: ` It then opens up WS and IPC servers on top of the super-node ETH-IPLD index which
relays relevant data to requesting clients. In this mode, the super-node can only relay data which it has
already indexed it does not stream out live data.`,
	Run: func(cmd *cobra.Command, args []string) {
		subCommand = cmd.CalledAs()
		logWithCommand = *log.WithField("SubCommand", subCommand)
		screenAndServe()
	},
}

func init() {
	rootCmd.AddCommand(screenAndServeCmd)
}

func screenAndServe() {
	superNode, newNodeErr := newSuperNodeWithoutPairedGethNode()
	if newNodeErr != nil {
		logWithCommand.Fatal(newNodeErr)
	}
	wg := &syn.WaitGroup{}
	quitChan := make(chan bool, 1)
	emptyPayloadChan := make(chan ipfs.IPLDPayload)
	superNode.ScreenAndServe(wg, emptyPayloadChan, quitChan)

	serverErr := startServers(superNode)
	if serverErr != nil {
		logWithCommand.Fatal(serverErr)
	}
	wg.Wait()
}

func startServers(superNode super_node.NodeInterface) error {
	var ipcPath string
	ipcPath = viper.GetString("server.ipcPath")
	if ipcPath == "" {
		home, homeDirErr := os.UserHomeDir()
		if homeDirErr != nil {
			return homeDirErr
		}
		ipcPath = filepath.Join(home, ".vulcanize/vulcanize.ipc")
	}
	_, _, ipcErr := rpc.StartIPCEndpoint(ipcPath, superNode.APIs())
	if ipcErr != nil {
		return ipcErr
	}

	var wsEndpoint string
	wsEndpoint = viper.GetString("server.wsEndpoint")
	if wsEndpoint == "" {
		wsEndpoint = "127.0.0.1:8080"
	}
	var exposeAll = true
	var wsOrigins []string
	_, _, wsErr := rpc.StartWSEndpoint(wsEndpoint, superNode.APIs(), []string{"vdb"}, wsOrigins, exposeAll)
	if wsErr != nil {
		return wsErr
	}
	return nil
}

func newSuperNodeWithoutPairedGethNode() (super_node.NodeInterface, error) {
	ipfsPath = viper.GetString("client.ipfsPath")
	if ipfsPath == "" {
		home, homeDirErr := os.UserHomeDir()
		if homeDirErr != nil {
			return nil, homeDirErr
		}
		ipfsPath = filepath.Join(home, ".ipfs")
	}
	ipfsInitErr := ipfs.InitIPFSPlugins()
	if ipfsInitErr != nil {
		return nil, ipfsInitErr
	}
	ipldFetcher, newFetcherErr := ipfs.NewIPLDFetcher(ipfsPath)
	if newFetcherErr != nil {
		return nil, newFetcherErr
	}
	db := utils.LoadPostgres(databaseConfig, core.Node{})
	return &super_node.Service{
		IPLDFetcher:       ipldFetcher,
		Retriever:         super_node.NewCIDRetriever(&db),
		Resolver:          ipfs.NewIPLDResolver(),
		Subscriptions:     make(map[common.Hash]map[rpc.ID]super_node.Subscription),
		SubscriptionTypes: make(map[common.Hash]config.Subscription),
		GethNode:          core.Node{},
	}, nil
}
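Note: startServers above falls back to $HOME/.vulcanize/vulcanize.ipc and 127.0.0.1:8080 when no values are configured. A hypothetical fragment covering the viper keys it reads (values illustrative only):

    [server]
    ipcPath = "/home/user/.vulcanize/vulcanize.ipc"
    wsEndpoint = "127.0.0.1:8080"

    [client]
    ipfsPath = "/home/user/.ipfs"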
cmd/streamSubscribe.go (new file, 229 lines)
@@ -0,0 +1,229 @@
// Copyright © 2019 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bytes"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/rpc"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
	"github.com/vulcanize/vulcanizedb/pkg/config"
	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/eth/client"
)

// streamSubscribeCmd represents the streamSubscribe command
var streamSubscribeCmd = &cobra.Command{
	Use:   "streamSubscribe",
	Short: "This command is used to subscribe to the super node stream with the provided filters",
	Long: `This command is for demo and testing purposes and is used to subscribe to the super node with the provided subscription configuration parameters.
It does not do anything with the data streamed from the super node other than unpack it and print it out for demonstration purposes.`,
	Run: func(cmd *cobra.Command, args []string) {
		subCommand = cmd.CalledAs()
		logWithCommand = *log.WithField("SubCommand", subCommand)
		streamSubscribe()
	},
}

func init() {
	rootCmd.AddCommand(streamSubscribeCmd)
}

func streamSubscribe() {
	// Prep the subscription config/filters to be sent to the server
	configureSubscription()

	// Create a new rpc client and a subscription streamer with that client
	rpcClient := getRPCClient()
	str := streamer.NewSuperNodeStreamer(rpcClient)

	// Buffered channel for reading subscription payloads
	payloadChan := make(chan streamer.SuperNodePayload, 20000)

	// Subscribe to the super node service with the given config/filter parameters
	sub, err := str.Stream(payloadChan, subscriptionConfig)
	if err != nil {
		logWithCommand.Fatal(err)
	}
	logWithCommand.Info("awaiting payloads")
	// Receive response payloads and print out the results
	for {
		select {
		case payload := <-payloadChan:
			if payload.ErrMsg != "" {
				logWithCommand.Error(payload.ErrMsg)
				continue
			}
			for _, headerRlp := range payload.HeadersRlp {
				var header types.Header
				err = rlp.Decode(bytes.NewBuffer(headerRlp), &header)
				if err != nil {
					logWithCommand.Error(err)
					continue
				}
				fmt.Printf("Header number %d, hash %s\n", header.Number.Int64(), header.Hash().Hex())
				fmt.Printf("header: %v\n", header)
			}
			for _, trxRlp := range payload.TransactionsRlp {
				var trx types.Transaction
				buff := bytes.NewBuffer(trxRlp)
				stream := rlp.NewStream(buff, 0)
				err := trx.DecodeRLP(stream)
				if err != nil {
					logWithCommand.Error(err)
					continue
				}
				fmt.Printf("Transaction with hash %s\n", trx.Hash().Hex())
				fmt.Printf("trx: %v\n", trx)
			}
			for _, rctRlp := range payload.ReceiptsRlp {
				var rct types.ReceiptForStorage
				buff := bytes.NewBuffer(rctRlp)
				stream := rlp.NewStream(buff, 0)
				err = rct.DecodeRLP(stream)
				if err != nil {
					logWithCommand.Error(err)
					continue
				}
				fmt.Printf("Receipt with block hash %s, trx hash %s\n", rct.BlockHash.Hex(), rct.TxHash.Hex())
				fmt.Printf("rct: %v\n", rct)
				for _, l := range rct.Logs {
					if len(l.Topics) < 1 {
						logWithCommand.Error(fmt.Sprintf("log only has %d topics", len(l.Topics)))
						continue
					}
					fmt.Printf("Log for block hash %s, trx hash %s, address %s, and with topic0 %s\n",
						l.BlockHash.Hex(), l.TxHash.Hex(), l.Address.Hex(), l.Topics[0].Hex())
					fmt.Printf("log: %v\n", l)
				}
			}
			// This assumes leafs only
			for key, stateRlp := range payload.StateNodesRlp {
				var acct state.Account
				err = rlp.Decode(bytes.NewBuffer(stateRlp), &acct)
				if err != nil {
					logWithCommand.Error(err)
					continue
				}
				fmt.Printf("Account for key %s, and root %s, with balance %d\n",
					key.Hex(), acct.Root.Hex(), acct.Balance.Int64())
				fmt.Printf("state account: %v\n", acct)
			}
			for stateKey, mappedRlp := range payload.StorageNodesRlp {
				fmt.Printf("Storage for state key %s ", stateKey.Hex())
				for storageKey, storageRlp := range mappedRlp {
					fmt.Printf("with storage key %s\n", storageKey.Hex())
					var i []interface{}
					err := rlp.DecodeBytes(storageRlp, &i)
					if err != nil {
						logWithCommand.Error(err)
						continue
					}
					// if a leaf node
					if len(i) == 2 {
						keyBytes, ok := i[0].([]byte)
						if !ok {
							continue
						}
						valueBytes, ok := i[1].([]byte)
						if !ok {
							continue
						}
						fmt.Printf("Storage leaf key: %s, and value hash: %s\n",
							common.BytesToHash(keyBytes).Hex(), common.BytesToHash(valueBytes).Hex())
					}
				}
			}
		case err = <-sub.Err():
			logWithCommand.Fatal(err)
		}
	}
}

func configureSubscription() {
	logWithCommand.Info("loading subscription config")
	subscriptionConfig = config.Subscription{
		// Below default to false, which means we do not backfill by default
		BackFill:     viper.GetBool("subscription.backfill"),
		BackFillOnly: viper.GetBool("subscription.backfillOnly"),

		// Below default to 0
		// 0 start means we start at the beginning and 0 end means we continue indefinitely
		StartingBlock: big.NewInt(viper.GetInt64("subscription.startingBlock")),
		EndingBlock:   big.NewInt(viper.GetInt64("subscription.endingBlock")),

		// Below default to false, which means we get all headers by default
		HeaderFilter: config.HeaderFilter{
			Off:    viper.GetBool("subscription.headerFilter.off"),
			Uncles: viper.GetBool("subscription.headerFilter.uncles"),
		},

		// Below defaults to false and two slices of length 0
		// Which means we get all transactions by default
		TrxFilter: config.TrxFilter{
			Off: viper.GetBool("subscription.trxFilter.off"),
			Src: viper.GetStringSlice("subscription.trxFilter.src"),
			Dst: viper.GetStringSlice("subscription.trxFilter.dst"),
		},

		// Below defaults to false and one slice of length 0
		// Which means we get all receipts by default
		ReceiptFilter: config.ReceiptFilter{
			Off:       viper.GetBool("subscription.receiptFilter.off"),
			Contracts: viper.GetStringSlice("subscription.receiptFilter.contracts"),
			Topic0s:   viper.GetStringSlice("subscription.receiptFilter.topic0s"),
		},

		// Below defaults to two false, and a slice of length 0
		// Which means we get all state leafs by default, but no intermediate nodes
		StateFilter: config.StateFilter{
			Off:               viper.GetBool("subscription.stateFilter.off"),
			IntermediateNodes: viper.GetBool("subscription.stateFilter.intermediateNodes"),
			Addresses:         viper.GetStringSlice("subscription.stateFilter.addresses"),
		},

		// Below defaults to two false, and two slices of length 0
		// Which means we get all storage leafs by default, but no intermediate nodes
		StorageFilter: config.StorageFilter{
			Off:               viper.GetBool("subscription.storageFilter.off"),
			IntermediateNodes: viper.GetBool("subscription.storageFilter.intermediateNodes"),
			Addresses:         viper.GetStringSlice("subscription.storageFilter.addresses"),
			StorageKeys:       viper.GetStringSlice("subscription.storageFilter.storageKeys"),
		},
	}
}

func getRPCClient() core.RPCClient {
	vulcPath := viper.GetString("subscription.path")
	if vulcPath == "" {
		vulcPath = "ws://127.0.0.1:8080" // default to and try the default ws url if no path is provided
	}
	rawRPCClient, err := rpc.Dial(vulcPath)
	if err != nil {
		logWithCommand.Fatal(err)
	}
	return client.NewRPCClient(rawRPCClient, vulcPath)
}
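Note: configureSubscription above maps the subscription namespace onto config.Subscription, with every filter defaulting to off/empty (stream everything). A hypothetical fragment touching a few of the keys it reads (values illustrative only):

    [subscription]
    path = "ws://127.0.0.1:8080"
    backfill = false
    startingBlock = 0
    endingBlock = 0
        [subscription.headerFilter]
        off = false
        uncles = false
        [subscription.receiptFilter]
        off = false
        contracts = []
        topic0s = []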
cmd/syncAndPublish.go (new file, 124 lines)
@@ -0,0 +1,124 @@
// Copyright © 2019 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"os"
	"path/filepath"
	syn "sync"
	"time"

	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/rpc"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/eth"
	"github.com/vulcanize/vulcanizedb/pkg/eth/client"
	vRpc "github.com/vulcanize/vulcanizedb/pkg/eth/converters/rpc"
	"github.com/vulcanize/vulcanizedb/pkg/eth/node"
	"github.com/vulcanize/vulcanizedb/pkg/super_node"
	"github.com/vulcanize/vulcanizedb/utils"
)

// syncAndPublishCmd represents the syncAndPublish command
var syncAndPublishCmd = &cobra.Command{
	Use:   "syncAndPublish",
	Short: "Syncs all Ethereum data into IPFS, indexing the CIDs",
	Long: `This command works alongside a modified geth node which streams
all block and state (diff) data over a websocket subscription. This process
then converts the eth data to IPLD objects and publishes them to IPFS. Additionally,
it maintains a local index of the IPLD objects' CIDs in Postgres.`,
	Run: func(cmd *cobra.Command, args []string) {
		subCommand = cmd.CalledAs()
		logWithCommand = *log.WithField("SubCommand", subCommand)
		syncAndPublish()
	},
}

var ipfsPath string

func init() {
	rootCmd.AddCommand(syncAndPublishCmd)
}

func syncAndPublish() {
	superNode, newNodeErr := newSuperNode()
	if newNodeErr != nil {
		logWithCommand.Fatal(newNodeErr)
	}
	wg := &syn.WaitGroup{}
	syncAndPubErr := superNode.SyncAndPublish(wg, nil, nil)
	if syncAndPubErr != nil {
		logWithCommand.Fatal(syncAndPubErr)
	}
	if viper.GetBool("superNodeBackFill.on") && viper.GetString("superNodeBackFill.rpcPath") != "" {
		backfiller, newBackFillerErr := newBackFiller()
		if newBackFillerErr != nil {
			logWithCommand.Fatal(newBackFillerErr)
		}
		backfiller.FillGaps(wg, nil)
	}
	wg.Wait() // If an error was thrown, wg.Add was never called and this will fall through
}

func getBlockChainAndClient(path string) (*eth.BlockChain, core.RPCClient) {
	rawRPCClient, dialErr := rpc.Dial(path)
	if dialErr != nil {
		logWithCommand.Fatal(dialErr)
	}
	rpcClient := client.NewRPCClient(rawRPCClient, ipc)
	ethClient := ethclient.NewClient(rawRPCClient)
	vdbEthClient := client.NewEthClient(ethClient)
	vdbNode := node.MakeNode(rpcClient)
	transactionConverter := vRpc.NewRPCTransactionConverter(ethClient)
	blockChain := eth.NewBlockChain(vdbEthClient, rpcClient, vdbNode, transactionConverter)
	return blockChain, rpcClient
}

func newSuperNode() (super_node.NodeInterface, error) {
	blockChain, rpcClient := getBlockChainAndClient(ipc)
	db := utils.LoadPostgres(databaseConfig, blockChain.Node())
	quitChan := make(chan bool)
	ipfsPath = viper.GetString("client.ipfsPath")
	if ipfsPath == "" {
		home, homeDirErr := os.UserHomeDir()
		if homeDirErr != nil {
			logWithCommand.Fatal(homeDirErr)
		}
		ipfsPath = filepath.Join(home, ".ipfs")
	}
	workers := viper.GetInt("client.workers")
	if workers < 1 {
		workers = 1
	}
	return super_node.NewSuperNode(ipfsPath, &db, rpcClient, quitChan, workers, blockChain.Node())
}

func newBackFiller() (super_node.BackFillInterface, error) {
	blockChain, archivalRPCClient := getBlockChainAndClient(viper.GetString("superNodeBackFill.rpcPath"))
	db := utils.LoadPostgres(databaseConfig, blockChain.Node())
	freq := viper.GetInt("superNodeBackFill.frequency")
	var frequency time.Duration
	if freq <= 0 {
		frequency = time.Minute * 5
	} else {
		frequency = time.Duration(freq)
	}
	return super_node.NewBackFillService(ipfsPath, &db, archivalRPCClient, time.Minute*frequency, super_node.DefaultMaxBatchSize)
}
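Note: newSuperNode and newBackFiller above read their settings from the client and superNodeBackFill namespaces; gap backfilling only runs when superNodeBackFill.on is true and an rpcPath is set. A hypothetical fragment (paths and values illustrative only):

    [client]
    ipfsPath = "/home/user/.ipfs"
    workers = 1

    [superNodeBackFill]
    on = true
    rpcPath = "/Users/user/Library/Ethereum/geth.ipc"
    frequency = 5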
cmd/syncPublishScreenAndServe.go (new file, 75 lines)
@@ -0,0 +1,75 @@
// Copyright © 2019 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	syn "sync"

	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)

// syncPublishScreenAndServeCmd represents the syncPublishScreenAndServe command
var syncPublishScreenAndServeCmd = &cobra.Command{
	Use:   "syncPublishScreenAndServe",
	Short: "Syncs all Ethereum data into IPFS, indexing the CIDs, and uses this to serve data requests to requesting clients",
	Long: `This command works alongside a modified geth node which streams
all block and state (diff) data over a websocket subscription. This process
then converts the eth data to IPLD objects and publishes them to IPFS. Additionally,
it maintains a local index of the IPLD objects' CIDs in Postgres. It then opens up a server which
relays relevant data to requesting clients.`,
	Run: func(cmd *cobra.Command, args []string) {
		subCommand = cmd.CalledAs()
		logWithCommand = *log.WithField("SubCommand", subCommand)
		syncPublishScreenAndServe()
	},
}

func init() {
	rootCmd.AddCommand(syncPublishScreenAndServeCmd)
}

func syncPublishScreenAndServe() {
	superNode, newNodeErr := newSuperNode()
	if newNodeErr != nil {
		logWithCommand.Fatal(newNodeErr)
	}

	wg := &syn.WaitGroup{}
	forwardPayloadChan := make(chan ipfs.IPLDPayload, 20000)
	forwardQuitChan := make(chan bool, 1)
	syncAndPubErr := superNode.SyncAndPublish(wg, forwardPayloadChan, forwardQuitChan)
	if syncAndPubErr != nil {
		logWithCommand.Fatal(syncAndPubErr)
	}
	superNode.ScreenAndServe(wg, forwardPayloadChan, forwardQuitChan)
	if viper.GetBool("superNodeBackFill.on") && viper.GetString("superNodeBackFill.rpcPath") != "" {
		backfiller, newBackFillerErr := newBackFiller()
		if newBackFillerErr != nil {
			logWithCommand.Fatal(newBackFillerErr)
		}
		backfiller.FillGaps(wg, nil)
	}

	serverErr := startServers(superNode)
	if serverErr != nil {
		logWithCommand.Fatal(serverErr)
	}
	wg.Wait()
}
@@ -1,5 +1,5 @@
 -- +goose Up
-CREATE TABLE public.blocks (
+CREATE TABLE public.eth_blocks (
   id SERIAL PRIMARY KEY,
   difficulty BIGINT,
   extra_data VARCHAR,
@@ -20,4 +20,4 @@ CREATE TABLE public.blocks (
 
 
 -- +goose Down
-DROP TABLE public.blocks;
+DROP TABLE public.eth_blocks;
@@ -1,7 +1,7 @@
 -- +goose Up
 CREATE TABLE full_sync_transactions (
   id SERIAL PRIMARY KEY,
-  block_id INTEGER NOT NULL REFERENCES blocks(id) ON DELETE CASCADE,
+  block_id INTEGER NOT NULL REFERENCES eth_blocks(id) ON DELETE CASCADE,
   gas_limit NUMERIC,
   gas_price NUMERIC,
   hash VARCHAR(66),
@@ -1,5 +1,5 @@
 -- +goose Up
-CREATE INDEX number_index ON blocks (number);
+CREATE INDEX number_index ON eth_blocks (number);
 
 
 -- +goose Down
@@ -1,5 +1,5 @@
 -- +goose Up
-ALTER TABLE blocks
+ALTER TABLE eth_blocks
   ADD COLUMN node_id INTEGER NOT NULL,
   ADD CONSTRAINT node_fk
 FOREIGN KEY (node_id)
@@ -7,5 +7,5 @@ REFERENCES nodes (id)
 ON DELETE CASCADE;
 
 -- +goose Down
-ALTER TABLE blocks
+ALTER TABLE eth_blocks
   DROP COLUMN node_id;
@@ -1,7 +1,7 @@
 -- +goose Up
-ALTER TABLE blocks
+ALTER TABLE eth_blocks
   ADD CONSTRAINT node_id_block_number_uc UNIQUE (number, node_id);
 
 -- +goose Down
-ALTER TABLE blocks
+ALTER TABLE eth_blocks
   DROP CONSTRAINT node_id_block_number_uc;
@@ -1,5 +1,5 @@
 -- +goose Up
-CREATE INDEX node_id_index ON blocks (node_id);
+CREATE INDEX node_id_index ON eth_blocks (node_id);
 
 -- +goose Down
 DROP INDEX node_id_index;
@@ -7,14 +7,14 @@ ALTER TABLE public.eth_nodes DROP CONSTRAINT node_uc;
 ALTER TABLE public.eth_nodes
   ADD CONSTRAINT eth_node_uc UNIQUE (genesis_block, network_id, eth_node_id);
 
-ALTER TABLE public.blocks RENAME COLUMN node_id TO eth_node_id;
+ALTER TABLE public.eth_blocks RENAME COLUMN node_id TO eth_node_id;
 
-ALTER TABLE public.blocks DROP CONSTRAINT node_id_block_number_uc;
-ALTER TABLE public.blocks
+ALTER TABLE public.eth_blocks DROP CONSTRAINT node_id_block_number_uc;
+ALTER TABLE public.eth_blocks
   ADD CONSTRAINT eth_node_id_block_number_uc UNIQUE (number, eth_node_id);
 
-ALTER TABLE public.blocks DROP CONSTRAINT node_fk;
-ALTER TABLE public.blocks
+ALTER TABLE public.eth_blocks DROP CONSTRAINT node_fk;
+ALTER TABLE public.eth_blocks
   ADD CONSTRAINT node_fk
 FOREIGN KEY (eth_node_id) REFERENCES eth_nodes (id) ON DELETE CASCADE;
 
@@ -31,13 +31,13 @@ ALTER TABLE public.nodes
 ALTER TABLE public.nodes
   ADD CONSTRAINT node_uc UNIQUE (genesis_block, network_id, node_id);
 
-ALTER TABLE public.blocks RENAME COLUMN eth_node_id TO node_id;
+ALTER TABLE public.eth_blocks RENAME COLUMN eth_node_id TO node_id;
 
-ALTER TABLE public.blocks DROP CONSTRAINT eth_node_id_block_number_uc;
-ALTER TABLE public.blocks
+ALTER TABLE public.eth_blocks DROP CONSTRAINT eth_node_id_block_number_uc;
+ALTER TABLE public.eth_blocks
   ADD CONSTRAINT node_id_block_number_uc UNIQUE (number, node_id);
 
-ALTER TABLE public.blocks DROP CONSTRAINT node_fk;
-ALTER TABLE public.blocks
+ALTER TABLE public.eth_blocks DROP CONSTRAINT node_fk;
+ALTER TABLE public.eth_blocks
   ADD CONSTRAINT node_fk
 FOREIGN KEY (node_id) REFERENCES nodes (id) ON DELETE CASCADE;
@@ -11,9 +11,9 @@ ALTER TABLE full_sync_receipts
 ALTER COLUMN block_id SET NOT NULL;
 
 ALTER TABLE full_sync_receipts
-  ADD CONSTRAINT blocks_fk
+  ADD CONSTRAINT eth_blocks_fk
 FOREIGN KEY (block_id)
-REFERENCES blocks (id)
+REFERENCES eth_blocks (id)
 ON DELETE CASCADE;
 
 ALTER TABLE full_sync_receipts
@@ -1,16 +1,16 @@
 -- +goose Up
-ALTER TABLE blocks
+ALTER TABLE eth_blocks
   ADD COLUMN eth_node_fingerprint VARCHAR(128);
 
-UPDATE blocks
+UPDATE eth_blocks
   SET eth_node_fingerprint = (
-    SELECT eth_node_id FROM eth_nodes WHERE eth_nodes.id = blocks.eth_node_id
+    SELECT eth_node_id FROM eth_nodes WHERE eth_nodes.id = eth_blocks.eth_node_id
   );
 
-ALTER TABLE blocks
+ALTER TABLE eth_blocks
   ALTER COLUMN eth_node_fingerprint SET NOT NULL;
 
 
 -- +goose Down
-ALTER TABLE blocks
+ALTER TABLE eth_blocks
   DROP COLUMN eth_node_fingerprint;
@@ -2,7 +2,7 @@
 CREATE TABLE public.uncles (
   id SERIAL PRIMARY KEY,
   hash VARCHAR(66) NOT NULL,
-  block_id INTEGER NOT NULL REFERENCES blocks (id) ON DELETE CASCADE,
+  block_id INTEGER NOT NULL REFERENCES eth_blocks (id) ON DELETE CASCADE,
   reward NUMERIC NOT NULL,
   miner VARCHAR(42) NOT NULL,
   raw JSONB,
db/migrations/00031_create_header_cids_table.sql (new file, 12 lines)
@@ -0,0 +1,12 @@
-- +goose Up
CREATE TABLE public.header_cids (
  id SERIAL PRIMARY KEY,
  block_number BIGINT NOT NULL,
  block_hash VARCHAR(66) NOT NULL,
  cid TEXT NOT NULL,
  uncle BOOLEAN NOT NULL,
  UNIQUE (block_number, block_hash)
);

-- +goose Down
DROP TABLE public.header_cids;

db/migrations/00032_create_transaction_cids_table.sql (new file, 13 lines)
@@ -0,0 +1,13 @@
-- +goose Up
CREATE TABLE public.transaction_cids (
  id SERIAL PRIMARY KEY,
  header_id INTEGER NOT NULL REFERENCES header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
  tx_hash VARCHAR(66) NOT NULL,
  cid TEXT NOT NULL,
  dst VARCHAR(66) NOT NULL,
  src VARCHAR(66) NOT NULL,
  UNIQUE (header_id, tx_hash)
);

-- +goose Down
DROP TABLE public.transaction_cids;

db/migrations/00033_create_receipt_cids_table.sql (new file, 11 lines)
@@ -0,0 +1,11 @@
-- +goose Up
CREATE TABLE public.receipt_cids (
  id SERIAL PRIMARY KEY,
  tx_id INTEGER NOT NULL REFERENCES transaction_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
  cid TEXT NOT NULL,
  contract VARCHAR(66),
  topic0s VARCHAR(66)[]
);

-- +goose Down
DROP TABLE public.receipt_cids;

db/migrations/00034_create_state_cids_table.sql (new file, 12 lines)
@@ -0,0 +1,12 @@
-- +goose Up
CREATE TABLE public.state_cids (
  id SERIAL PRIMARY KEY,
  header_id INTEGER NOT NULL REFERENCES header_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
  state_key VARCHAR(66) NOT NULL,
  leaf BOOLEAN NOT NULL,
  cid TEXT NOT NULL,
  UNIQUE (header_id, state_key)
);

-- +goose Down
DROP TABLE public.state_cids;

db/migrations/00035_create_storage_cids_table.sql (new file, 12 lines)
@@ -0,0 +1,12 @@
-- +goose Up
CREATE TABLE public.storage_cids (
  id SERIAL PRIMARY KEY,
  state_id INTEGER NOT NULL REFERENCES state_cids (id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
  storage_key VARCHAR(66) NOT NULL,
  leaf BOOLEAN NOT NULL,
  cid TEXT NOT NULL,
  UNIQUE (state_id, storage_key)
);

-- +goose Down
DROP TABLE public.storage_cids;

db/migrations/00036_create_ipfs_blocks_table.sql (new file, 8 lines)
@@ -0,0 +1,8 @@
-- +goose Up
CREATE TABLE IF NOT EXISTS public.blocks (
  key TEXT UNIQUE NOT NULL,
  data BYTEA NOT NULL
);

-- +goose Down
DROP TABLE public.blocks;
db/schema.sql (449 changes)
@@ -85,47 +85,11 @@ CREATE VIEW public.block_stats AS
 --
 
 CREATE TABLE public.blocks (
-    id integer NOT NULL,
-    difficulty bigint,
-    extra_data character varying,
-    gas_limit bigint,
-    gas_used bigint,
-    hash character varying(66),
-    miner character varying(42),
-    nonce character varying(20),
-    number bigint,
-    parent_hash character varying(66),
-    reward numeric,
-    uncles_reward numeric,
-    size character varying,
-    "time" bigint,
-    is_final boolean,
-    uncle_hash character varying(66),
-    eth_node_id integer NOT NULL,
-    eth_node_fingerprint character varying(128) NOT NULL
+    key text NOT NULL,
+    data bytea NOT NULL
 );
 
 
 --
--- Name: blocks_id_seq; Type: SEQUENCE; Schema: public; Owner: -
---
-
-CREATE SEQUENCE public.blocks_id_seq
-    AS integer
-    START WITH 1
-    INCREMENT BY 1
-    NO MINVALUE
-    NO MAXVALUE
-    CACHE 1;
-
-
---
--- Name: blocks_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
---
-
-ALTER SEQUENCE public.blocks_id_seq OWNED BY public.blocks.id;
-
-
---
 -- Name: checked_headers; Type: TABLE; Schema: public; Owner: -
 --
@@ -156,6 +120,52 @@ CREATE SEQUENCE public.checked_headers_id_seq
 ALTER SEQUENCE public.checked_headers_id_seq OWNED BY public.checked_headers.id;
 
 
 --
+-- Name: eth_blocks; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.eth_blocks (
+    id integer NOT NULL,
+    difficulty bigint,
+    extra_data character varying,
+    gas_limit bigint,
+    gas_used bigint,
+    hash character varying(66),
+    miner character varying(42),
+    nonce character varying(20),
+    number bigint,
+    parent_hash character varying(66),
+    reward numeric,
+    uncles_reward numeric,
+    size character varying,
+    "time" bigint,
+    is_final boolean,
+    uncle_hash character varying(66),
+    eth_node_id integer NOT NULL,
+    eth_node_fingerprint character varying(128) NOT NULL
+);
+
+
+--
+-- Name: eth_blocks_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.eth_blocks_id_seq
+    AS integer
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: eth_blocks_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.eth_blocks_id_seq OWNED BY public.eth_blocks.id;
+
+
+--
 -- Name: eth_nodes; Type: TABLE; Schema: public; Owner: -
 --
@@ -297,6 +307,39 @@ CREATE SEQUENCE public.goose_db_version_id_seq
 ALTER SEQUENCE public.goose_db_version_id_seq OWNED BY public.goose_db_version.id;
 
 
 --
+-- Name: header_cids; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.header_cids (
+    id integer NOT NULL,
+    block_number bigint NOT NULL,
+    block_hash character varying(66) NOT NULL,
+    cid text NOT NULL,
+    uncle boolean NOT NULL
+);
+
+
+--
+-- Name: header_cids_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.header_cids_id_seq
+    AS integer
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: header_cids_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.header_cids_id_seq OWNED BY public.header_cids.id;
+
+
+--
 -- Name: header_sync_logs; Type: TABLE; Schema: public; Owner: -
 --
@@ -545,6 +588,139 @@ CREATE SEQUENCE public.queued_storage_id_seq
 ALTER SEQUENCE public.queued_storage_id_seq OWNED BY public.queued_storage.id;
 
 
 --
+-- Name: receipt_cids; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.receipt_cids (
+    id integer NOT NULL,
+    tx_id integer NOT NULL,
+    cid text NOT NULL,
+    contract character varying(66),
+    topic0s character varying(66)[]
+);
+
+
+--
+-- Name: receipt_cids_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.receipt_cids_id_seq
+    AS integer
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: receipt_cids_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.receipt_cids_id_seq OWNED BY public.receipt_cids.id;
+
+
+--
+-- Name: state_cids; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.state_cids (
+    id integer NOT NULL,
+    header_id integer NOT NULL,
+    state_key character varying(66) NOT NULL,
+    leaf boolean NOT NULL,
+    cid text NOT NULL
+);
+
+
+--
+-- Name: state_cids_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.state_cids_id_seq
+    AS integer
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: state_cids_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.state_cids_id_seq OWNED BY public.state_cids.id;
+
+
+--
+-- Name: storage_cids; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.storage_cids (
+    id integer NOT NULL,
+    state_id integer NOT NULL,
+    storage_key character varying(66) NOT NULL,
+    leaf boolean NOT NULL,
+    cid text NOT NULL
+);
+
+
+--
+-- Name: storage_cids_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.storage_cids_id_seq
+    AS integer
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: storage_cids_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.storage_cids_id_seq OWNED BY public.storage_cids.id;
+
+
+--
+-- Name: transaction_cids; Type: TABLE; Schema: public; Owner: -
+--
+
+CREATE TABLE public.transaction_cids (
+    id integer NOT NULL,
+    header_id integer NOT NULL,
+    tx_hash character varying(66) NOT NULL,
+    cid text NOT NULL,
+    dst character varying(66) NOT NULL,
+    src character varying(66) NOT NULL
+);
+
+
+--
+-- Name: transaction_cids_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE public.transaction_cids_id_seq
+    AS integer
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: transaction_cids_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE public.transaction_cids_id_seq OWNED BY public.transaction_cids.id;
+
+
+--
 -- Name: uncles; Type: TABLE; Schema: public; Owner: -
 --
@@ -674,13 +850,6 @@ ALTER SEQUENCE public.watched_logs_id_seq OWNED BY public.watched_logs.id;
 ALTER TABLE ONLY public.addresses ALTER COLUMN id SET DEFAULT nextval('public.addresses_id_seq'::regclass);
 
 
 --
--- Name: blocks id; Type: DEFAULT; Schema: public; Owner: -
---
-
-ALTER TABLE ONLY public.blocks ALTER COLUMN id SET DEFAULT nextval('public.blocks_id_seq'::regclass);
-
-
---
 -- Name: checked_headers id; Type: DEFAULT; Schema: public; Owner: -
 --
@@ -688,6 +857,13 @@ ALTER TABLE ONLY public.blocks ALTER COLUMN id SET DEFAULT nextval('public.block
 ALTER TABLE ONLY public.checked_headers ALTER COLUMN id SET DEFAULT nextval('public.checked_headers_id_seq'::regclass);
 
 
 --
+-- Name: eth_blocks id; Type: DEFAULT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.eth_blocks ALTER COLUMN id SET DEFAULT nextval('public.eth_blocks_id_seq'::regclass);
|
||||
|
||||
|
||||
--
|
||||
-- Name: eth_nodes id; Type: DEFAULT; Schema: public; Owner: -
|
||||
--
|
||||
@ -723,6 +899,13 @@ ALTER TABLE ONLY public.full_sync_transactions ALTER COLUMN id SET DEFAULT nextv
|
||||
ALTER TABLE ONLY public.goose_db_version ALTER COLUMN id SET DEFAULT nextval('public.goose_db_version_id_seq'::regclass);
|
||||
|
||||
|
||||
--
|
||||
-- Name: header_cids id; Type: DEFAULT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.header_cids ALTER COLUMN id SET DEFAULT nextval('public.header_cids_id_seq'::regclass);
|
||||
|
||||
|
||||
--
|
||||
-- Name: header_sync_logs id; Type: DEFAULT; Schema: public; Owner: -
|
||||
--
|
||||
@ -765,6 +948,34 @@ ALTER TABLE ONLY public.log_filters ALTER COLUMN id SET DEFAULT nextval('public.
|
||||
ALTER TABLE ONLY public.queued_storage ALTER COLUMN id SET DEFAULT nextval('public.queued_storage_id_seq'::regclass);
|
||||
|
||||
|
||||
--
|
||||
-- Name: receipt_cids id; Type: DEFAULT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.receipt_cids ALTER COLUMN id SET DEFAULT nextval('public.receipt_cids_id_seq'::regclass);
|
||||
|
||||
|
||||
--
|
||||
-- Name: state_cids id; Type: DEFAULT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.state_cids ALTER COLUMN id SET DEFAULT nextval('public.state_cids_id_seq'::regclass);
|
||||
|
||||
|
||||
--
|
||||
-- Name: storage_cids id; Type: DEFAULT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.storage_cids ALTER COLUMN id SET DEFAULT nextval('public.storage_cids_id_seq'::regclass);
|
||||
|
||||
|
||||
--
|
||||
-- Name: transaction_cids id; Type: DEFAULT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.transaction_cids ALTER COLUMN id SET DEFAULT nextval('public.transaction_cids_id_seq'::regclass);
|
||||
|
||||
|
||||
--
|
||||
-- Name: uncles id; Type: DEFAULT; Schema: public; Owner: -
|
||||
--
|
||||
@ -803,11 +1014,11 @@ ALTER TABLE ONLY public.addresses
|
||||
|
||||
|
||||
--
|
||||
-- Name: blocks blocks_pkey; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
-- Name: blocks blocks_key_key; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.blocks
|
||||
ADD CONSTRAINT blocks_pkey PRIMARY KEY (id);
|
||||
ADD CONSTRAINT blocks_key_key UNIQUE (key);
|
||||
|
||||
|
||||
--
|
||||
@ -827,10 +1038,18 @@ ALTER TABLE ONLY public.checked_headers
|
||||
|
||||
|
||||
--
|
||||
-- Name: blocks eth_node_id_block_number_uc; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
-- Name: eth_blocks eth_blocks_pkey; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.blocks
|
||||
ALTER TABLE ONLY public.eth_blocks
|
||||
ADD CONSTRAINT eth_blocks_pkey PRIMARY KEY (id);
|
||||
|
||||
|
||||
--
|
||||
-- Name: eth_blocks eth_node_id_block_number_uc; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.eth_blocks
|
||||
ADD CONSTRAINT eth_node_id_block_number_uc UNIQUE (number, eth_node_id);
|
||||
|
||||
|
||||
@ -874,6 +1093,22 @@ ALTER TABLE ONLY public.goose_db_version
|
||||
ADD CONSTRAINT goose_db_version_pkey PRIMARY KEY (id);
|
||||
|
||||
|
||||
--
|
||||
-- Name: header_cids header_cids_block_number_block_hash_key; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.header_cids
|
||||
ADD CONSTRAINT header_cids_block_number_block_hash_key UNIQUE (block_number, block_hash);
|
||||
|
||||
|
||||
--
|
||||
-- Name: header_cids header_cids_pkey; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.header_cids
|
||||
ADD CONSTRAINT header_cids_pkey PRIMARY KEY (id);
|
||||
|
||||
|
||||
--
|
||||
-- Name: header_sync_logs header_sync_logs_header_id_tx_index_log_index_key; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
@ -970,6 +1205,62 @@ ALTER TABLE ONLY public.queued_storage
|
||||
ADD CONSTRAINT queued_storage_pkey PRIMARY KEY (id);
|
||||
|
||||
|
||||
--
|
||||
-- Name: receipt_cids receipt_cids_pkey; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.receipt_cids
|
||||
ADD CONSTRAINT receipt_cids_pkey PRIMARY KEY (id);
|
||||
|
||||
|
||||
--
|
||||
-- Name: state_cids state_cids_header_id_state_key_key; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.state_cids
|
||||
ADD CONSTRAINT state_cids_header_id_state_key_key UNIQUE (header_id, state_key);
|
||||
|
||||
|
||||
--
|
||||
-- Name: state_cids state_cids_pkey; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.state_cids
|
||||
ADD CONSTRAINT state_cids_pkey PRIMARY KEY (id);
|
||||
|
||||
|
||||
--
|
||||
-- Name: storage_cids storage_cids_pkey; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.storage_cids
|
||||
ADD CONSTRAINT storage_cids_pkey PRIMARY KEY (id);
|
||||
|
||||
|
||||
--
|
||||
-- Name: storage_cids storage_cids_state_id_storage_key_key; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.storage_cids
|
||||
ADD CONSTRAINT storage_cids_state_id_storage_key_key UNIQUE (state_id, storage_key);
|
||||
|
||||
|
||||
--
|
||||
-- Name: transaction_cids transaction_cids_header_id_tx_hash_key; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.transaction_cids
|
||||
ADD CONSTRAINT transaction_cids_header_id_tx_hash_key UNIQUE (header_id, tx_hash);
|
||||
|
||||
|
||||
--
|
||||
-- Name: transaction_cids transaction_cids_pkey; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.transaction_cids
|
||||
ADD CONSTRAINT transaction_cids_pkey PRIMARY KEY (id);
|
||||
|
||||
|
||||
--
|
||||
-- Name: uncles uncles_block_id_hash_key; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
@ -1028,14 +1319,14 @@ CREATE INDEX headers_block_number ON public.headers USING btree (block_number);
|
||||
-- Name: node_id_index; Type: INDEX; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
CREATE INDEX node_id_index ON public.blocks USING btree (eth_node_id);
|
||||
CREATE INDEX node_id_index ON public.eth_blocks USING btree (eth_node_id);
|
||||
|
||||
|
||||
--
|
||||
-- Name: number_index; Type: INDEX; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
CREATE INDEX number_index ON public.blocks USING btree (number);
|
||||
CREATE INDEX number_index ON public.eth_blocks USING btree (number);
|
||||
|
||||
|
||||
--
|
||||
@ -1052,14 +1343,6 @@ CREATE INDEX tx_from_index ON public.full_sync_transactions USING btree (tx_from
|
||||
CREATE INDEX tx_to_index ON public.full_sync_transactions USING btree (tx_to);
|
||||
|
||||
|
||||
--
|
||||
-- Name: full_sync_receipts blocks_fk; Type: FK CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.full_sync_receipts
|
||||
ADD CONSTRAINT blocks_fk FOREIGN KEY (block_id) REFERENCES public.blocks(id) ON DELETE CASCADE;
|
||||
|
||||
|
||||
--
|
||||
-- Name: checked_headers checked_headers_header_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
@ -1068,6 +1351,14 @@ ALTER TABLE ONLY public.checked_headers
|
||||
ADD CONSTRAINT checked_headers_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
|
||||
|
||||
|
||||
--
|
||||
-- Name: full_sync_receipts eth_blocks_fk; Type: FK CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.full_sync_receipts
|
||||
ADD CONSTRAINT eth_blocks_fk FOREIGN KEY (block_id) REFERENCES public.eth_blocks(id) ON DELETE CASCADE;
|
||||
|
||||
|
||||
--
|
||||
-- Name: full_sync_receipts full_sync_receipts_contract_address_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
@ -1081,7 +1372,7 @@ ALTER TABLE ONLY public.full_sync_receipts
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.full_sync_transactions
|
||||
ADD CONSTRAINT full_sync_transactions_block_id_fkey FOREIGN KEY (block_id) REFERENCES public.blocks(id) ON DELETE CASCADE;
|
||||
ADD CONSTRAINT full_sync_transactions_block_id_fkey FOREIGN KEY (block_id) REFERENCES public.eth_blocks(id) ON DELETE CASCADE;
|
||||
|
||||
|
||||
--
|
||||
@ -1141,13 +1432,21 @@ ALTER TABLE ONLY public.headers
|
||||
|
||||
|
||||
--
|
||||
-- Name: blocks node_fk; Type: FK CONSTRAINT; Schema: public; Owner: -
|
||||
-- Name: eth_blocks node_fk; Type: FK CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.blocks
|
||||
ALTER TABLE ONLY public.eth_blocks
|
||||
ADD CONSTRAINT node_fk FOREIGN KEY (eth_node_id) REFERENCES public.eth_nodes(id) ON DELETE CASCADE;
|
||||
|
||||
|
||||
--
|
||||
-- Name: receipt_cids receipt_cids_tx_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.receipt_cids
|
||||
ADD CONSTRAINT receipt_cids_tx_id_fkey FOREIGN KEY (tx_id) REFERENCES public.transaction_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
|
||||
|
||||
|
||||
--
|
||||
-- Name: full_sync_logs receipts_fk; Type: FK CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
@ -1156,12 +1455,36 @@ ALTER TABLE ONLY public.full_sync_logs
|
||||
ADD CONSTRAINT receipts_fk FOREIGN KEY (receipt_id) REFERENCES public.full_sync_receipts(id) ON DELETE CASCADE;
|
||||
|
||||
|
||||
--
|
||||
-- Name: state_cids state_cids_header_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.state_cids
|
||||
ADD CONSTRAINT state_cids_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.header_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
|
||||
|
||||
|
||||
--
|
||||
-- Name: storage_cids storage_cids_state_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.storage_cids
|
||||
ADD CONSTRAINT storage_cids_state_id_fkey FOREIGN KEY (state_id) REFERENCES public.state_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
|
||||
|
||||
|
||||
--
|
||||
-- Name: transaction_cids transaction_cids_header_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.transaction_cids
|
||||
ADD CONSTRAINT transaction_cids_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.header_cids(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
|
||||
|
||||
|
||||
--
|
||||
-- Name: uncles uncles_block_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY public.uncles
|
||||
ADD CONSTRAINT uncles_block_id_fkey FOREIGN KEY (block_id) REFERENCES public.blocks(id) ON DELETE CASCADE;
|
||||
ADD CONSTRAINT uncles_block_id_fkey FOREIGN KEY (block_id) REFERENCES public.eth_blocks(id) ON DELETE CASCADE;
|
||||
|
||||
|
||||
--
|
||||
|
75
dockerfiles/seed_node/Dockerfile
Normal file
@ -0,0 +1,75 @@
|
||||
FROM golang:alpine
|
||||
|
||||
RUN apk --update --no-cache add make git g++ linux-headers
|
||||
# DEBUG
|
||||
RUN apk add busybox-extras
|
||||
|
||||
# this is probably a noob move, but I want apk from alpine for the above but need to avoid Go 1.13 below as this error still occurs https://github.com/ipfs/go-ipfs/issues/6603
|
||||
FROM golang:1.12.4 as builder
|
||||
|
||||
# Get and build the vulcanizedb ipfs_concurrency branch
|
||||
RUN go get -u -d github.com/vulcanize/vulcanizedb
|
||||
WORKDIR /go/src/github.com/vulcanize/vulcanizedb
|
||||
RUN git checkout ipfs_concurrency
|
||||
RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o vulcanizedb .
|
||||
|
||||
# Get and build vulcanize's go-ipfs fork
|
||||
RUN go get -u -d github.com/ipfs/go-ipfs
|
||||
WORKDIR /go/src/github.com/ipfs/go-ipfs
|
||||
RUN git remote add vulcanize https://github.com/vulcanize/go-ipfs.git
|
||||
RUN git fetch vulcanize
|
||||
RUN git checkout -b pg_ipfs vulcanize/postgres_update
|
||||
RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o ipfs ./cmd/ipfs
|
||||
|
||||
# Get and build vulcanize's geth fork
|
||||
RUN go get -u -d github.com/ethereum/go-ethereum
|
||||
WORKDIR /go/src/github.com/ethereum/go-ethereum
|
||||
RUN git remote add vulcanize https://github.com/vulcanize/go-ethereum.git
|
||||
RUN git fetch vulcanize
|
||||
RUN git checkout -b statediff_geth vulcanize/statediffing
|
||||
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o geth ./cmd/geth
|
||||
|
||||
# Build migration tool
|
||||
RUN go get -u -d github.com/pressly/goose/cmd/goose
|
||||
WORKDIR /go/src/github.com/pressly/goose/cmd/goose
|
||||
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -tags='no_mysql no_sqlite' -o goose .
|
||||
|
||||
WORKDIR /go/src/github.com/vulcanize/vulcanizedb
|
||||
|
||||
# app container
|
||||
FROM alpine
|
||||
WORKDIR /app
|
||||
|
||||
ARG USER
|
||||
ARG config_file=environments/syncPublishScreenAndServe.toml
|
||||
ARG vdb_dbname="vulcanize_public"
|
||||
ARG vdb_hostname="localhost"
|
||||
ARG vdb_port="5432"
|
||||
ARG vdb_user="postgres"
|
||||
ARG vdb_password
|
||||
|
||||
# setup environment
|
||||
ENV VDB_PG_NAME="$vdb_dbname"
|
||||
ENV VDB_PG_HOSTNAME="$vdb_hostname"
|
||||
ENV VDB_PG_PORT="$vdb_port"
|
||||
ENV VDB_PG_USER="$vdb_user"
|
||||
ENV VDB_PG_PASSWORD="$vdb_password"
|
||||
|
||||
RUN adduser -D -u 5000 $USER
|
||||
USER $USER
|
||||
|
||||
# chown first so dir is writable
|
||||
# note: support for variables like $USER in --chown is merged upstream, but not in the stable Docker release yet
|
||||
COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/vulcanizedb/$config_file config.toml
|
||||
COPY --chown=5000:5000 --from=builder /go/src/github.com/vulcanize/vulcanizedb/dockerfiles/super_node/startup_script.sh .
|
||||
|
||||
# keep binaries immutable
|
||||
COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/vulcanizedb vulcanizedb
|
||||
COPY --from=builder /go/src/github.com/pressly/goose/cmd/goose/goose goose
|
||||
COPY --from=builder /go/src/github.com/vulcanize/vulcanizedb/db/migrations migrations/vulcanizedb
|
||||
COPY --from=builder /go/src/github.com/ipfs/go-ipfs/ipfs ipfs
|
||||
COPY --from=builder /go/src/github.com/ethereum/go-ethereum/geth geth
|
||||
|
||||
EXPOSE 8080
|
||||
|
||||
CMD ["./startup_script.sh"]
|
71
dockerfiles/seed_node/startup_script.sh
Executable file
@ -0,0 +1,71 @@
|
||||
#!/bin/sh
|
||||
# Runs the db migrations and starts the super node services
|
||||
|
||||
# Exit if the variable tests fail
|
||||
set -e
|
||||
|
||||
# Check the database variables are set
|
||||
test $VDB_PG_NAME
|
||||
test $VDB_PG_HOSTNAME
|
||||
test $VDB_PG_PORT
|
||||
test $VDB_PG_USER
|
||||
set +e
|
||||
|
||||
# Export our database variables so that the IPFS Postgres plugin can use them
|
||||
export IPFS_PGHOST=$VDB_PG_HOSTNAME
|
||||
export IPFS_PGUSER=$VDB_PG_USER
|
||||
export IPFS_PGDATABASE=$VDB_PG_NAME
|
||||
export IPFS_PGPORT=$VDB_PG_PORT
|
||||
export IPFS_PGPASSWORD=$VDB_PG_PASSWORD
|
||||
|
||||
# Construct the connection string for postgres
|
||||
VDB_PG_CONNECT=postgresql://$VDB_PG_USER:$VDB_PG_PASSWORD@$VDB_PG_HOSTNAME:$VDB_PG_PORT/$VDB_PG_NAME?sslmode=disable
|
||||
|
||||
# Run the DB migrations
|
||||
echo "Connecting with: $VDB_PG_CONNECT"
|
||||
echo "Running database migrations"
|
||||
./goose -dir migrations/vulcanizedb postgres "$VDB_PG_CONNECT" up
|
||||
|
||||
# If the db migrations ran without err
|
||||
if [ $? -eq 0 ]; then
|
||||
# Initialize PG-IPFS
|
||||
echo "Initializing Postgres-IPFS profile"
|
||||
./ipfs init --profile=postgresds
|
||||
else
|
||||
echo "Could not run migrations. Are the database details correct?"
|
||||
exit
|
||||
fi
|
||||
|
||||
# If IPFS initialization was successful
|
||||
if [ $? -eq 0 ]; then
|
||||
# Begin the state-diffing Geth process
|
||||
echo "Beginning the state-diffing Geth process"
|
||||
./geth --statediff --statediff.streamblock --ws --syncmode=full 2>&1 | tee -a log.txt &
|
||||
sleep 1
|
||||
else
|
||||
echo "Could not initialize Postgres backed IPFS profile. Are the database details correct?"
|
||||
exit
|
||||
fi
|
||||
|
||||
# If Geth startup was successful
|
||||
if [ $? -eq 0 ]; then
|
||||
# Wait until block synchronisation has begun
|
||||
echo "Waiting for block synchronization to begin"
|
||||
( tail -f -n0 log.txt & ) | grep -q "Block synchronisation started" # this blocks til we see "Block synchronisation started"
|
||||
# And then spin up the syncPublishScreenAndServe Vulcanizedb service
|
||||
echo "Beginning the syncPublishScreenAndServe vulcanizedb process"
|
||||
./vulcanizedb syncPublishScreenAndServe --config=config.toml 2>&1 | tee -a log.txt &
|
||||
else
|
||||
echo "Could not initialize state-diffing Geth."
|
||||
exit
|
||||
fi
|
||||
|
||||
# If Vulcanizedb startup was successful
|
||||
if [ $? -eq 0 ]; then
|
||||
echo "Seed node successfully booted"
|
||||
else
|
||||
echo "Could not start vulcanizedb syncPublishScreenAndServe process. Is the config file correct?"
|
||||
exit
|
||||
fi
|
||||
|
||||
wait
|
@ -14,6 +14,8 @@ conform to the
|
||||
[standard-readme specification](https://github.com/RichardLitt/standard-readme).
|
||||
- Once a Pull Request has received two approvals it can be merged in by a core developer.
|
||||
|
||||
Pull requests should be opened against the `staging` branch. Periodically, updates on `staging` will be ported over to `master` for tagged release.
|
||||
|
||||
## Creating a new migration file
|
||||
1. `make new_migration NAME=add_columnA_to_table1`
|
||||
- This will create a new timestamped migration file in `db/migrations`
|
||||
|
@ -100,7 +100,11 @@ The config provides information for composing a set of transformers from externa
|
||||
|
||||
[client]
|
||||
ipcPath = "/Users/user/Library/Ethereum/geth.ipc"
|
||||
wsPath = "ws://127.0.0.1:8546"
|
||||
|
||||
[exporter]
|
||||
home = "github.com/vulcanize/vulcanizedb"
|
||||
@ -202,7 +206,7 @@ Storage transformers stream data from a geth subscription or parity csv file whe
|
||||
full sync progresses. If the transformers have missed consuming a range of diffs due to lag in the startup of the processes or due to misalignment of the sync,
|
||||
we can configure our storage transformers to backfill missing diffs from a [modified archival geth client](https://github.com/vulcanize/go-ethereum/tree/statediff_at).
|
||||
|
||||
To do so, add the following fields to the config file.
|
||||
To do so, add the following field to the config file.
|
||||
```toml
|
||||
[storageBackFill]
|
||||
on = false
|
||||
|
345
documentation/super-node.md
Normal file
@ -0,0 +1,345 @@
|
||||
# Seed Node
|
||||
|
||||
Vulcanizedb can act as an index for Ethereum data stored on IPFS through the use of the `syncAndPublish` and
|
||||
`syncPublishScreenAndServe` commands.
|
||||
|
||||
## Manual Setup
|
||||
|
||||
These commands work in conjunction with a [state-diffing full Geth node](https://github.com/vulcanize/go-ethereum/tree/statediffing)
|
||||
and IPFS.
|
||||
|
||||
### IPFS
|
||||
To start, download and install [IPFS](https://github.com/vulcanize/go-ipfs)
|
||||
|
||||
`go get github.com/ipfs/go-ipfs`
|
||||
|
||||
`cd $GOPATH/src/github.com/ipfs/go-ipfs`
|
||||
|
||||
`make install`
|
||||
|
||||
If we want to use Postgres as our backing datastore, we need to use the vulcanize fork of go-ipfs.
|
||||
|
||||
Start by adding the fork and switching over to it:
|
||||
|
||||
`git remote add vulcanize https://github.com/vulcanize/go-ipfs.git`
|
||||
|
||||
`git fetch vulcanize`
|
||||
|
||||
`git checkout -b postgres_update vulcanize/postgres_update`
|
||||
|
||||
Now install this fork of ipfs (be sure to remove any previous installation first).
|
||||
|
||||
`make install`
|
||||
|
||||
Check that it is installed properly by running
|
||||
|
||||
`ipfs`
|
||||
|
||||
You should see the CLI info/help output.
|
||||
|
||||
And now we initialize with the `postgresds` profile.
|
||||
If ipfs was previously initialized we will need to remove the old profile first.
|
||||
We also need to provide env variables for the postgres connection:
|
||||
|
||||
We can either set these manually, e.g.
|
||||
```bash
|
||||
export IPFS_PGHOST=
|
||||
export IPFS_PGUSER=
|
||||
export IPFS_PGDATABASE=
|
||||
export IPFS_PGPORT=
|
||||
export IPFS_PGPASSWORD=
|
||||
```
|
||||
|
||||
And then run the ipfs command
|
||||
|
||||
`ipfs init --profile=postgresds`
|
||||
|
||||
Or we can use the pre-made script at `$GOPATH/src/github.com/ipfs/go-ipfs/misc/utility/ipfs_postgres.sh`
|
||||
which has usage:
|
||||
|
||||
`./ipfs_postgres.sh <IPFS_PGHOST> <IPFS_PGPORT> <IPFS_PGUSER> <IPFS_PGDATABASE>`
|
||||
|
||||
and will prompt us to enter the password, avoiding storing it in an ENV variable.
|
||||
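For example, assuming the Postgres settings used elsewhere in this document (substitute your own host, port, user, and database):

`./ipfs_postgres.sh localhost 5432 postgres vulcanize_public`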
|
||||
Once we have initialized ipfs, that is all we need to do with it- we do not need to run a daemon during the subsequent processes (in fact, we can't).
|
||||
|
||||
### Geth
|
||||
For Geth, we currently *require* a special fork, and we can set this up as follows:
|
||||
|
||||
Begin by downloading geth and switching to the vulcanize/statediffing branch
|
||||
|
||||
`go get github.com/ethereum/go-ethereum`
|
||||
|
||||
`cd $GOPATH/src/github.com/ethereum/go-ethereum`
|
||||
|
||||
`git remote add vulcanize https://github.com/vulcanize/go-ethereum.git`
|
||||
|
||||
`git fetch vulcanize`
|
||||
|
||||
`git checkout -b statediffing vulcanize/statediffing`
|
||||
|
||||
Now, install this fork of geth (make sure any old versions have been uninstalled/binaries removed first)
|
||||
|
||||
`make geth`
|
||||
|
||||
And run the output binary with statediffing turned on:
|
||||
|
||||
`cd $GOPATH/src/github.com/ethereum/go-ethereum/build/bin`
|
||||
|
||||
`./geth --statediff --statediff.streamblock --ws --syncmode=full`
|
||||
|
||||
Note: other CLI options- statediff specific ones included- can be explored with `./geth help`
|
||||
|
||||
The output from geth should mention that it is `Starting statediff service` and block synchronization should begin shortly thereafter.
|
||||
Note that until it receives a subscriber, the statediffing process does essentially nothing. Once a subscription is received, this
|
||||
will be indicated in the output.
|
||||
|
||||
Also in the output will be the websocket url and ipc paths that we will use to subscribe to the statediffing process.
|
||||
The default ws url is "ws://127.0.0.1:8546" and the default ipcPath (on Darwin systems only) is "/Users/user/Library/Ethereum/geth.ipc"
|
||||
|
||||
### Vulcanizedb
|
||||
|
||||
There are two commands to choose from:
|
||||
|
||||
#### syncAndPublish
|
||||
|
||||
`syncAndPublish` performs the functions of the super node: syncing data from Geth, converting them to IPLDs,
|
||||
publishing those IPLDs to IPFS, and creating a local Postgres index to relate their CIDs to useful metadata.
|
||||
|
||||
Usage:
|
||||
|
||||
`./vulcanizedb syncAndPublish --config=<config_file.toml>`
|
||||
|
||||
The config file for the `syncAndPublish` command looks very similar to the basic config file
|
||||
```toml
|
||||
[database]
|
||||
name = "vulcanize_demo"
|
||||
hostname = "localhost"
|
||||
port = 5432
|
||||
|
||||
[client]
|
||||
ipcPath = "ws://127.0.0.1:8546"
|
||||
ipfsPath = "/Users/user/.ipfs"
|
||||
```
|
||||
|
||||
With two additional fields: `client.ipcPath`, which is either the ws url or the ipc path that Geth has exposed (the url and path output
|
||||
when the geth sync was started), and `client.ipfsPath`, which is the path to the ipfs datastore directory.
|
||||
|
||||
#### syncPublishScreenAndServe
|
||||
|
||||
`syncPublishScreenAndServe` does everything that `syncAndPublish` does, plus it opens up an RPC server which exposes
|
||||
an endpoint to allow transformers to subscribe to subsets of the sync-and-published data that are relevant to their transformations.
|
||||
|
||||
Usage:
|
||||
|
||||
`./vulcanizedb syncPublishScreenAndServe --config=<config_file.toml>`
|
||||
|
||||
The config file for the `syncPublishScreenAndServe` command has a few additional fields and looks like:
|
||||
|
||||
```toml
|
||||
[database]
|
||||
name = "vulcanize_demo"
|
||||
hostname = "localhost"
|
||||
port = 5432
|
||||
|
||||
[client]
|
||||
ipcPath = "ws://127.0.0.1:8546"
|
||||
ipfsPath = "/Users/user/.ipfs"
|
||||
|
||||
[server]
|
||||
ipcPath = "/Users/user/.vulcanize/vulcanize.ipc"
|
||||
wsEndpoint = "127.0.0.1:80"
|
||||
|
||||
[superNodeBackFill]
|
||||
on = false
|
||||
ipcPath = ""
|
||||
frequency = 5
|
||||
```
|
||||
|
||||
The additional `server.ipcPath` and `server.wsEndpoint` fields set the ipc endpoint and ws url over which
|
||||
the `syncPublishScreenAndServe` rpc server exposes itself to subscribing transformers, respectively.
|
||||
Any valid and available path and endpoint is acceptable, but keep in mind that this path and endpoint need to
|
||||
be known by transformers for them to subscribe to the super node.
|
||||
|
||||
Because the super node syncs data from a geth full node as it progresses through its block synchronization, there is potential
|
||||
for the super node to miss data both at the beginning of the sync due to lag between initialization of the two processes and
|
||||
anywhere throughout the sync if the processes are interrupted. The `superNodeBackFill` config mapping is used to optionally configure
|
||||
the super node with an archival geth client that exposes a `statediff.StateDiffAt` rpc endpoint, to enable it to fill in these data gaps.
|
||||
`superNodeBackFill.on` turns the backfill process on, the `superNodeBackFill.ipcPath` is the rpc path for the archival geth node, and `superNodeBackFill.frequency`
|
||||
sets at what frequency (in minutes) the backfill process checks for and fills in gaps.
|
||||
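For illustration, a filled-in version of that section might look like the following; the values are placeholders, with `ipcPath` pointing at the archival, statediffing geth node's rpc endpoint:

```toml
[superNodeBackFill]
on = true
ipcPath = "ws://127.0.0.1:8546" # example value: rpc path of the archival geth node
frequency = 5                   # check for and fill gaps every 5 minutes
```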
|
||||
|
||||
## Dockerfile Setup
|
||||
|
||||
The below provides step-by-step directions for how to set up the super node using the provided Dockerfile on an AWS Linux AMI instance.
|
||||
Note that the instance will need sufficient memory and storage for this to work.
|
||||
|
||||
1. Install basic dependencies
|
||||
```
|
||||
sudo yum update
|
||||
sudo yum install -y curl gpg gcc gcc-c++ make git
|
||||
```
|
||||
|
||||
2. Install Go 1.12
|
||||
```
|
||||
wget https://dl.google.com/go/go1.12.6.linux-amd64.tar.gz
|
||||
tar -xzf go1.12.6.linux-amd64.tar.gz
|
||||
sudo mv go /usr/local
|
||||
```
|
||||
|
||||
3. Edit .bash_profile to export GOPATH
|
||||
```
|
||||
export GOROOT=/usr/local/go
|
||||
export GOPATH=$HOME/go
|
||||
export PATH=$GOPATH/bin:$GOROOT/bin:$PATH
|
||||
```
|
||||
|
||||
4. Install and set up Postgres
|
||||
```
|
||||
sudo yum install postgresql postgresql96-server
|
||||
sudo service postgresql96 initdb
|
||||
sudo service postgresql96 start
|
||||
sudo -u postgres createuser -s ec2-user
|
||||
sudo -u postgres createdb ec2-user
|
||||
sudo su postgres
|
||||
psql
|
||||
ALTER USER "ec2-user" WITH SUPERUSER;
|
||||
\q
|
||||
exit
|
||||
```
|
||||
|
||||
4b. Edit hba_file to trust connections
|
||||
```
|
||||
psql
|
||||
SHOW hba_file;
|
||||
\q
|
||||
sudo vim {PATH_TO_FILE}
|
||||
```
|
||||
|
||||
4c. Stop and restart the Postgres server to effect the changes
|
||||
```
|
||||
sudo service postgresql96 stop
|
||||
sudo service postgresql96 start
|
||||
```
|
||||
|
||||
5. Install and start Docker (exit and re-enter the ec2 instance afterwards to effect the changes)
|
||||
```
|
||||
sudo yum install -y docker
|
||||
sudo service docker start
|
||||
sudo usermod -aG docker ec2-user
|
||||
```
|
||||
|
||||
6. Fetch the repository and switch to this working branch
|
||||
```
|
||||
go get github.com/vulcanize/vulcanizedb
|
||||
cd $GOPATH/src/github.com/vulcanize/vulcanizedb
|
||||
git checkout ipfs_concurrency
|
||||
```
|
||||
|
||||
7. Create the db
|
||||
```
|
||||
createdb vulcanize_public
|
||||
```
|
||||
|
||||
8. Build and run the Docker image
|
||||
```
|
||||
cd $GOPATH/src/github.com/vulcanize/vulcanizedb/dockerfiles/super_node
|
||||
docker build .
|
||||
docker run --network host -e VDB_PG_CONNECT=postgres://localhost:5432/vulcanize_public?sslmode=disable {IMAGE_ID}
|
||||
```
|
||||
|
||||
|
||||
## Subscribing
|
||||
|
||||
A transformer can subscribe to the `syncPublishScreenAndServe` service over its ipc or ws endpoints, when subscribing the transformer
|
||||
specifies which subsets of the synced data it is interested in and the server will forward only these data.
|
||||
|
||||
The `streamSubscribe` command serves as a simple demonstration/example of subscribing to the super-node feed: it subscribes with a set of parameters
|
||||
defined in the loaded config file, and prints the streamed data to stdout. To build transformers that subscribe to and use super-node data,
|
||||
the shared/libraries/streamer can be used.
|
||||
|
||||
Usage:
|
||||
|
||||
`./vulcanizedb streamSubscribe --config=<config_file.toml>`
|
||||
|
||||
The config for `streamSubscribe` has the `subscription` set of parameters, for example:
|
||||
|
||||
```toml
|
||||
[subscription]
|
||||
path = "ws://127.0.0.1:8080"
|
||||
backfill = true
|
||||
backfillOnly = false
|
||||
startingBlock = 0
|
||||
endingBlock = 0
|
||||
[subscription.headerFilter]
|
||||
off = false
|
||||
uncles = false
|
||||
[subscription.trxFilter]
|
||||
off = false
|
||||
src = [
|
||||
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
|
||||
]
|
||||
dst = [
|
||||
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
|
||||
]
|
||||
[subscription.receiptFilter]
|
||||
off = false
|
||||
topic0s = [
|
||||
"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
|
||||
"0x930a61a57a70a73c2a503615b87e2e54fe5b9cdeacda518270b852296ab1a377"
|
||||
]
|
||||
[subscription.stateFilter]
|
||||
off = false
|
||||
addresses = [
|
||||
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe"
|
||||
]
|
||||
intermediateNodes = false
|
||||
[subscription.storageFilter]
|
||||
off = true
|
||||
addresses = [
|
||||
"",
|
||||
""
|
||||
]
|
||||
storageKeys = [
|
||||
"",
|
||||
""
|
||||
]
|
||||
intermediateNodes = false
|
||||
```
|
||||
|
||||
`subscription.path` is used to define the ws url OR ipc endpoint we will subscribe to the super-node over
|
||||
(the `server.ipcPath` or `server.wsEndpoint` that the super-node has defined in their config file).
|
||||
|
||||
`subscription.backfill` specifies whether or not the super-node should look up historical data in its cache and
|
||||
send that to the subscriber; if this is set to `false`, the super-node only forwards newly synced/incoming data.
|
||||
|
||||
`subscription.backfillOnly` will tell the super-node to only send historical data and not stream incoming data going forward.
|
||||
|
||||
`subscription.startingBlock` is the starting block number for the range we want to receive data in.
|
||||
|
||||
`subscription.endingBlock` is the ending block number for the range we want to receive data in;
|
||||
setting to 0 means there is no end/we will continue indefinitely.
|
||||
|
||||
`subscription.headerFilter` has two sub-options: `off` and `uncles`. Setting `off` to true tells the super-node to
|
||||
not send any headers to the subscriber; setting `uncles` to true tells the super-node to send uncles in addition to normal headers.
|
||||
|
||||
`subscription.trxFilter` has three sub-options: `off`, `src`, and `dst`. Setting `off` to true tells the super-node to
|
||||
not send any transactions to the subscriber; `src` and `dst` are string arrays which can be filled with ETH addresses we want to filter transactions for;
|
||||
if they have any addresses then the super-node will only send transactions that were sent or received by the addresses contained
|
||||
in `src` and `dst`, respectively.
|
||||
|
||||
`subscription.receiptFilter` has two sub-options: `off` and `topic0s`. Setting `off` to true tells the super-node to
|
||||
not send any receipts to the subscriber; `topic0s` is a string array which can be filled with event topics we want to filter for;
|
||||
if it has any topics then the super-node will only send receipts that contain logs which have that topic0.
|
||||
|
||||
`subscription.stateFilter` has three sub-options: `off`, `addresses`, and `intermediateNodes`. Setting `off` to true tells the super-node to
|
||||
not send any state data to the subscriber; `addresses` is a string array which can be filled with ETH addresses we want to filter state for;
|
||||
if it has any addresses then the super-node will only send state leaves (accounts) corresponding to those account addresses. By default the super-node
|
||||
only sends along state leaves; if we want to receive branch and extension nodes as well, `intermediateNodes` can be set to `true`.
|
||||
|
||||
`subscription.storageFilter` has four sub-options: `off`, `addresses`, `storageKeys`, and `intermediateNodes`. Setting `off` to true tells the super-node to
|
||||
not send any storage data to the subscriber; `addresses` is a string array which can be filled with ETH addresses we want to filter storage for;
|
||||
if it has any addresses then the super-node will only send storage nodes from the storage tries at those state addresses. `storageKeys` is another string
|
||||
array that can be filled with storage keys we want to filter storage data for. It is important to note that the storageKeys are the actual keccak256 hashes, whereas
|
||||
the addresses in the `addresses` fields are the ETH addresses and not their keccak256 hashes that serve as the actual state keys. By default the super-node
|
||||
only sends along storage leaves; if we want to receive branch and extension nodes as well, `intermediateNodes` can be set to `true`.
|
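For those building their own transformers, a minimal sketch of a subscriber is shown below. It assumes go-ethereum's `rpc` client; the namespace, method name, and payload type used here are placeholders; the real definitions live in the shared/libraries/streamer package.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Dial the endpoint the super node exposes (server.wsEndpoint in its config).
	client, err := rpc.Dial("ws://127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}

	// Stand-in channel type; the real payload type is defined by the streamer package.
	payloads := make(chan interface{})

	// "vdb" and "stream" are assumed namespace/method names; check the streamer
	// package for the actual subscription API.
	sub, err := client.Subscribe(context.Background(), "vdb", payloads, "stream")
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()

	for {
		select {
		case payload := <-payloads:
			fmt.Printf("received payload: %+v\n", payload)
		case err := <-sub.Err():
			log.Fatal(err)
		}
	}
}
```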
35
environments/superNodeSubscription.toml
Normal file
@ -0,0 +1,35 @@
|
||||
[subscription]
|
||||
path = "ws://127.0.0.1:8080"
|
||||
backfill = true
|
||||
backfillOnly = false
|
||||
startingBlock = 0
|
||||
endingBlock = 0
|
||||
[subscription.headerFilter]
|
||||
off = false
|
||||
uncles = false
|
||||
[subscription.trxFilter]
|
||||
off = false
|
||||
src = [
|
||||
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
|
||||
]
|
||||
dst = [
|
||||
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe",
|
||||
]
|
||||
[subscription.receiptFilter]
|
||||
off = false
|
||||
contracts = []
|
||||
topic0s = [
|
||||
"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
|
||||
"0x930a61a57a70a73c2a503615b87e2e54fe5b9cdeacda518270b852296ab1a377"
|
||||
]
|
||||
[subscription.stateFilter]
|
||||
off = false
|
||||
addresses = [
|
||||
"0xde0B295669a9FD93d5F28D9Ec85E40f4cb697BAe"
|
||||
]
|
||||
intermediateNodes = false
|
||||
[subscription.storageFilter]
|
||||
off = true
|
||||
addresses = []
|
||||
storageKeys = []
|
||||
intermediateNodes = false
|
18
environments/syncPublishScreenAndServe.toml
Normal file
@ -0,0 +1,18 @@
|
||||
[database]
|
||||
name = "vulcanize_public"
|
||||
hostname = "localhost"
|
||||
port = 5432
|
||||
user = "ec2-user"
|
||||
|
||||
[client]
|
||||
ipcPath = "ws://127.0.0.1:8546"
|
||||
ipfsPath = "/root/.ipfs"
|
||||
|
||||
[server]
|
||||
ipcPath = "/root/.vulcanize/vulcanize.ipc"
|
||||
wsEndpoint = "127.0.0.1:8080"
|
||||
|
||||
[superNodeBackFill]
|
||||
on = false
|
||||
rpcPath = ""
|
||||
frequency = 5
|
266
go.mod
@ -3,68 +3,226 @@ module github.com/vulcanize/vulcanizedb
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/allegro/bigcache v1.2.1 // indirect
|
||||
github.com/apilayer/freegeoip v3.5.0+incompatible // indirect
|
||||
github.com/aristanetworks/goarista v0.0.0-20190712234253-ed1100a1c015 // indirect
|
||||
github.com/btcsuite/btcd v0.20.0-beta // indirect
|
||||
github.com/cespare/cp v1.1.1 // indirect
|
||||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9
|
||||
github.com/Stebalien/go-bitfield v0.0.1
|
||||
github.com/allegro/bigcache v0.0.0-20190618191010-69ea0af04088
|
||||
github.com/aristanetworks/goarista v0.0.0-20190712234253-ed1100a1c015
|
||||
github.com/bren2010/proquint v0.0.0-20160323162903-38337c27106d
|
||||
github.com/btcsuite/btcd v0.0.0-20190629003639-c26ffa870fd8
|
||||
github.com/btcsuite/goleveldb v1.0.0 // indirect
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
|
||||
github.com/cenkalti/backoff/v3 v3.0.0
|
||||
github.com/cheekybits/genny v1.0.0
|
||||
github.com/coreos/go-semver v0.3.0
|
||||
github.com/cskr/pubsub v1.0.2
|
||||
github.com/dave/jennifer v1.3.0
|
||||
github.com/deckarep/golang-set v1.7.1 // indirect
|
||||
github.com/docker/docker v1.13.1 // indirect
|
||||
github.com/edsrzf/mmap-go v1.0.0 // indirect
|
||||
github.com/elastic/gosigar v0.10.4 // indirect
|
||||
github.com/ethereum/go-ethereum v1.9.5
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
|
||||
github.com/go-sql-driver/mysql v1.4.1 // indirect
|
||||
github.com/golang/protobuf v1.3.2 // indirect
|
||||
github.com/google/go-cmp v0.3.1 // indirect
|
||||
github.com/gorilla/websocket v1.4.1 // indirect
|
||||
github.com/graph-gophers/graphql-go v0.0.0-20191024035216-0a9cfbec35a1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/davidlazar/go-crypto v0.0.0-20190522120613-62389b5e4ae0
|
||||
github.com/deckarep/golang-set v1.7.1
|
||||
github.com/dgraph-io/badger/v2 v2.0.0-rc.2+incompatible
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2
|
||||
github.com/dustin/go-humanize v1.0.0
|
||||
github.com/edsrzf/mmap-go v1.0.0
|
||||
github.com/elastic/gosigar v0.10.4
|
||||
github.com/ethereum/go-ethereum v1.9.1
|
||||
github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5
|
||||
github.com/fsnotify/fsnotify v1.4.7
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
|
||||
github.com/go-stack/stack v1.8.0
|
||||
github.com/gogo/protobuf v1.2.1
|
||||
github.com/golang/protobuf v1.3.2
|
||||
github.com/golang/snappy v0.0.1
|
||||
github.com/google/uuid v1.1.1
|
||||
github.com/gorilla/websocket v1.4.0
|
||||
github.com/hashicorp/errwrap v1.0.0
|
||||
github.com/hashicorp/go-multierror v1.0.0
|
||||
github.com/hashicorp/golang-lru v0.5.3
|
||||
github.com/howeyc/fsnotify v0.9.0 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0
|
||||
github.com/hpcloud/tail v1.0.0
|
||||
github.com/huin/goupnp v1.0.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/influxdata/influxdb v1.7.9 // indirect
|
||||
github.com/jackpal/go-nat-pmp v1.0.1 // indirect
|
||||
github.com/jmoiron/sqlx v0.0.0-20181024163419-82935fac6c1a
|
||||
github.com/karalabe/usb v0.0.0-20190819132248-550797b1cad8 // indirect
|
||||
github.com/lib/pq v1.0.0
|
||||
github.com/mattn/go-colorable v0.1.2 // indirect
|
||||
github.com/mattn/go-isatty v0.0.9 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.4 // indirect
|
||||
github.com/huin/goupnp v1.0.0
|
||||
github.com/inconshreveable/mousetrap v1.0.0
|
||||
github.com/ipfs/bbloom v0.0.1
|
||||
github.com/ipfs/go-bitswap v0.1.6
|
||||
github.com/ipfs/go-block-format v0.0.2
|
||||
github.com/ipfs/go-blockservice v0.1.2
|
||||
github.com/ipfs/go-cid v0.0.3
|
||||
github.com/ipfs/go-cidutil v0.0.2
|
||||
github.com/ipfs/go-datastore v0.0.5
|
||||
github.com/ipfs/go-ds-badger v0.0.5
|
||||
github.com/ipfs/go-ds-flatfs v0.0.2
|
||||
github.com/ipfs/go-ds-leveldb v0.0.2
|
||||
github.com/ipfs/go-ds-measure v0.0.1
|
||||
github.com/ipfs/go-fs-lock v0.0.1
|
||||
github.com/ipfs/go-ipfs v0.4.22
|
||||
github.com/ipfs/go-ipfs-blockstore v0.0.1
|
||||
github.com/ipfs/go-ipfs-chunker v0.0.1
|
||||
github.com/ipfs/go-ipfs-config v0.0.3
|
||||
github.com/ipfs/go-ipfs-delay v0.0.1
|
||||
github.com/ipfs/go-ipfs-ds-help v0.0.1
|
||||
github.com/ipfs/go-ipfs-exchange-interface v0.0.1
|
||||
github.com/ipfs/go-ipfs-exchange-offline v0.0.1
|
||||
github.com/ipfs/go-ipfs-files v0.0.3
|
||||
github.com/ipfs/go-ipfs-posinfo v0.0.1
|
||||
github.com/ipfs/go-ipfs-pq v0.0.1
|
||||
github.com/ipfs/go-ipfs-routing v0.1.0
|
||||
github.com/ipfs/go-ipfs-util v0.0.1
|
||||
github.com/ipfs/go-ipld-cbor v0.0.3
|
||||
github.com/ipfs/go-ipld-format v0.0.2
|
||||
github.com/ipfs/go-ipld-git v0.0.2
|
||||
github.com/ipfs/go-ipns v0.0.1
|
||||
github.com/ipfs/go-log v0.0.1
|
||||
github.com/ipfs/go-merkledag v0.1.0
|
||||
github.com/ipfs/go-metrics-interface v0.0.1
|
||||
github.com/ipfs/go-mfs v0.1.1
|
||||
github.com/ipfs/go-path v0.0.7
|
||||
github.com/ipfs/go-peertaskqueue v0.1.1
|
||||
github.com/ipfs/go-todocounter v0.0.1
|
||||
github.com/ipfs/go-unixfs v0.1.0
|
||||
github.com/ipfs/go-verifcid v0.0.1
|
||||
github.com/ipfs/interface-go-ipfs-core v0.1.0
|
||||
github.com/jackpal/gateway v1.0.5
|
||||
github.com/jackpal/go-nat-pmp v1.0.1
|
||||
github.com/jbenet/go-is-domain v1.0.2
|
||||
github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2
|
||||
github.com/jbenet/goprocess v0.1.3
|
||||
github.com/jessevdk/go-flags v1.4.0 // indirect
|
||||
github.com/jmoiron/sqlx v0.0.0-20190426154859-38398a30ed85
|
||||
github.com/karalabe/usb v0.0.0-20190703133951-9be757f914c0
|
||||
github.com/kisielk/errcheck v1.2.0 // indirect
|
||||
github.com/kkdai/bstream v0.0.0-20181106074824-b3251f7901ec // indirect
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2
|
||||
github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b
|
||||
github.com/lib/pq v1.2.0
|
||||
github.com/libp2p/go-addr-util v0.0.1
|
||||
github.com/libp2p/go-buffer-pool v0.0.2
|
||||
github.com/libp2p/go-conn-security-multistream v0.1.0
|
||||
github.com/libp2p/go-eventbus v0.0.3
|
||||
github.com/libp2p/go-flow-metrics v0.0.1
|
||||
github.com/libp2p/go-libp2p v0.1.2
|
||||
github.com/libp2p/go-libp2p-autonat v0.1.0
|
||||
github.com/libp2p/go-libp2p-autonat-svc v0.1.0
|
||||
github.com/libp2p/go-libp2p-circuit v0.1.0
|
||||
github.com/libp2p/go-libp2p-connmgr v0.1.0
|
||||
github.com/libp2p/go-libp2p-core v0.0.9
|
||||
github.com/libp2p/go-libp2p-crypto v0.1.0
|
||||
github.com/libp2p/go-libp2p-discovery v0.1.0
|
||||
github.com/libp2p/go-libp2p-kad-dht v0.1.1
|
||||
github.com/libp2p/go-libp2p-kbucket v0.2.0
|
||||
github.com/libp2p/go-libp2p-loggables v0.1.0
|
||||
github.com/libp2p/go-libp2p-mplex v0.2.1
|
||||
github.com/libp2p/go-libp2p-nat v0.0.4
|
||||
github.com/libp2p/go-libp2p-peer v0.2.0
|
||||
github.com/libp2p/go-libp2p-peerstore v0.1.3
|
||||
github.com/libp2p/go-libp2p-pnet v0.1.0
|
||||
github.com/libp2p/go-libp2p-pubsub v0.1.0
|
||||
github.com/libp2p/go-libp2p-pubsub-router v0.1.0
|
||||
github.com/libp2p/go-libp2p-quic-transport v0.1.1
|
||||
github.com/libp2p/go-libp2p-record v0.1.0
|
||||
github.com/libp2p/go-libp2p-routing v0.1.0
|
||||
github.com/libp2p/go-libp2p-routing-helpers v0.1.0
|
||||
github.com/libp2p/go-libp2p-secio v0.1.1
|
||||
github.com/libp2p/go-libp2p-swarm v0.1.1
|
||||
github.com/libp2p/go-libp2p-tls v0.1.0
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.1.1
|
||||
github.com/libp2p/go-libp2p-yamux v0.2.1
|
||||
github.com/libp2p/go-maddr-filter v0.0.5
|
||||
github.com/libp2p/go-mplex v0.1.0
|
||||
github.com/libp2p/go-msgio v0.0.4
|
||||
github.com/libp2p/go-nat v0.0.3
|
||||
github.com/libp2p/go-reuseport v0.0.1
|
||||
github.com/libp2p/go-reuseport-transport v0.0.2
|
||||
github.com/libp2p/go-stream-muxer-multistream v0.2.0
|
||||
github.com/libp2p/go-tcp-transport v0.1.0
|
||||
github.com/libp2p/go-ws-transport v0.1.0
|
||||
github.com/libp2p/go-yamux v1.2.3
|
||||
github.com/lucas-clemente/quic-go v0.11.2
|
||||
github.com/magiconair/properties v1.8.1
|
||||
github.com/marten-seemann/qtls v0.2.4
|
||||
github.com/mattn/go-colorable v0.1.2
|
||||
github.com/mattn/go-isatty v0.0.8
|
||||
github.com/mattn/go-runewidth v0.0.4
|
||||
github.com/miekg/dns v1.1.15
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1
|
||||
github.com/minio/sha256-simd v0.1.0
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.1 // indirect
|
||||
github.com/onsi/ginkgo v1.7.0
|
||||
github.com/onsi/gomega v1.4.3
|
||||
github.com/oschwald/maxminddb-golang v1.5.0 // indirect
|
||||
github.com/pborman/uuid v1.2.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.1.2
|
||||
github.com/mmcloughlin/avo v0.0.0-20190731014047-bb615f61ce85
|
||||
github.com/mr-tron/base58 v1.1.2
|
||||
github.com/multiformats/go-base32 v0.0.3
|
||||
github.com/multiformats/go-multiaddr v0.0.4
|
||||
github.com/multiformats/go-multiaddr-dns v0.0.3
|
||||
github.com/multiformats/go-multiaddr-fmt v0.0.1
|
||||
github.com/multiformats/go-multiaddr-net v0.0.1
|
||||
github.com/multiformats/go-multibase v0.0.1
|
||||
github.com/multiformats/go-multihash v0.0.6
|
||||
github.com/multiformats/go-multistream v0.1.0
|
||||
github.com/olekukonko/tablewriter v0.0.1
|
||||
github.com/onsi/ginkgo v1.8.0
|
||||
github.com/onsi/gomega v1.5.0
|
||||
github.com/opentracing/opentracing-go v1.1.0
|
||||
github.com/pborman/uuid v1.2.0
|
||||
github.com/pelletier/go-toml v1.4.0
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/polydawn/refmt v0.0.0-20190731040541-eff0b363297a
|
||||
github.com/pressly/goose v2.6.0+incompatible
|
||||
github.com/prometheus/tsdb v0.10.0 // indirect
|
||||
github.com/rjeczalik/notify v0.9.2 // indirect
|
||||
github.com/rs/cors v1.7.0 // indirect
|
||||
github.com/sirupsen/logrus v1.2.0
|
||||
github.com/spf13/cobra v0.0.3
|
||||
github.com/spf13/viper v1.3.2
|
||||
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 // indirect
|
||||
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 // indirect
|
||||
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.0 // indirect
|
||||
github.com/tyler-smith/go-bip39 v1.0.2 // indirect
|
||||
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 // indirect
|
||||
golang.org/x/crypto v0.0.0-20190926114937-fa1a29108794 // indirect
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65
|
||||
github.com/prometheus/tsdb v0.10.0
|
||||
github.com/rjeczalik/notify v0.9.2
|
||||
github.com/rs/cors v1.6.0
|
||||
github.com/sirupsen/logrus v1.4.2
|
||||
github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a
|
||||
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572
|
||||
github.com/spaolacci/murmur3 v1.1.0
|
||||
github.com/spf13/afero v1.2.2
|
||||
github.com/spf13/cast v1.3.0
|
||||
github.com/spf13/cobra v0.0.5
|
||||
github.com/spf13/jwalterweatherman v1.1.0
|
||||
github.com/spf13/pflag v1.0.3
|
||||
github.com/spf13/viper v1.4.0
|
||||
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48
|
||||
github.com/steakknife/bloomfilter v0.0.0-20180906043351-99ee86d9200f
|
||||
github.com/steakknife/hamming v0.0.0-20180906055317-003c143a81c2
|
||||
github.com/syndtr/goleveldb v1.0.0
|
||||
github.com/tyler-smith/go-bip39 v1.0.0
|
||||
github.com/vulcanize/eth-block-extractor v0.0.0-20190801172153-2835f21156aa
|
||||
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc
|
||||
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f
|
||||
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1
|
||||
github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc
|
||||
github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f
|
||||
github.com/whyrusleeping/mafmt v1.2.8
|
||||
github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30
|
||||
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7
|
||||
github.com/whyrusleeping/sql-datastore v0.0.0-20190124195324-b24eb8d0ce14
|
||||
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee
|
||||
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208
|
||||
go.opencensus.io v0.22.0
|
||||
go.uber.org/atomic v1.4.0
|
||||
go.uber.org/dig v1.7.0
|
||||
go.uber.org/fx v1.9.0
|
||||
go.uber.org/multierr v1.1.0
|
||||
go4.org v0.0.0-20190313082347-94abd6928b1d
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58
|
||||
google.golang.org/appengine v1.6.5 // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190709231704-1e4459ed25ff // indirect
|
||||
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3
|
||||
golang.org/x/text v0.3.2
|
||||
golang.org/x/tools v0.0.0-20190802003818-e9bb7d36c060
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7
|
||||
gopkg.in/fsnotify.v1 v1.4.7
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.2.2
|
||||
)
|
||||
|
||||
replace github.com/ethereum/go-ethereum => github.com/vulcanize/go-ethereum v0.0.0-20190731183759-8e20673bd101
|
||||
replace github.com/dgraph-io/badger v2.0.0-rc.2+incompatible => github.com/dgraph-io/badger/v2 v2.0.0-rc.2
|
||||
|
||||
replace gopkg.in/urfave/cli.v1 => gopkg.in/urfave/cli.v1 v1.20.0
|
||||
replace github.com/dgraph-io/badger/v2 v2.0.0-rc2 => github.com/dgraph-io/badger v1.6.0-rc1
|
||||
|
||||
replace github.com/ipfs/go-ipfs v0.4.22 => github.com/vulcanize/go-ipfs v0.4.22-alpha
|
||||
|
||||
replace github.com/ipfs/go-ipfs-config v0.0.3 => github.com/vulcanize/go-ipfs-config v0.0.8-alpha
|
||||
|
||||
replace github.com/ethereum/go-ethereum v1.9.1 => github.com/vulcanize/go-ethereum v0.0.0-20190731183759-8e20673bd101
|
||||
|
709
go.sum
@ -1,264 +1,797 @@
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669 h1:FNCRpXiquG1aoyqcIWVFmpTSKVcx2bQD38uZZeGtdlw=
|
||||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4=
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/Stebalien/go-bitfield v0.0.0-20180330043415-076a62f9ce6e/go.mod h1:3oM7gXIttpYDAJXpVNnSCiUMYBLIZ6cb1t+Ip982MRo=
|
||||
github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo=
|
||||
github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc=
|
||||
github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||
github.com/apilayer/freegeoip v3.5.0+incompatible h1:z1u2gv0/rsSi/HqMDB436AiUROXXim7st5DOg4Ikl4A=
|
||||
github.com/apilayer/freegeoip v3.5.0+incompatible/go.mod h1:CUfFqErhFhXneJendyQ/rRcuA8kH8JxHvYnbOozmlCU=
|
||||
github.com/allegro/bigcache v0.0.0-20190618191010-69ea0af04088 h1:98xHUPwc06h3/UklWP/wZjARk6fxAFEGkEZ0E1UJReo=
|
||||
github.com/allegro/bigcache v0.0.0-20190618191010-69ea0af04088/go.mod h1:qw9PmPMRP4u9TMCeXEA+M4m2lvVM+B/URHNUtxFcERc=
|
||||
github.com/aristanetworks/goarista v0.0.0-20190712234253-ed1100a1c015 h1:7ABPr1+uJdqESAdlVevnc/2FJGiC/K3uMg1JiELeF+0=
|
||||
github.com/aristanetworks/goarista v0.0.0-20190712234253-ed1100a1c015/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/btcsuite/btcd v0.20.0-beta h1:PamBMopnHxO2nEIsU89ibVVnqnXR2yFTgGNc+PdG68o=
|
||||
github.com/btcsuite/btcd v0.20.0-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/bren2010/proquint v0.0.0-20160323162903-38337c27106d h1:QgeLLoPD3kRVmeu/1al9iIpIANMi9O1zXFm8BnYGCJg=
|
||||
github.com/bren2010/proquint v0.0.0-20160323162903-38337c27106d/go.mod h1:Jbj8eKecMNwf0KFI75skSUZqMB4UCRcndUScVBTWyUI=
|
||||
github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
|
||||
github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
|
||||
github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
|
||||
github.com/btcsuite/btcd v0.0.0-20190629003639-c26ffa870fd8 h1:mOg8/RgDSHTQ1R0IR+LMDuW4TDShPv+JzYHuR4GLoNA=
|
||||
github.com/btcsuite/btcd v0.0.0-20190629003639-c26ffa870fd8/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
|
||||
github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
|
||||
github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=
|
||||
github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/coocood/freecache v1.1.0/go.mod h1:ePwxCDzOYvARfHdr1pByNct1at3CoKnsipOHwKlNbzI=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.2.1-0.20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0=
|
||||
github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis=
|
||||
github.com/dave/jennifer v1.3.0 h1:p3tl41zjjCZTNBytMwrUuiAnherNUZktlhPTKoF/sEk=
|
||||
github.com/dave/jennifer v1.3.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20190522120613-62389b5e4ae0 h1:t2BzsfK9SPTlddm0l5PgRQp5fBzByku985NYG1otL/U=
|
||||
github.com/davidlazar/go-crypto v0.0.0-20190522120613-62389b5e4ae0/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4=
|
||||
github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ=
|
||||
github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
|
||||
github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ=
|
||||
github.com/dgraph-io/badger v1.6.0-rc1 h1:JphPpoBZJ3WHha133BGYlQqltSGIhV+VsEID0++nN9A=
|
||||
github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
|
||||
github.com/dgraph-io/badger/v2 v2.0.0-rc.2+incompatible/go.mod h1:jUaIjOV835xZ/mCLG/8P/38ZxiT4bG/K1khDNZJxuwU=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-farm v0.0.0-20180109070241-2de33835d102/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo=
|
||||
github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/dlespiau/covertool v0.0.0-20180314162135-b0c4c6d0583a/go.mod h1:/eQMcW3eA1bzKx23ZYI2H3tXPdJB5JWYTHzoUPBvQY4=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/elastic/gosigar v0.10.4 h1:6jfw75dsoflhBMRdO6QPzQUgLqUYTsQQQRkkcsHsuPo=
|
||||
github.com/elastic/gosigar v0.10.4/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs=
|
||||
github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A=
|
||||
github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg=
|
||||
github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E=
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
|
||||
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
||||
github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
|
||||
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/graph-gophers/graphql-go v0.0.0-20191024035216-0a9cfbec35a1 h1:jV0CRazQbnsAGKT1z8BjMvouE2pypynEjx/o7eHbkFM=
|
||||
github.com/graph-gophers/graphql-go v0.0.0-20191024035216-0a9cfbec35a1/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
|
||||
github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
|
||||
github.com/gxed/pubsub v0.0.0-20180201040156-26ebdf44f824/go.mod h1:OiEWyHgK+CWrmOlVquHaIK1vhpUJydC9m0Je6mhaiNE=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
|
||||
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/howeyc/fsnotify v0.9.0 h1:0gtV5JmOKH4A8SsFxG2BczSeXWWPvcMT0euZt5gDAxY=
|
||||
github.com/howeyc/fsnotify v0.9.0/go.mod h1:41HzSPxBGeFRQKEEwgh49TRw/nKBsYZ2cF1OzPjSJsA=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag=
|
||||
github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=
|
||||
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
|
||||
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/influxdata/influxdb v1.7.9 h1:uSeBTNO4rBkbp1Be5FKRsAmglM9nlx25TzVQRQt1An4=
|
||||
github.com/influxdata/influxdb v1.7.9/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
|
||||
github.com/ipfs/bbloom v0.0.1 h1:s7KkiBPfxCeDVo47KySjK0ACPc5GJRUxFpdyWEuDjhw=
|
||||
github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI=
|
||||
github.com/ipfs/go-bitswap v0.0.3/go.mod h1:jadAZYsP/tcRMl47ZhFxhaNuDQoXawT8iHMg+iFoQbg=
|
||||
github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis=
|
||||
github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0=
|
||||
github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs=
|
||||
github.com/ipfs/go-bitswap v0.1.3/go.mod h1:YEQlFy0kkxops5Vy+OxWdRSEZIoS7I7KDIwoa5Chkps=
|
||||
github.com/ipfs/go-bitswap v0.1.6 h1:3jj6/69bsqAFmNViEXU8MWUDE8iE1mrqVPaKaIChu7k=
|
||||
github.com/ipfs/go-bitswap v0.1.6/go.mod h1:oRNdV7SkA9glUUMHd6O2ztSwimBDLFdIF0fYIuDEzVo=
|
||||
github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc=
|
||||
github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE=
|
||||
github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY=
|
||||
github.com/ipfs/go-blockservice v0.0.3/go.mod h1:/NNihwTi6V2Yr6g8wBI+BSwPuURpBRMtYNGrlxZ8KuI=
|
||||
github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So=
|
||||
github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M=
|
||||
github.com/ipfs/go-blockservice v0.1.1/go.mod h1:t+411r7psEUhLueM8C7aPA7cxCclv4O3VsUVxt9kz2I=
|
||||
github.com/ipfs/go-blockservice v0.1.2 h1:fqFeeu1EG0lGVrqUo+BVJv7LZV31I4ZsyNthCOMAJRc=
|
||||
github.com/ipfs/go-blockservice v0.1.2/go.mod h1:t+411r7psEUhLueM8C7aPA7cxCclv4O3VsUVxt9kz2I=
|
||||
github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
|
||||
github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
|
||||
github.com/ipfs/go-cid v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms=
|
||||
github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
|
||||
github.com/ipfs/go-cidutil v0.0.2 h1:CNOboQf1t7Qp0nuNh8QMmhJs0+Q//bRL1axtCnIB1Yo=
|
||||
github.com/ipfs/go-cidutil v0.0.2/go.mod h1:ewllrvrxG6AMYStla3GD7Cqn+XYSLqjK0vc+086tB6s=
|
||||
github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
|
||||
github.com/ipfs/go-datastore v0.0.3/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
|
||||
github.com/ipfs/go-datastore v0.0.5 h1:q3OfiOZV5rlsK1H5V8benjeUApRfMGs4Mrhmr6NriQo=
|
||||
github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE=
|
||||
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
|
||||
github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8=
|
||||
github.com/ipfs/go-ds-badger v0.0.5 h1:dxKuqw5T1Jm8OuV+lchA76H9QZFyPKZeLuT6bN42hJQ=
|
||||
github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s=
|
||||
github.com/ipfs/go-ds-flatfs v0.0.2 h1:1zujtU5bPBH6B8roE+TknKIbBCrpau865xUk0dH3x2A=
|
||||
github.com/ipfs/go-ds-flatfs v0.0.2/go.mod h1:YsMGWjUieue+smePAWeH/YhHtlmEMnEGhiwIn6K6rEM=
|
||||
github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc=
|
||||
github.com/ipfs/go-ds-leveldb v0.0.2 h1:P5HB59Zblym0B5XYOeEyw3YtPtbpIqQCavCSWaWEEk8=
|
||||
github.com/ipfs/go-ds-leveldb v0.0.2/go.mod h1:CWFeBh5IAAscWyG/QRH+lJaAlnLWjsfPSNs4teyPUp0=
|
||||
github.com/ipfs/go-ds-measure v0.0.1 h1:PrCueug+yZLkDCOthZTXKinuoCal/GvlAT7cNxzr03g=
|
||||
github.com/ipfs/go-ds-measure v0.0.1/go.mod h1:wiH6bepKsgyNKpz3nyb4erwhhIVpIxnZbsjN1QpVbbE=
|
||||
github.com/ipfs/go-fs-lock v0.0.1 h1:XHX8uW4jQBYWHj59XXcjg7BHlHxV9ZOYs6Y43yb7/l0=
|
||||
github.com/ipfs/go-fs-lock v0.0.1/go.mod h1:DNBekbboPKcxs1aukPSaOtFA3QfSdi5C855v0i9XJ8Y=
|
||||
github.com/ipfs/go-ipfs-blockstore v0.0.1 h1:O9n3PbmTYZoNhkgkEyrXTznbmktIXif62xLX+8dPHzc=
|
||||
github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
|
||||
github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk=
|
||||
github.com/ipfs/go-ipfs-chunker v0.0.1 h1:cHUUxKFQ99pozdahi+uSC/3Y6HeRpi9oTeUHbE27SEw=
|
||||
github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw=
|
||||
github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
|
||||
github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=
|
||||
github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
|
||||
github.com/ipfs/go-ipfs-ds-help v0.0.1 h1:QBg+Ts2zgeemK/dB0saiF/ykzRGgfoFMT90Rzo0OnVU=
|
||||
github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo=
|
||||
github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM=
|
||||
github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM=
|
||||
github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew=
|
||||
github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0=
|
||||
github.com/ipfs/go-ipfs-files v0.0.2/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
|
||||
github.com/ipfs/go-ipfs-files v0.0.3 h1:ME+QnC3uOyla1ciRPezDW0ynQYK2ikOh9OCKAEg4uUA=
|
||||
github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
|
||||
github.com/ipfs/go-ipfs-flags v0.0.1/go.mod h1:RnXBb9WV53GSfTrSDVK61NLTFKvWc60n+K9EgCDh+rA=
|
||||
github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs=
|
||||
github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A=
|
||||
github.com/ipfs/go-ipfs-pq v0.0.1 h1:zgUotX8dcAB/w/HidJh1zzc1yFq6Vm8J7T2F4itj/RU=
|
||||
github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY=
|
||||
github.com/ipfs/go-ipfs-routing v0.0.1/go.mod h1:k76lf20iKFxQTjcJokbPM9iBXVXVZhcOwc360N4nuKs=
|
||||
github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ=
|
||||
github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY=
|
||||
github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50=
|
||||
github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
|
||||
github.com/ipfs/go-ipld-cbor v0.0.1/go.mod h1:RXHr8s4k0NE0TKhnrxqZC9M888QfsBN9rhS5NjfKzY8=
|
||||
github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc=
|
||||
github.com/ipfs/go-ipld-cbor v0.0.3 h1:ENsxvybwkmke7Z/QJOmeJfoguj6GH3Y0YOaGrfy9Q0I=
|
||||
github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc=
|
||||
github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms=
|
||||
github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs=
|
||||
github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k=
|
||||
github.com/ipfs/go-ipld-git v0.0.2 h1:dn5Quu9lgjkSqkc9CaTsRjzg90kaIitj9wENtigVMH8=
|
||||
github.com/ipfs/go-ipld-git v0.0.2/go.mod h1:RuvMXa9qtJpDbqngyICCU/d+cmLFXxLsbIclmD0Lcr0=
|
||||
github.com/ipfs/go-ipns v0.0.1 h1:5vX0+ehF55YWxE8Pmf4eB8szcP+fh24AXnvCkOmSLCc=
|
||||
github.com/ipfs/go-ipns v0.0.1/go.mod h1:HOiAXgGiH0wCSwsFM1IKdOy6YGT4iZafcsUKni703/g=
|
||||
github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc=
|
||||
github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
|
||||
github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA=
|
||||
github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto=
|
||||
github.com/ipfs/go-merkledag v0.1.0 h1:CAEXjRFEDPvealQj3TgEjV1IJckwjvmxAqtq5QSXJrg=
|
||||
github.com/ipfs/go-merkledag v0.1.0/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk=
|
||||
github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg=
|
||||
github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY=
|
||||
github.com/ipfs/go-mfs v0.1.1 h1:tjYEWFIl0W6vRFuM/EnySHaaYzPmDcQWwTjtYWMGQ1A=
|
||||
github.com/ipfs/go-mfs v0.1.1/go.mod h1:nk17h6kCOrfaNqXvx2VJ3SqkJ0VYUzJFAsc+BA0sqaw=
|
||||
github.com/ipfs/go-path v0.0.3/go.mod h1:zIRQUez3LuQIU25zFjC2hpBTHimWx7VK5bjZgRLbbdo=
|
||||
github.com/ipfs/go-path v0.0.7 h1:H06hKMquQ0aYtHiHryOMLpQC1qC3QwXwkahcEVD51Ho=
|
||||
github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno=
|
||||
github.com/ipfs/go-peertaskqueue v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ=
|
||||
github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U=
|
||||
github.com/ipfs/go-peertaskqueue v0.1.1 h1:+gPjbI+V3NktXZOqJA1kzbms2pYmhjgQQal0MzZrOAY=
|
||||
github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U=
|
||||
github.com/ipfs/go-todocounter v0.0.1 h1:kITWA5ZcQZfrUnDNkRn04Xzh0YFaDFXsoO2A81Eb6Lw=
|
||||
github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4=
|
||||
github.com/ipfs/go-unixfs v0.0.4/go.mod h1:eIo/p9ADu/MFOuyxzwU+Th8D6xoxU//r590vUpWyfz8=
|
||||
github.com/ipfs/go-unixfs v0.1.0 h1:KkjcfqObdNwUN8heMtt5OdrgrRKYTIWEvpGl1bDYIho=
|
||||
github.com/ipfs/go-unixfs v0.1.0/go.mod h1:lysk5ELhOso8+Fed9U1QTGey2ocsfaZ18h0NCO2Fj9s=
|
||||
github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E=
|
||||
github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0=
|
||||
github.com/ipfs/interface-go-ipfs-core v0.1.0 h1:4LD2TJThswXVMJgAji9k9PyPsOGNtmdcx7U9RM1xH84=
|
||||
github.com/ipfs/interface-go-ipfs-core v0.1.0/go.mod h1:h1zJvvfh9dcNU0bK+Jag516LputHLKQkHsDP+z0dz4A=
|
||||
github.com/jackpal/gateway v1.0.4/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
|
||||
github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc=
|
||||
github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
|
||||
github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA=
|
||||
github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs=
|
||||
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
|
||||
github.com/jbenet/go-is-domain v1.0.2 h1:11r5MSptcNFZyBoqubBQnVMUKRWLuRjL1banaIk+iYo=
|
||||
github.com/jbenet/go-is-domain v1.0.2/go.mod h1:xbRLRb0S7FgzDBTJlguhDVwLYM/5yNtvktxj2Ttfy7Q=
|
||||
github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2 h1:vhC1OXXiT9R2pczegwz6moDvuRpggaroAXhPIseh57A=
|
||||
github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs=
|
||||
github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY=
|
||||
github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10=
|
||||
github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jmoiron/sqlx v0.0.0-20181024163419-82935fac6c1a h1:Jyg5PpIc1nLGrNDM5blVkiSySmRhaD/IiXkvaHzBYnw=
|
||||
github.com/jmoiron/sqlx v0.0.0-20181024163419-82935fac6c1a/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jmoiron/sqlx v0.0.0-20190426154859-38398a30ed85 h1:+LZtdhpMITOXE+MztQPPcwUl+eqYjwlXXLHrd0yWlxw=
|
||||
github.com/jmoiron/sqlx v0.0.0-20190426154859-38398a30ed85/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=
|
||||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/karalabe/usb v0.0.0-20190819132248-550797b1cad8 h1:VhnqxaTIudc9IWKx8uXRLnpdSb9noCEj+vHacjmhp68=
|
||||
github.com/karalabe/usb v0.0.0-20190819132248-550797b1cad8/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
|
||||
github.com/karalabe/usb v0.0.0-20190703133951-9be757f914c0 h1:S8kWZLXHpcOq3nGAvIs0oDgd4CXxkxE3hkDVRjTu7ro=
|
||||
github.com/karalabe/usb v0.0.0-20190703133951-9be757f914c0/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/kkdai/bstream v0.0.0-20181106074824-b3251f7901ec/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b h1:wxtKgYHEncAU00muMD06dzLiahtGM1eouRNOzVV7tdQ=
|
||||
github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
|
||||
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/libp2p/go-addr-util v0.0.1 h1:TpTQm9cXVRVSKsYbgQ7GKc3KbbHVTnbostgGaDEP+88=
|
||||
github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ=
|
||||
github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ=
|
||||
github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs=
|
||||
github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
|
||||
github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40JQWnayTvNMgD/vyk=
|
||||
github.com/libp2p/go-conn-security-multistream v0.0.1/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE=
|
||||
github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE=
|
||||
github.com/libp2p/go-conn-security-multistream v0.1.0 h1:aqGmto+ttL/uJgX0JtQI0tD21CIEy5eYd1Hlp0juHY0=
|
||||
github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc=
|
||||
github.com/libp2p/go-eventbus v0.0.2/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk=
|
||||
github.com/libp2p/go-eventbus v0.0.3 h1:4sB0NrwnWr6qGeq2RWUp/JG1wNajf6gyILInId72hrw=
|
||||
github.com/libp2p/go-eventbus v0.0.3/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk=
|
||||
github.com/libp2p/go-flow-metrics v0.0.1 h1:0gxuFd2GuK7IIP5pKljLwps6TvcuYgvG7Atqi3INF5s=
|
||||
github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8=
|
||||
github.com/libp2p/go-libp2p v0.0.2/go.mod h1:Qu8bWqFXiocPloabFGUcVG4kk94fLvfC8mWTDdFC9wE=
|
||||
github.com/libp2p/go-libp2p v0.0.30/go.mod h1:XWT8FGHlhptAv1+3V/+J5mEpzyui/5bvFsNuWYs611A=
|
||||
github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM=
|
||||
github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8=
|
||||
github.com/libp2p/go-libp2p v0.1.2 h1:TAs/A6XIB4l6GWSyamDdluGSjM4A7sHKyZ3xT8febMc=
|
||||
github.com/libp2p/go-libp2p v0.1.2/go.mod h1:5nXHmf4Hs+NmkaMsmWcFJgUHTbYNpCfxr20lwus0p1c=
|
||||
github.com/libp2p/go-libp2p-autonat v0.0.2/go.mod h1:fs71q5Xk+pdnKU014o2iq1RhMs9/PMaG5zXRFNnIIT4=
|
||||
github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE=
|
||||
github.com/libp2p/go-libp2p-autonat v0.1.0 h1:aCWAu43Ri4nU0ZPO7NyLzUvvfqd0nE3dX0R/ZGYVgOU=
|
||||
github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8=
|
||||
github.com/libp2p/go-libp2p-autonat-svc v0.1.0 h1:28IM7iWMDclZeVkpiFQaWVANwXwE7zLlpbnS7yXxrfs=
|
||||
github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A=
|
||||
github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc=
|
||||
github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro=
|
||||
github.com/libp2p/go-libp2p-blankhost v0.1.3/go.mod h1:KML1//wiKR8vuuJO0y3LUd1uLv+tlkGTAr3jC0S5cLg=
|
||||
github.com/libp2p/go-libp2p-circuit v0.0.1/go.mod h1:Dqm0s/BiV63j8EEAs8hr1H5HudqvCAeXxDyic59lCwE=
|
||||
github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU=
|
||||
github.com/libp2p/go-libp2p-circuit v0.1.0 h1:eniLL3Y9aq/sryfyV1IAHj5rlvuyj3b7iz8tSiZpdhY=
|
||||
github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8=
|
||||
github.com/libp2p/go-libp2p-connmgr v0.1.0 h1:vp0t0F0EuT3rrlTtnMnIyyzCnly7nIlRoEbhJpgp0qU=
|
||||
github.com/libp2p/go-libp2p-connmgr v0.1.0/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk=
|
||||
github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco=
|
||||
github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco=
|
||||
github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE=
|
||||
github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I=
|
||||
github.com/libp2p/go-libp2p-core v0.0.6/go.mod h1:0d9xmaYAVY5qmbp/fcgxHT3ZJsLjYeYPMJAUKpaCHrE=
|
||||
github.com/libp2p/go-libp2p-core v0.0.9 h1:Dt0Glhajkwj1zMYRoY0nbVBI7pyRYNLDaKCwss2Jd4I=
|
||||
github.com/libp2p/go-libp2p-core v0.0.9/go.mod h1:0d9xmaYAVY5qmbp/fcgxHT3ZJsLjYeYPMJAUKpaCHrE=
|
||||
github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE=
|
||||
github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I=
|
||||
github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ=
|
||||
github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI=
|
||||
github.com/libp2p/go-libp2p-discovery v0.0.1/go.mod h1:ZkkF9xIFRLA1xCc7bstYFkd80gBGK8Fc1JqGoU2i+zI=
|
||||
github.com/libp2p/go-libp2p-discovery v0.0.5/go.mod h1:YtF20GUxjgoKZ4zmXj8j3Nb2TUSBHFlOCetzYdbZL5I=
|
||||
github.com/libp2p/go-libp2p-discovery v0.1.0 h1:j+R6cokKcGbnZLf4kcNwpx6mDEUPF3N6SrqMymQhmvs=
|
||||
github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g=
|
||||
github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go=
|
||||
github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8=
|
||||
github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k=
|
||||
github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k=
|
||||
github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k=
|
||||
github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k=
|
||||
github.com/libp2p/go-libp2p-kad-dht v0.1.1 h1:IH6NQuoUv5w5e1O8Jc3KyVDtr0rNd0G9aaADpLI1xVo=
|
||||
github.com/libp2p/go-libp2p-kad-dht v0.1.1/go.mod h1:1kj2Rk5pX3/0RwqMm9AMNCT7DzcMHYhgDN5VTi+cY0M=
|
||||
github.com/libp2p/go-libp2p-kbucket v0.2.0 h1:FB2a0VkOTNGTP5gu/I444u4WabNM9V1zCkQcWb7zajI=
|
||||
github.com/libp2p/go-libp2p-kbucket v0.2.0/go.mod h1:JNymBToym3QXKBMKGy3m29+xprg0EVr/GJFHxFEdgh8=
|
||||
github.com/libp2p/go-libp2p-loggables v0.0.1/go.mod h1:lDipDlBNYbpyqyPX/KcoO+eq0sJYEVR2JgOexcivchg=
|
||||
github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8=
|
||||
github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90=
|
||||
github.com/libp2p/go-libp2p-metrics v0.0.1/go.mod h1:jQJ95SXXA/K1VZi13h52WZMa9ja78zjyy5rspMsC/08=
|
||||
github.com/libp2p/go-libp2p-mplex v0.1.1/go.mod h1:KUQWpGkCzfV7UIpi8SKsAVxyBgz1c9R5EvxgnwLsb/I=
|
||||
github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo=
|
||||
github.com/libp2p/go-libp2p-mplex v0.2.1 h1:E1xaJBQnbSiTHGI1gaBKmKhu1TUKkErKJnE8iGvirYI=
|
||||
github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE=
|
||||
github.com/libp2p/go-libp2p-nat v0.0.2/go.mod h1:QrjXQSD5Dj4IJOdEcjHRkWTSomyxRo6HnUkf/TfQpLQ=
|
||||
github.com/libp2p/go-libp2p-nat v0.0.4 h1:+KXK324yaY701On8a0aGjTnw8467kW3ExKcqW2wwmyw=
|
||||
github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY=
|
||||
github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c=
|
||||
github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c=
|
||||
github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q=
|
||||
github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU=
|
||||
github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo=
|
||||
github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es=
|
||||
github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY=
|
||||
github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY=
|
||||
github.com/libp2p/go-libp2p-peerstore v0.0.1/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20=
|
||||
github.com/libp2p/go-libp2p-peerstore v0.0.6/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20=
|
||||
github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY=
|
||||
github.com/libp2p/go-libp2p-peerstore v0.1.1/go.mod h1:ojEWnwG7JpJLkJ9REWYXQslyu9ZLrPWPEcCdiZzEbSM=
|
||||
github.com/libp2p/go-libp2p-peerstore v0.1.3 h1:wMgajt1uM2tMiqf4M+4qWKVyyFc8SfA+84VV9glZq1M=
|
||||
github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI=
|
||||
github.com/libp2p/go-libp2p-pnet v0.1.0 h1:kRUES28dktfnHNIRW4Ro78F7rKBHBiw5MJpl0ikrLIA=
|
||||
github.com/libp2p/go-libp2p-pnet v0.1.0/go.mod h1:ZkyZw3d0ZFOex71halXRihWf9WH/j3OevcJdTmD0lyE=
|
||||
github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s=
|
||||
github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.1.0 h1:SmQeMa7IUv5vadh0fYgYsafWCBA1sCy5d/68kIYqGcU=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.1.0/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q=
|
||||
github.com/libp2p/go-libp2p-pubsub-router v0.1.0 h1:xA5B8Sdx64tNlSRIcay2QUngtlu8LpUJClaUk/dYYrg=
|
||||
github.com/libp2p/go-libp2p-pubsub-router v0.1.0/go.mod h1:PnHOshBr/2I2ZxVfEsqfgCQPsVg09zo+DhSlWkOhPFM=
|
||||
github.com/libp2p/go-libp2p-quic-transport v0.1.1 h1:MFMJzvsxIEDEVKzO89BnB/FgvMj9WI4GDGUW2ArDPUA=
|
||||
github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU=
|
||||
github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q=
|
||||
github.com/libp2p/go-libp2p-record v0.1.0 h1:wHwBGbFzymoIl69BpgwIu0O6ta3TXGcMPvHUAcodzRc=
|
||||
github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q=
|
||||
github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys=
|
||||
github.com/libp2p/go-libp2p-routing v0.1.0 h1:hFnj3WR3E2tOcKaGpyzfP4gvFZ3t8JkQmbapN0Ct+oU=
|
||||
github.com/libp2p/go-libp2p-routing v0.1.0/go.mod h1:zfLhI1RI8RLEzmEaaPwzonRvXeeSHddONWkcTcB54nE=
|
||||
github.com/libp2p/go-libp2p-routing-helpers v0.1.0 h1:BaFvpyv8TyhCN7TihawTiKuzeu8/Pyw7ZnMA4IvqIN8=
|
||||
github.com/libp2p/go-libp2p-routing-helpers v0.1.0/go.mod h1:oUs0h39vNwYtYXnQWOTU5BaafbedSyWCCal3gqHuoOQ=
|
||||
github.com/libp2p/go-libp2p-secio v0.0.1/go.mod h1:IdG6iQybdcYmbTzxp4J5dwtUEDTOvZrT0opIDVNPrJs=
|
||||
github.com/libp2p/go-libp2p-secio v0.0.3/go.mod h1:hS7HQ00MgLhRO/Wyu1bTX6ctJKhVpm+j2/S2A5UqYb0=
|
||||
github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8=
|
||||
github.com/libp2p/go-libp2p-secio v0.1.1 h1:NQ9nTGmyf7/pgiVdThRrZylPsOQObVMyhT1XG9sEyOQ=
|
||||
github.com/libp2p/go-libp2p-secio v0.1.1/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8=
|
||||
github.com/libp2p/go-libp2p-swarm v0.0.1/go.mod h1:mh+KZxkbd3lQnveQ3j2q60BM1Cw2mX36XXQqwfPOShs=
|
||||
github.com/libp2p/go-libp2p-swarm v0.0.6/go.mod h1:s5GZvzg9xXe8sbeESuFpjt8CJPTCa8mhEusweJqyFy8=
|
||||
github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4=
|
||||
github.com/libp2p/go-libp2p-swarm v0.1.1 h1:QW7pjyTRIxt9yyBid52YmMRGtkFXUE/rbEVWsQ0ae+w=
|
||||
github.com/libp2p/go-libp2p-swarm v0.1.1/go.mod h1:4NVJaLwq/dr5kEq79Jo6pMin7ZFwLx73ln1FTefR91Q=
|
||||
github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
|
||||
github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
|
||||
github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
|
||||
github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E=
|
||||
github.com/libp2p/go-libp2p-tls v0.1.0 h1:o4bjjAdnUjNgJoPoDd0wUaZH7K+EenlNWJpgyXB3ulA=
|
||||
github.com/libp2p/go-libp2p-tls v0.1.0/go.mod h1:VZdoSWQDeNpIIAFJFv+6uqTqpnIIDHcqZQSTC/A1TT0=
|
||||
github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk=
|
||||
github.com/libp2p/go-libp2p-transport v0.0.4/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A=
|
||||
github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.0.1/go.mod h1:NJpUAgQab/8K6K0m+JmZCe5RUXG10UMEx4kWe9Ipj5c=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.1.1 h1:PZMS9lhjK9VytzMCW3tWHAXtKXmlURSc3ZdvwEcKCzw=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA=
|
||||
github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8=
|
||||
github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4=
|
||||
github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8=
|
||||
github.com/libp2p/go-libp2p-yamux v0.2.1 h1:Q3XYNiKCC2vIxrvUJL+Jg1kiyeEaIDNKLjgEjo3VQdI=
|
||||
github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI=
|
||||
github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q=
|
||||
github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q=
|
||||
github.com/libp2p/go-maddr-filter v0.0.5 h1:CW3AgbMO6vUvT4kf87y4N+0P8KUl2aqLYhrGyDUbLSg=
|
||||
github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M=
|
||||
github.com/libp2p/go-mplex v0.0.1/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0=
|
||||
github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0=
|
||||
github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0=
|
||||
github.com/libp2p/go-mplex v0.1.0 h1:/nBTy5+1yRyY82YaO6HXQRnO5IAGsXTjEJaR3LdTPc0=
|
||||
github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU=
|
||||
github.com/libp2p/go-msgio v0.0.1/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
|
||||
github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
|
||||
github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
|
||||
github.com/libp2p/go-msgio v0.0.4 h1:agEFehY3zWJFUHK6SEMR7UYmk2z6kC3oeCM7ybLhguA=
|
||||
github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ=
|
||||
github.com/libp2p/go-nat v0.0.3 h1:l6fKV+p0Xa354EqQOQP+d8CivdLM4kl5GxC1hSc/UeI=
|
||||
github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI=
|
||||
github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw=
|
||||
github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA=
|
||||
github.com/libp2p/go-reuseport-transport v0.0.1/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs=
|
||||
github.com/libp2p/go-reuseport-transport v0.0.2 h1:WglMwyXyBu61CMkjCCtnmqNqnjib0GIEjMiHTwR/KN4=
|
||||
github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs=
|
||||
github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14=
|
||||
github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ=
|
||||
github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw=
|
||||
github.com/libp2p/go-stream-muxer-multistream v0.2.0 h1:714bRJ4Zy9mdhyTLJ+ZKiROmAFwUHpeRidG+q7LTQOg=
|
||||
github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc=
|
||||
github.com/libp2p/go-tcp-transport v0.0.1/go.mod h1:mnjg0o0O5TmXUaUIanYPUqkW4+u6mK0en8rlpA6BBTs=
|
||||
github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19K427vCzQ+xHKH/o=
|
||||
github.com/libp2p/go-tcp-transport v0.1.0 h1:IGhowvEqyMFknOar4FWCKSWE0zL36UFKQtiRQD60/8o=
|
||||
github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc=
|
||||
github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I=
|
||||
github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc=
|
||||
github.com/libp2p/go-ws-transport v0.0.1/go.mod h1:p3bKjDWHEgtuKKj+2OdPYs5dAPIjtpQGHF2tJfGz7Ww=
|
||||
github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU=
|
||||
github.com/libp2p/go-ws-transport v0.1.0 h1:F+0OvvdmPTDsVc4AjPHjV7L7Pk1B7D5QwtDcKE2oag4=
|
||||
github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo=
|
||||
github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
|
||||
github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
|
||||
github.com/libp2p/go-yamux v1.2.3 h1:xX8A36vpXb59frIzWFdEgptLMsOANMFq2K7fPRlunYI=
|
||||
github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
|
||||
github.com/lucas-clemente/quic-go v0.11.2 h1:Mop0ac3zALaBR3wGs6j8OYe/tcFvFsxTUFMkE/7yUOI=
|
||||
github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk=
|
||||
github.com/marten-seemann/qtls v0.2.4 h1:mCJ6i1jAqcsm9XODrSGvXECodoAb1STta+TkxJCwCnE=
|
||||
github.com/marten-seemann/qtls v0.2.4/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk=
|
||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4=
|
||||
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI=
|
||||
github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
|
||||
github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
|
||||
github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
|
||||
github.com/minio/sha256-simd v0.1.0 h1:U41/2erhAKcmSI14xh/ZTUdBPOzDOIfS93ibzUSl8KM=
|
||||
github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mmcloughlin/avo v0.0.0-20190731014047-bb615f61ce85/go.mod h1:lf5GMZxA5kz8dnCweJuER5Rmbx6dDu6qvw0fO3uYKK8=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
|
||||
github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
|
||||
github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
|
||||
github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78=
|
||||
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI=
|
||||
github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
|
||||
github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
|
||||
github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
|
||||
github.com/multiformats/go-multiaddr v0.0.4 h1:WgMSI84/eRLdbptXMkMWDXPjPq7SPLIgGUVm2eroyU4=
|
||||
github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44=
|
||||
github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
|
||||
github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
|
||||
github.com/multiformats/go-multiaddr-dns v0.0.3 h1:P19q/k9jwmtgh+qXFkKfgFM7rCg/9l5AVqh7VNxSXhs=
|
||||
github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.0.1 h1:5YjeOIzbX8OTKVaN72aOzGIYW7PnrZrnkDyOfAWRSMA=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q=
|
||||
github.com/multiformats/go-multiaddr-net v0.0.1 h1:76O59E3FavvHqNg7jvzWzsPSW5JSi/ek0E4eiDVbg9g=
|
||||
github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU=
|
||||
github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA=
|
||||
github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
|
||||
github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
|
||||
github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po=
|
||||
github.com/multiformats/go-multihash v0.0.6 h1:cAVKO4epVd+SSpYJQD6d3vbdqQJvsrtGbTGzsp+V094=
|
||||
github.com/multiformats/go-multihash v0.0.6/go.mod h1:XuKXPp8VHcTygube3OWZC+aZrA+H1IhmjoCDtJc7PXM=
|
||||
github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
|
||||
github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
|
||||
github.com/multiformats/go-multistream v0.1.0 h1:UpO6jrsjqs46mqAK3n6wKRYFhugss9ArzbyUzU+4wkQ=
|
||||
github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88=
|
||||
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
|
||||
github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
|
||||
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
|
||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/oschwald/maxminddb-golang v1.5.0 h1:rmyoIV6z2/s9TCJedUuDiKht2RN12LWJ1L7iRGtWY64=
|
||||
github.com/oschwald/maxminddb-golang v1.5.0/go.mod h1:3jhIUymTJ5VREKyIhWm66LJiQt04F0UCDdodShpjWsY=
|
||||
github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
|
||||
github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
|
||||
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
|
||||
github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
|
||||
github.com/polydawn/refmt v0.0.0-20190731040541-eff0b363297a h1:TdavzKWkPcC2G+6rKJclm/JfrWC6WZFfLUR7EJJX8MA=
|
||||
github.com/polydawn/refmt v0.0.0-20190731040541-eff0b363297a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
|
||||
github.com/pressly/goose v2.6.0+incompatible h1:3f8zIQ8rfgP9tyI0Hmcs2YNAqUCL1c+diLe3iU8Qd/k=
|
||||
github.com/pressly/goose v2.6.0+incompatible/go.mod h1:m+QHWCqxR3k8D9l7qfzuC/djtlfzxr34mozWDYEu1z8=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic=
|
||||
github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4=
|
||||
github.com/rjeczalik/notify v0.9.2 h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8=
|
||||
github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM=
|
||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI=
|
||||
github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0=
|
||||
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
||||
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 h1:ju5UTwk5Odtm4trrY+4Ca4RMj5OyXbmVeDAVad2T0Jw=
|
||||
github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
|
||||
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
|
||||
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
|
||||
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=
|
||||
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
|
||||
github.com/steakknife/bloomfilter v0.0.0-20180906043351-99ee86d9200f h1:T7YHzO3/eqD/kv5m9+TLM4XuEAkN7NPj5pnZHqaOo/Q=
|
||||
github.com/steakknife/bloomfilter v0.0.0-20180906043351-99ee86d9200f/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
|
||||
github.com/steakknife/hamming v0.0.0-20180906055317-003c143a81c2 h1:o6NMd68tuqfQ0ZFnz2d16xzFNLWxrCvqF40InOJJHSM=
|
||||
github.com/steakknife/hamming v0.0.0-20180906055317-003c143a81c2/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
|
||||
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
|
||||
github.com/tyler-smith/go-bip39 v1.0.2 h1:+t3w+KwLXO6154GNJY+qUtIxLTmFjfUmpguQT1OlOT8=
|
||||
github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tyler-smith/go-bip39 v1.0.0 h1:FOHg9gaQLeBBRbHE/QrTLfEiBHy5pQ/yXzf9JG5pYFM=
|
||||
github.com/tyler-smith/go-bip39 v1.0.0/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/vulcanize/go-ethereum v0.0.0-20190731183759-8e20673bd101 h1:fsHhBzscAwi4u7/F033SFJwTIz+46D8uDWMu2/ZdvzA=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/vulcanize/eth-block-extractor v0.0.0-20190801172153-2835f21156aa h1:hCBMOksqjQ38BAl+jH47wWtcQJsyALeTTlQnqWi9Cog=
|
||||
github.com/vulcanize/eth-block-extractor v0.0.0-20190801172153-2835f21156aa/go.mod h1:+c+U08Q9eVV/X45zrCEu1RU1lYFI4qIhPGn/WpCmrV4=
|
||||
github.com/vulcanize/go-ethereum v0.0.0-20190731183759-8e20673bd101 h1:fsHhBzscAwi4u7/F033SFJwTIz+46D8uDWMu2/ZdvzA=
|
||||
github.com/vulcanize/go-ethereum v0.0.0-20190731183759-8e20673bd101/go.mod h1:9i0pGnKDUFFr8yC/n8xyrNBVfhYlpwE8J3Ge6ThKvug=
|
||||
github.com/vulcanize/go-ethereum v0.0.0-20190731183759-8e20673bd101/go.mod h1:9i0pGnKDUFFr8yC/n8xyrNBVfhYlpwE8J3Ge6ThKvug=
|
||||
github.com/vulcanize/go-ipfs v0.4.22-alpha h1:W+6njT14KWllMhABRFtPndqHw8SHCt5SqD4YX528kxM=
|
||||
github.com/vulcanize/go-ipfs v0.4.22-alpha/go.mod h1:uaekWWeoaA0A9Dv1LObOKCSh9kIzTpZ5RbKW4g5CQHE=
|
||||
github.com/vulcanize/go-ipfs-config v0.0.8-alpha h1:peaFvbEcPShF6ymOd8flqKkFz4YfcrNr/UOO7FmbWoQ=
|
||||
github.com/vulcanize/go-ipfs-config v0.0.8-alpha/go.mod h1:IGkVTacurWv9WFKc7IBPjHGM/7hi6+PEClqUb/l2BIM=
|
||||
github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
|
||||
github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
|
||||
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4=
|
||||
github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM=
|
||||
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E=
|
||||
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8=
|
||||
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
|
||||
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
|
||||
github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc h1:9lDbC6Rz4bwmou+oE6Dt4Cb2BGMur5eR/GYptkKUVHo=
|
||||
github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM=
|
||||
github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f h1:M/lL30eFZTKnomXY6huvM6G0+gVquFNf6mxghaWlFUg=
|
||||
github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8=
|
||||
github.com/whyrusleeping/go-smux-multiplex v3.0.16+incompatible/go.mod h1:34LEDbeKFZInPUrAG+bjuJmUXONGdEFW7XL0SpTY1y4=
|
||||
github.com/whyrusleeping/go-smux-multistream v2.0.2+incompatible/go.mod h1:dRWHHvc4HDQSHh9gbKEBbUZ+f2Q8iZTPG3UOGYODxSQ=
|
||||
github.com/whyrusleeping/go-smux-yamux v2.0.8+incompatible/go.mod h1:6qHUzBXUbB9MXmw3AUdB52L8sEb/hScCqOdW2kj/wuI=
|
||||
github.com/whyrusleeping/go-smux-yamux v2.0.9+incompatible/go.mod h1:6qHUzBXUbB9MXmw3AUdB52L8sEb/hScCqOdW2kj/wuI=
|
||||
github.com/whyrusleeping/mafmt v1.2.8 h1:TCghSl5kkwEE0j+sU/gudyhVMRlpBin8fMBBHg59EbA=
|
||||
github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA=
|
||||
github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30 h1:nMCC9Pwz1pxfC1Y6mYncdk+kq8d5aLx0Q+/gyZGE44M=
|
||||
github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4=
|
||||
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds=
|
||||
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI=
|
||||
github.com/whyrusleeping/sql-datastore v0.0.0-20190124195324-b24eb8d0ce14 h1:ILaHWBESRj0gO//RyMMdqkF8zfkadL65R2OCvtwgg7s=
|
||||
github.com/whyrusleeping/sql-datastore v0.0.0-20190124195324-b24eb8d0ce14/go.mod h1:AIjFk9HdZ+ckQA1YKhgTE84wKFiFrLLU151VZLMvFLY=
|
||||
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow=
|
||||
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg=
|
||||
github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8=
|
||||
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 h1:1cngl9mPEoITZG8s8cVcUy5CeIBYhEESkOB7m6Gmkrk=
|
||||
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/dig v1.7.0 h1:E5/L92iQTNJTjfgJF2KgU+/JpMaiuvK2DHLBj0+kSZk=
|
||||
go.uber.org/dig v1.7.0/go.mod h1:z+dSd2TP9Usi48jL8M3v63iSBVkiwtVyMKxMZYYauPg=
|
||||
go.uber.org/fx v1.9.0 h1:7OAz8ucp35AU8eydejpYG7QrbE8rLKzGhHbZlJi5LYY=
|
||||
go.uber.org/fx v1.9.0/go.mod h1:mFdUyAUuJ3w4jAckiKSKbldsxy1ojpAMJ+dVZg5Y0Aw=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go4.org v0.0.0-20190218023631-ce4c26f7be8e/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
go4.org v0.0.0-20190313082347-94abd6928b1d h1:JkRdGP3zvTtTbabWSAC6n67ka30y7gOzWAah4XYJSfw=
|
||||
go4.org v0.0.0-20190313082347-94abd6928b1d/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
golang.org/x/arch v0.0.0-20181203225421-5a4828bb7045/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190926114937-fa1a29108794 h1:4Yo9XtTfxfBCecLiBW8TYsFIdN7TkDhjGLWetFo4JSo=
|
||||
golang.org/x/crypto v0.0.0-20190926114937-fa1a29108794/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
|
||||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181217023233-e147a9138326/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5 h1:mzjBh+S5frKOsOBobWIMAbXavqjmgO17k/2puhcFR94=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
|
||||
golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69 h1:rOhMmluY6kLMhdnrivzec6lLgaVbMHMn2ISQXJeJ5EM=
|
||||
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0=
|
||||
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190106171756-3ef68632349c/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190802003818-e9bb7d36c060/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
|
||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190709231704-1e4459ed25ff h1:uuol9OUzSvZntY1v963NAbVd7A+PHLMz1FlCe3Lorcs=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190709231704-1e4459ed25ff/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
|
||||
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
84
libraries/shared/streamer/super_node_streamer.go
Normal file
@ -0,0 +1,84 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

// Streamer is used by watchers to stream eth data from a vulcanizedb super node
package streamer

import (
	"encoding/json"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"

	"github.com/vulcanize/vulcanizedb/pkg/config"
	"github.com/vulcanize/vulcanizedb/pkg/core"
)

// ISuperNodeStreamer is the interface for streaming SuperNodePayloads from a vulcanizeDB super node
type ISuperNodeStreamer interface {
	Stream(payloadChan chan SuperNodePayload, streamFilters config.Subscription) (*rpc.ClientSubscription, error)
}

// SuperNodeStreamer is the underlying struct for the ISuperNodeStreamer interface
type SuperNodeStreamer struct {
	Client core.RPCClient
}

// NewSuperNodeStreamer creates a pointer to a new SuperNodeStreamer which satisfies the ISuperNodeStreamer interface
func NewSuperNodeStreamer(client core.RPCClient) *SuperNodeStreamer {
	return &SuperNodeStreamer{
		Client: client,
	}
}

// Stream is the main loop for subscribing to data from a vulcanizedb super node
func (sds *SuperNodeStreamer) Stream(payloadChan chan SuperNodePayload, streamFilters config.Subscription) (*rpc.ClientSubscription, error) {
	return sds.Client.Subscribe("vdb", payloadChan, "stream", streamFilters)
}

// SuperNodePayload holds the data returned from the super node to the requesting client
type SuperNodePayload struct {
	BlockNumber     *big.Int                                `json:"blockNumber"`
	HeadersRlp      [][]byte                                `json:"headersRlp"`
	UnclesRlp       [][]byte                                `json:"unclesRlp"`
	TransactionsRlp [][]byte                                `json:"transactionsRlp"`
	ReceiptsRlp     [][]byte                                `json:"receiptsRlp"`
	StateNodesRlp   map[common.Hash][]byte                  `json:"stateNodesRlp"`
	StorageNodesRlp map[common.Hash]map[common.Hash][]byte  `json:"storageNodesRlp"`
	ErrMsg          string                                  `json:"errMsg"`

	encoded []byte
	err     error
}

func (sd *SuperNodePayload) ensureEncoded() {
	if sd.encoded == nil && sd.err == nil {
		sd.encoded, sd.err = json.Marshal(sd)
	}
}

// Length to implement Encoder interface for StateDiff
func (sd *SuperNodePayload) Length() int {
	sd.ensureEncoded()
	return len(sd.encoded)
}

// Encode to implement Encoder interface for StateDiff
func (sd *SuperNodePayload) Encode() ([]byte, error) {
	sd.ensureEncoded()
	return sd.encoded, sd.err
}
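For context, a minimal sketch of how a watcher process might drive this streamer. The IPC path, channel size, and the empty filter set are assumptions for illustration, and it presumes the RPCClient from pkg/eth/client satisfies the core.RPCClient interface.

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/rpc"

	"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
	"github.com/vulcanize/vulcanizedb/pkg/config"
	"github.com/vulcanize/vulcanizedb/pkg/eth/client"
)

func main() {
	// Dial the super node over IPC; the path here is hypothetical.
	rawClient, err := rpc.Dial("/tmp/super_node.ipc")
	if err != nil {
		log.Fatal(err)
	}
	rpcClient := client.NewRPCClient(rawClient, "/tmp/super_node.ipc")
	sds := streamer.NewSuperNodeStreamer(rpcClient)

	// Buffer the channel so a slow consumer does not stall the subscription.
	payloadChan := make(chan streamer.SuperNodePayload, 800)
	sub, err := sds.Stream(payloadChan, config.Subscription{}) // empty filters for the sketch
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()

	for {
		select {
		case payload := <-payloadChan:
			log.Printf("received payload with %d headers", len(payload.HeadersRlp))
		case subErr := <-sub.Err():
			log.Fatal(subErr)
		}
	}
}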
31
libraries/shared/transformer/super_node_transformer.go
Normal file
@ -0,0 +1,31 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package transformer

import (
	"github.com/vulcanize/vulcanizedb/pkg/config"
	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
)

type SuperNodeTransformer interface {
	Init() error
	Execute() error
	GetConfig() config.Subscription
}

type SuperNodeTransformerInitializer func(db *postgres.DB, subCon config.Subscription, client core.RPCClient) SuperNodeTransformer
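To make the contract concrete, a hypothetical transformer satisfying the interface above; the struct, its constructor, and its no-op method bodies are illustrative only, and the constructor is shaped to match SuperNodeTransformerInitializer.

type exampleTransformer struct {
	db     *postgres.DB
	client core.RPCClient
	sub    config.Subscription
}

// NewExampleTransformer matches the SuperNodeTransformerInitializer signature.
func NewExampleTransformer(db *postgres.DB, subCon config.Subscription, client core.RPCClient) SuperNodeTransformer {
	return &exampleTransformer{db: db, client: client, sub: subCon}
}

func (t *exampleTransformer) Init() error { return nil } // e.g. create or migrate the tables this watcher writes to

func (t *exampleTransformer) Execute() error {
	// subscribe to the super node with t.sub, decode payloads, persist what this watcher cares about
	return nil
}

func (t *exampleTransformer) GetConfig() config.Subscription { return t.sub }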
@ -17,10 +17,11 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/eth"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Config struct for generic contract transformer
|
||||
|
@ -31,5 +31,9 @@ func DbConnectionString(dbConfig Database) string {
		return fmt.Sprintf("postgresql://%s:%s@%s:%d/%s?sslmode=disable",
			dbConfig.User, dbConfig.Password, dbConfig.Hostname, dbConfig.Port, dbConfig.Name)
	}
	if len(dbConfig.User) > 0 && len(dbConfig.Password) == 0 {
		return fmt.Sprintf("postgresql://%s@%s:%d/%s?sslmode=disable",
			dbConfig.User, dbConfig.Hostname, dbConfig.Port, dbConfig.Name)
	}
	return fmt.Sprintf("postgresql://%s:%d/%s?sslmode=disable", dbConfig.Hostname, dbConfig.Port, dbConfig.Name)
}
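A hedged sketch of what the three branches above produce; the host, port, database name, and credentials are made up, and the example function would live in a test file alongside the config package.

func ExampleDbConnectionString() {
	fmt.Println(DbConnectionString(Database{
		Name: "vulcanize_public", Hostname: "localhost", Port: 5432, User: "vdbm",
	}))
	// Output: postgresql://vdbm@localhost:5432/vulcanize_public?sslmode=disable
}

With a password set, the first branch yields postgresql://vdbm:pw@localhost:5432/vulcanize_public?sslmode=disable; with no user at all, the fallback yields postgresql://localhost:5432/vulcanize_public?sslmode=disable.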
@ -17,11 +17,12 @@
|
||||
package config_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/config"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
var allDifferentPathsConfig = config.Plugin{
62
pkg/config/subscription.go
Normal file
@ -0,0 +1,62 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package config

import "math/big"

// Subscription config is used by a subscribing transformer to specify which data to receive from the super node
type Subscription struct {
	BackFill      bool
	BackFillOnly  bool
	StartingBlock *big.Int
	EndingBlock   *big.Int // set to 0 or a negative value to have no ending block
	HeaderFilter  HeaderFilter
	TrxFilter     TrxFilter
	ReceiptFilter ReceiptFilter
	StateFilter   StateFilter
	StorageFilter StorageFilter
}

type HeaderFilter struct {
	Off    bool
	Uncles bool
}

type TrxFilter struct {
	Off bool
	Src []string
	Dst []string
}

type ReceiptFilter struct {
	Off       bool
	Contracts []string
	Topic0s   []string
}

type StateFilter struct {
	Off               bool
	Addresses         []string // is converted to state key by taking its keccak256 hash
	IntermediateNodes bool
}

type StorageFilter struct {
	Off               bool
	Addresses         []string
	StorageKeys       []string
	IntermediateNodes bool
}
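For illustration, a hedged example of how a watcher might fill this struct to receive only headers (with uncles) and receipts for a single contract; the contract address and topic0 values are zero-value placeholders, not real ones.

sub := config.Subscription{
	BackFill:      true,          // also request data for blocks mined before the subscription started
	StartingBlock: big.NewInt(0),
	EndingBlock:   big.NewInt(0), // 0 or negative: keep streaming with no ending block
	HeaderFilter:  config.HeaderFilter{Uncles: true},
	TrxFilter:     config.TrxFilter{Off: true},
	ReceiptFilter: config.ReceiptFilter{
		Contracts: []string{"0x0000000000000000000000000000000000000000"},                         // placeholder address
		Topic0s:   []string{"0x0000000000000000000000000000000000000000000000000000000000000000"}, // placeholder topic0
	},
	StateFilter:   config.StateFilter{Off: true},
	StorageFilter: config.StorageFilter{Off: true},
}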
@ -64,7 +64,7 @@ func (r *blockRetriever) retrieveFirstBlockFromReceipts(contractAddr string) (in
|
||||
}
|
||||
err := r.db.Get(
|
||||
&firstBlock,
|
||||
`SELECT number FROM blocks
|
||||
`SELECT number FROM eth_blocks
|
||||
WHERE id = (SELECT block_id FROM full_sync_receipts
|
||||
WHERE contract_address_id = $1
|
||||
ORDER BY block_id ASC
|
||||
@ -92,7 +92,7 @@ func (r *blockRetriever) RetrieveMostRecentBlock() (int64, error) {
|
||||
var lastBlock int64
|
||||
err := r.db.Get(
|
||||
&lastBlock,
|
||||
"SELECT number FROM blocks ORDER BY number DESC LIMIT 1",
|
||||
"SELECT number FROM eth_blocks ORDER BY number DESC LIMIT 1",
|
||||
)
|
||||
|
||||
return lastBlock, err
|
||||
|
@ -175,34 +175,6 @@ func SetupTusdContract(wantedEvents, wantedMethods []string) *contract.Contract
|
||||
}.Init()
|
||||
}
|
||||
|
||||
func SetupENSRepo(vulcanizeLogID *int64, wantedEvents, wantedMethods []string) (*postgres.DB, *contract.Contract) {
|
||||
db, err := postgres.NewDB(config.Database{
|
||||
Hostname: "localhost",
|
||||
Name: "vulcanize_testing",
|
||||
Port: 5432,
|
||||
}, core.Node{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
receiptRepository := repositories.FullSyncReceiptRepository{DB: db}
|
||||
logRepository := repositories.FullSyncLogRepository{DB: db}
|
||||
blockRepository := *repositories.NewBlockRepository(db)
|
||||
|
||||
blockNumber := rand.Int63()
|
||||
blockID := CreateBlock(blockNumber, blockRepository)
|
||||
|
||||
receipts := []core.Receipt{{Logs: []core.FullSyncLog{{}}}}
|
||||
|
||||
err = receiptRepository.CreateReceiptsAndLogs(blockID, receipts)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = logRepository.Get(vulcanizeLogID, `SELECT id FROM full_sync_logs`)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
info := SetupENSContract(wantedEvents, wantedMethods)
|
||||
|
||||
return db, info
|
||||
}
|
||||
|
||||
func SetupENSContract(wantedEvents, wantedMethods []string) *contract.Contract {
|
||||
p := mocks.NewParser(constants.ENSAbiString)
|
||||
err := p.Parse(constants.EnsContractAddress)
|
||||
@ -265,7 +237,7 @@ func TearDown(db *postgres.DB) {
|
||||
_, err = tx.Exec(`DELETE FROM addresses`)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
_, err = tx.Exec(`DELETE FROM blocks`)
|
||||
_, err = tx.Exec(`DELETE FROM eth_blocks`)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
_, err = tx.Exec(`DELETE FROM headers`)
|
||||
|
@ -17,9 +17,10 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
@ -18,6 +18,7 @@ package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/eth/client"
|
||||
|
@ -18,6 +18,7 @@ package ethereum
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
|
@ -29,8 +29,6 @@ type DB struct {
|
||||
NodeID int64
|
||||
}
|
||||
|
||||
var ()
|
||||
|
||||
func NewDB(databaseConfig config.Database, node core.Node) (*DB, error) {
|
||||
connectString := config.DbConnectionString(databaseConfig)
|
||||
db, connectErr := sqlx.Connect("postgres", connectString)
|
||||
|
@ -19,9 +19,10 @@ package postgres_test
|
||||
import (
|
||||
"testing"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"io/ioutil"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
@ -19,8 +19,10 @@ package repositories
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/datastore"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
|
||||
@ -43,7 +45,7 @@ func NewBlockRepository(database *postgres.DB) *BlockRepository {
|
||||
func (blockRepository BlockRepository) SetBlocksStatus(chainHead int64) error {
|
||||
cutoff := chainHead - blocksFromHeadBeforeFinal
|
||||
_, err := blockRepository.database.Exec(`
|
||||
UPDATE blocks SET is_final = TRUE
|
||||
UPDATE eth_blocks SET is_final = TRUE
|
||||
WHERE is_final = FALSE AND number < $1`,
|
||||
cutoff)
|
||||
|
||||
@ -74,7 +76,7 @@ func (blockRepository BlockRepository) MissingBlockNumbers(startingBlockNumber i
|
||||
FROM (
|
||||
SELECT generate_series($1::INT, $2::INT) AS all_block_numbers) series
|
||||
WHERE all_block_numbers NOT IN (
|
||||
SELECT number FROM blocks WHERE eth_node_fingerprint = $3
|
||||
SELECT number FROM eth_blocks WHERE eth_node_fingerprint = $3
|
||||
) `,
|
||||
startingBlockNumber,
|
||||
highestBlockNumber, nodeID)
|
||||
@ -102,7 +104,7 @@ func (blockRepository BlockRepository) GetBlock(blockNumber int64) (core.Block,
|
||||
extra_data,
|
||||
reward,
|
||||
uncles_reward
|
||||
FROM blocks
|
||||
FROM eth_blocks
|
||||
WHERE eth_node_id = $1 AND number = $2`, blockRepository.database.NodeID, blockNumber)
|
||||
savedBlock, err := blockRepository.loadBlock(blockRows)
|
||||
if err != nil {
|
||||
@ -124,7 +126,7 @@ func (blockRepository BlockRepository) insertBlock(block core.Block) (int64, err
|
||||
return 0, postgres.ErrBeginTransactionFailed(beginErr)
|
||||
}
|
||||
insertBlockErr := tx.QueryRow(
|
||||
`INSERT INTO blocks
|
||||
`INSERT INTO eth_blocks
|
||||
(eth_node_id, number, gas_limit, gas_used, time, difficulty, hash, nonce, parent_hash, size, uncle_hash, is_final, miner, extra_data, reward, uncles_reward, eth_node_fingerprint)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)
|
||||
RETURNING id `,
|
||||
@ -260,7 +262,7 @@ func (blockRepository BlockRepository) getBlockHash(block core.Block) (string, b
|
||||
// TODO: handle possible error
|
||||
blockRepository.database.Get(&retrievedBlockHash,
|
||||
`SELECT hash
|
||||
FROM blocks
|
||||
FROM eth_blocks
|
||||
WHERE number = $1 AND eth_node_id = $2`,
|
||||
block.Number, blockRepository.database.NodeID)
|
||||
return retrievedBlockHash, blockExists(retrievedBlockHash)
|
||||
@ -287,7 +289,7 @@ func blockExists(retrievedBlockHash string) bool {
|
||||
|
||||
func (blockRepository BlockRepository) removeBlock(blockNumber int64) error {
|
||||
_, err := blockRepository.database.Exec(
|
||||
`DELETE FROM blocks WHERE number=$1 AND eth_node_id=$2`,
|
||||
`DELETE FROM eth_blocks WHERE number=$1 AND eth_node_id=$2`,
|
||||
blockNumber, blockRepository.database.NodeID)
|
||||
if err != nil {
|
||||
return postgres.ErrDBDeleteFailed(err)
|
||||
|
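The repository and retriever hunks above all retarget their SQL from blocks to eth_blocks. The rename itself is carried by a schema migration that is not shown in this view; a hedged sketch of the equivalent statement, issued through the sqlx-backed handle the repositories already hold, would be:

// Hypothetical one-off rename; in the project this belongs in a goose migration, not application code.
if _, err := db.Exec(`ALTER TABLE blocks RENAME TO eth_blocks`); err != nil {
	logrus.Fatal(err)
}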
@ -18,11 +18,12 @@ package repositories_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/fakes"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -17,9 +17,10 @@
|
||||
package repositories_test
|
||||
|
||||
import (
|
||||
"github.com/vulcanize/vulcanizedb/pkg/fakes"
|
||||
"sort"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/fakes"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
|
@ -18,6 +18,7 @@ package repositories
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/vulcanize/vulcanizedb/libraries/shared/repository"
|
||||
|
@ -17,10 +17,11 @@
|
||||
package repositories_test
|
||||
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
@ -18,10 +18,10 @@ package eth
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
|
@ -18,11 +18,12 @@ package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
type EthClient struct {
|
||||
|
@ -24,11 +24,13 @@ import (
	"github.com/ethereum/go-ethereum/rpc"
)

// RPCClient is a wrapper around the geth RPC client
type RPCClient struct {
	client  *rpc.Client
	ipcPath string
}

// BatchElem is a struct to hold the elements of a BatchCall
type BatchElem struct {
	Method string
	Args   []interface{}
@ -36,6 +38,7 @@ type BatchElem struct {
	Error error
}

// NewRPCClient creates a new RpcClient
func NewRPCClient(client *rpc.Client, ipcPath string) RPCClient {
	return RPCClient{
		client: client,
@ -43,6 +46,7 @@ func NewRPCClient(client *rpc.Client, ipcPath string) RPCClient {
	}
}

// CallContext makes an rpc method call with the provided context and arguments
func (client RPCClient) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error {
	//If an empty interface (or other nil object) is passed to CallContext, when the JSONRPC message is created the params will
	//be interpreted as [null]. This seems to work fine for most of the ethereum clients (which presumably ignore a null parameter.
@ -70,7 +74,6 @@ func (client RPCClient) BatchCall(batch []BatchElem) error {
			Args:  batchElem.Args,
			Error: batchElem.Error,
		}

		rpcBatch = append(rpcBatch, newBatchElem)
	}
	return client.client.BatchCall(rpcBatch)
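As a usage sketch for the wrapper documented above, a single request through CallContext; rpcClient is assumed to have been built with NewRPCClient, eth_blockNumber is a standard JSON-RPC method, and reading the result as a hex string is a simplification.

var latest string // hex-encoded block number, e.g. "0x10d4f"
if err := rpcClient.CallContext(context.Background(), &latest, "eth_blockNumber"); err != nil {
	logrus.Fatal(err)
}
logrus.Infof("client is at block %s", latest)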
@ -18,11 +18,12 @@ package cold_import
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/crypto"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/fs"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -18,6 +18,7 @@ package cold_import_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -17,11 +17,12 @@
|
||||
package cold_db
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type ColdDbTransactionConverter struct{}
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
)
|
||||
|
||||
|
@ -18,6 +18,7 @@ package common_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
|
@ -20,11 +20,12 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"log"
|
||||
"math/big"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
|
@ -17,9 +17,10 @@
|
||||
package fakes
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
. "github.com/onsi/gomega"
|
||||
|
@ -20,9 +20,10 @@ import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -18,6 +18,7 @@ package history
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
|
@ -17,11 +17,12 @@
|
||||
package history_test
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/fakes"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/history"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
var _ = Describe("Populating blocks", func() {
|
||||
|
@ -18,6 +18,7 @@ package history
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
)
|
||||
|
@ -20,9 +20,10 @@ import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"math/big"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/fakes"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/history"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
var _ = Describe("Validation window", func() {
175
pkg/ipfs/converter.go
Normal file
@ -0,0 +1,175 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ipfs

import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff"
)

// PayloadConverter interface is used to convert a geth statediff.Payload to our IPLDPayload type
type PayloadConverter interface {
Convert(payload statediff.Payload) (*IPLDPayload, error)
}

// Converter is the underlying struct for the PayloadConverter interface
type Converter struct {
chainConfig *params.ChainConfig
}

// NewPayloadConverter creates a pointer to a new Converter which satisfies the PayloadConverter interface
func NewPayloadConverter(chainConfig *params.ChainConfig) *Converter {
return &Converter{
chainConfig: chainConfig,
}
}

// Convert method is used to convert a geth statediff.Payload to an IPLDPayload
func (pc *Converter) Convert(payload statediff.Payload) (*IPLDPayload, error) {
// Unpack block rlp to access fields
block := new(types.Block)
decodeErr := rlp.DecodeBytes(payload.BlockRlp, block)
if decodeErr != nil {
return nil, decodeErr
}
header := block.Header()
headerRlp, encodeErr := rlp.EncodeToBytes(header)
if encodeErr != nil {
return nil, encodeErr
}
trxLen := len(block.Transactions())
convertedPayload := &IPLDPayload{
BlockHash: block.Hash(),
BlockNumber: block.Number(),
HeaderRLP: headerRlp,
BlockBody: block.Body(),
TrxMetaData: make([]*TrxMetaData, 0, trxLen),
Receipts: make(types.Receipts, 0, trxLen),
ReceiptMetaData: make([]*ReceiptMetaData, 0, trxLen),
StateNodes: make(map[common.Hash]StateNode),
StorageNodes: make(map[common.Hash][]StorageNode),
}
signer := types.MakeSigner(pc.chainConfig, block.Number())
transactions := block.Transactions()
for _, trx := range transactions {
// Extract to and from data from the transactions for indexing
from, senderErr := types.Sender(signer, trx)
if senderErr != nil {
return nil, senderErr
}
txMeta := &TrxMetaData{
Dst: handleNullAddr(trx.To()),
Src: handleNullAddr(&from),
}
// txMeta will have same index as its corresponding trx in the convertedPayload.BlockBody
convertedPayload.TrxMetaData = append(convertedPayload.TrxMetaData, txMeta)
}

// Decode receipts for this block
receipts := make(types.Receipts, 0)
decodeErr = rlp.DecodeBytes(payload.ReceiptsRlp, &receipts)
if decodeErr != nil {
return nil, decodeErr
}
// Derive any missing fields
deriveErr := receipts.DeriveFields(pc.chainConfig, block.Hash(), block.NumberU64(), block.Transactions())
if deriveErr != nil {
return nil, deriveErr
}
for i, receipt := range receipts {
// If the transaction for this receipt has a "to" address, the above DeriveFields() fails to assign it to the receipt's ContractAddress
// If it doesn't have a "to" address, it correctly derives it and assigns it to the receipt's ContractAddress
// Weird, right?
if transactions[i].To() != nil {
receipt.ContractAddress = *transactions[i].To()
}
// Extract topic0 data from the receipt's logs for indexing
rctMeta := &ReceiptMetaData{
Topic0s: make([]string, 0, len(receipt.Logs)),
ContractAddress: receipt.ContractAddress.Hex(),
}
for _, log := range receipt.Logs {
if len(log.Topics) < 1 {
continue
}
rctMeta.Topic0s = append(rctMeta.Topic0s, log.Topics[0].Hex())
}
// receipt and rctMeta will have same indexes
convertedPayload.Receipts = append(convertedPayload.Receipts, receipt)
convertedPayload.ReceiptMetaData = append(convertedPayload.ReceiptMetaData, rctMeta)
}

// Unpack state diff rlp to access fields
stateDiff := new(statediff.StateDiff)
decodeErr = rlp.DecodeBytes(payload.StateDiffRlp, stateDiff)
if decodeErr != nil {
return nil, decodeErr
}
for _, createdAccount := range stateDiff.CreatedAccounts {
hashKey := common.BytesToHash(createdAccount.Key)
convertedPayload.StateNodes[hashKey] = StateNode{
Value: createdAccount.Value,
Leaf: createdAccount.Leaf,
}
for _, storageDiff := range createdAccount.Storage {
convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], StorageNode{
Key: common.BytesToHash(storageDiff.Key),
Value: storageDiff.Value,
Leaf: storageDiff.Leaf,
})
}
}
for _, deletedAccount := range stateDiff.DeletedAccounts {
hashKey := common.BytesToHash(deletedAccount.Key)
convertedPayload.StateNodes[hashKey] = StateNode{
Value: deletedAccount.Value,
Leaf: deletedAccount.Leaf,
}
for _, storageDiff := range deletedAccount.Storage {
convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], StorageNode{
Key: common.BytesToHash(storageDiff.Key),
Value: storageDiff.Value,
Leaf: storageDiff.Leaf,
})
}
}
for _, updatedAccount := range stateDiff.UpdatedAccounts {
hashKey := common.BytesToHash(updatedAccount.Key)
convertedPayload.StateNodes[hashKey] = StateNode{
Value: updatedAccount.Value,
Leaf: updatedAccount.Leaf,
}
for _, storageDiff := range updatedAccount.Storage {
convertedPayload.StorageNodes[hashKey] = append(convertedPayload.StorageNodes[hashKey], StorageNode{
Key: common.BytesToHash(storageDiff.Key),
Value: storageDiff.Value,
Leaf: storageDiff.Leaf,
})
}
}
return convertedPayload, nil
}

func handleNullAddr(to *common.Address) string {
if to == nil {
return "0x0000000000000000000000000000000000000000000000000000000000000000"
}
return to.Hex()
}
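
For context, a minimal sketch of the intended call pattern for this converter (not part of the diff): it assumes the statediff.Payload is streamed from a geth statediff subscription, which is not shown here, and that mainnet is the target chain; the package and function names below are illustrative only.

package ipldexample

import (
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/statediff"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)

// convertPayload shows the intended usage: one Converter per chain config,
// reused for every payload streamed from geth.
func convertPayload(payload statediff.Payload) (*ipfs.IPLDPayload, error) {
	converter := ipfs.NewPayloadConverter(params.MainnetChainConfig)
	return converter.Convert(payload)
}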
55
pkg/ipfs/converter_test.go
Normal file
@ -0,0 +1,55 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ipfs_test

import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
)

var _ = Describe("Converter", func() {
Describe("Convert", func() {
It("Converts mock statediff.Payloads into the expected IPLDPayloads", func() {
converter := ipfs.NewPayloadConverter(params.MainnetChainConfig)
converterPayload, err := converter.Convert(mocks.MockStateDiffPayload)
Expect(err).ToNot(HaveOccurred())
Expect(converterPayload.BlockNumber).To(Equal(mocks.BlockNumber))
Expect(converterPayload.BlockHash).To(Equal(mocks.MockBlock.Hash()))
Expect(converterPayload.StateNodes).To(Equal(mocks.MockStateNodes))
Expect(converterPayload.StorageNodes).To(Equal(mocks.MockStorageNodes))
gotBody, err := rlp.EncodeToBytes(converterPayload.BlockBody)
Expect(err).ToNot(HaveOccurred())
expectedBody, err := rlp.EncodeToBytes(mocks.MockBlock.Body())
Expect(err).ToNot(HaveOccurred())
Expect(gotBody).To(Equal(expectedBody))
Expect(converterPayload.HeaderRLP).To(Equal(mocks.MockHeaderRlp))
Expect(converterPayload.TrxMetaData).To(Equal(mocks.MockTrxMeta))
Expect(converterPayload.ReceiptMetaData).To(Equal(mocks.MockRctMeta))
})

It("Throws an error if the wrong chain config is used", func() {
converter := ipfs.NewPayloadConverter(params.TestnetChainConfig)
_, err := converter.Convert(mocks.MockStateDiffPayload)
Expect(err).To(HaveOccurred())
})
})
})
230
pkg/ipfs/fetcher.go
Normal file
@ -0,0 +1,230 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ipfs

import (
"context"

"github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-block-format"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
log "github.com/sirupsen/logrus"
)

// IPLDFetcher is an interface for fetching IPLDs
type IPLDFetcher interface {
FetchIPLDs(cids CIDWrapper) (*IPLDWrapper, error)
}

// EthIPLDFetcher is used to fetch ETH IPLD objects from IPFS
type EthIPLDFetcher struct {
BlockService blockservice.BlockService
}

// NewIPLDFetcher creates a pointer to a new IPLDFetcher
func NewIPLDFetcher(ipfsPath string) (*EthIPLDFetcher, error) {
blockService, err := InitIPFSBlockService(ipfsPath)
if err != nil {
return nil, err
}
return &EthIPLDFetcher{
BlockService: blockService,
}, nil
}

// FetchIPLDs is the exported method for fetching and returning all the IPLDs specified in the CIDWrapper
func (f *EthIPLDFetcher) FetchIPLDs(cids CIDWrapper) (*IPLDWrapper, error) {

log.Debug("fetching iplds")
blocks := &IPLDWrapper{
BlockNumber: cids.BlockNumber,
Headers: make([]blocks.Block, 0),
Uncles: make([]blocks.Block, 0),
Transactions: make([]blocks.Block, 0),
Receipts: make([]blocks.Block, 0),
StateNodes: make(map[common.Hash]blocks.Block),
StorageNodes: make(map[common.Hash]map[common.Hash]blocks.Block),
}

headersErr := f.fetchHeaders(cids, blocks)
if headersErr != nil {
return nil, headersErr
}
unclesErr := f.fetchUncles(cids, blocks)
if unclesErr != nil {
return nil, unclesErr
}
trxsErr := f.fetchTrxs(cids, blocks)
if trxsErr != nil {
return nil, trxsErr
}
rctsErr := f.fetchRcts(cids, blocks)
if rctsErr != nil {
return nil, rctsErr
}
storageErr := f.fetchStorage(cids, blocks)
if storageErr != nil {
return nil, storageErr
}
stateErr := f.fetchState(cids, blocks)
if stateErr != nil {
return nil, stateErr
}

return blocks, nil
}

// fetchHeaders fetches headers
// It uses the f.fetchBatch method
func (f *EthIPLDFetcher) fetchHeaders(cids CIDWrapper, blocks *IPLDWrapper) error {
log.Debug("fetching header iplds")
headerCids := make([]cid.Cid, 0, len(cids.Headers))
for _, c := range cids.Headers {
dc, err := cid.Decode(c)
if err != nil {
return err
}
headerCids = append(headerCids, dc)
}
blocks.Headers = f.fetchBatch(headerCids)
if len(blocks.Headers) != len(headerCids) {
log.Errorf("ipfs fetcher: number of header blocks returned (%d) does not match number expected (%d)", len(blocks.Headers), len(headerCids))
}
return nil
}

// fetchUncles fetches uncles
// It uses the f.fetchBatch method
func (f *EthIPLDFetcher) fetchUncles(cids CIDWrapper, blocks *IPLDWrapper) error {
log.Debug("fetching uncle iplds")
uncleCids := make([]cid.Cid, 0, len(cids.Uncles))
for _, c := range cids.Uncles {
dc, err := cid.Decode(c)
if err != nil {
return err
}
uncleCids = append(uncleCids, dc)
}
blocks.Uncles = f.fetchBatch(uncleCids)
if len(blocks.Uncles) != len(uncleCids) {
log.Errorf("ipfs fetcher: number of uncle blocks returned (%d) does not match number expected (%d)", len(blocks.Uncles), len(uncleCids))
}
return nil
}

// fetchTrxs fetches transactions
// It uses the f.fetchBatch method
func (f *EthIPLDFetcher) fetchTrxs(cids CIDWrapper, blocks *IPLDWrapper) error {
log.Debug("fetching transaction iplds")
trxCids := make([]cid.Cid, 0, len(cids.Transactions))
for _, c := range cids.Transactions {
dc, err := cid.Decode(c)
if err != nil {
return err
}
trxCids = append(trxCids, dc)
}
blocks.Transactions = f.fetchBatch(trxCids)
if len(blocks.Transactions) != len(trxCids) {
log.Errorf("ipfs fetcher: number of transaction blocks returned (%d) does not match number expected (%d)", len(blocks.Transactions), len(trxCids))
}
return nil
}

// fetchRcts fetches receipts
// It uses the f.fetchBatch method
func (f *EthIPLDFetcher) fetchRcts(cids CIDWrapper, blocks *IPLDWrapper) error {
log.Debug("fetching receipt iplds")
rctCids := make([]cid.Cid, 0, len(cids.Receipts))
for _, c := range cids.Receipts {
dc, err := cid.Decode(c)
if err != nil {
return err
}
rctCids = append(rctCids, dc)
}
blocks.Receipts = f.fetchBatch(rctCids)
if len(blocks.Receipts) != len(rctCids) {
log.Errorf("ipfs fetcher: number of receipt blocks returned (%d) does not match number expected (%d)", len(blocks.Receipts), len(rctCids))
}
return nil
}

// fetchState fetches state nodes
// It uses the single f.fetch method instead of the batch fetch, because it
// needs to maintain the data's relation to state keys
func (f *EthIPLDFetcher) fetchState(cids CIDWrapper, blocks *IPLDWrapper) error {
log.Debug("fetching state iplds")
for _, stateNode := range cids.StateNodes {
if stateNode.CID == "" || stateNode.Key == "" {
continue
}
dc, decodeErr := cid.Decode(stateNode.CID)
if decodeErr != nil {
return decodeErr
}
block, fetchErr := f.fetch(dc)
if fetchErr != nil {
return fetchErr
}
blocks.StateNodes[common.HexToHash(stateNode.Key)] = block
}
return nil
}

// fetchStorage fetches storage nodes
// It uses the single f.fetch method instead of the batch fetch, because it
// needs to maintain the data's relation to state and storage keys
func (f *EthIPLDFetcher) fetchStorage(cids CIDWrapper, blks *IPLDWrapper) error {
log.Debug("fetching storage iplds")
for _, storageNode := range cids.StorageNodes {
if storageNode.CID == "" || storageNode.Key == "" || storageNode.StateKey == "" {
continue
}
dc, decodeErr := cid.Decode(storageNode.CID)
if decodeErr != nil {
return decodeErr
}
blk, fetchErr := f.fetch(dc)
if fetchErr != nil {
return fetchErr
}
if blks.StorageNodes[common.HexToHash(storageNode.StateKey)] == nil {
blks.StorageNodes[common.HexToHash(storageNode.StateKey)] = make(map[common.Hash]blocks.Block)
}
blks.StorageNodes[common.HexToHash(storageNode.StateKey)][common.HexToHash(storageNode.Key)] = blk
}
return nil
}

// fetch is used to fetch a single cid
func (f *EthIPLDFetcher) fetch(cid cid.Cid) (blocks.Block, error) {
	return f.BlockService.GetBlock(context.Background(), cid)
}

// fetchBatch is used to fetch a batch of IPFS data blocks by cid
// There is no guarantee all are fetched, and no error in such a case, so
// downstream we will need to confirm which CIDs were fetched in the result set
func (f *EthIPLDFetcher) fetchBatch(cids []cid.Cid) []blocks.Block {
fetchedBlocks := make([]blocks.Block, 0, len(cids))
blockChan := f.BlockService.GetBlocks(context.Background(), cids)
for block := range blockChan {
fetchedBlocks = append(fetchedBlocks, block)
}
return fetchedBlocks
}
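
A hedged sketch of the read path this fetcher enables (not part of the diff): it assumes an initialized IPFS repo on disk and a CIDWrapper already loaded from the CID index; the repo path and function name are illustrative. Note that because fetchBatch drops CIDs it cannot retrieve without returning an error, callers only see the logged count mismatch.

package ipldexample

import (
	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)

// fetchForWrapper opens a fetcher against an existing IPFS repo and resolves
// every CID in the wrapper back into raw IPFS blocks.
func fetchForWrapper(ipfsPath string, cids ipfs.CIDWrapper) (*ipfs.IPLDWrapper, error) {
	if ipfs.EmptyCIDWrapper(cids) {
		return nil, nil // nothing indexed for this block
	}
	fetcher, err := ipfs.NewIPLDFetcher(ipfsPath)
	if err != nil {
		return nil, err
	}
	return fetcher.FetchIPLDs(cids)
}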
112
pkg/ipfs/fetcher_test.go
Normal file
@ -0,0 +1,112 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ipfs_test

import (
"math/big"

"github.com/ethereum/go-ethereum/common"
"github.com/ipfs/go-block-format"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

"github.com/vulcanize/vulcanizedb/pkg/ipfs"
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
)

var (
mockHeaderData = []byte{0, 1, 2, 3, 4}
mockUncleData = []byte{1, 2, 3, 4, 5}
mockTrxData = []byte{2, 3, 4, 5, 6}
mockReceiptData = []byte{3, 4, 5, 6, 7}
mockStateData = []byte{4, 5, 6, 7, 8}
mockStorageData = []byte{5, 6, 7, 8, 9}
mockStorageData2 = []byte{6, 7, 8, 9, 1}
mockHeaderBlock = blocks.NewBlock(mockHeaderData)
mockUncleBlock = blocks.NewBlock(mockUncleData)
mockTrxBlock = blocks.NewBlock(mockTrxData)
mockReceiptBlock = blocks.NewBlock(mockReceiptData)
mockStateBlock = blocks.NewBlock(mockStateData)
mockStorageBlock1 = blocks.NewBlock(mockStorageData)
mockStorageBlock2 = blocks.NewBlock(mockStorageData2)
mockBlocks = []blocks.Block{mockHeaderBlock, mockUncleBlock, mockTrxBlock, mockReceiptBlock, mockStateBlock, mockStorageBlock1, mockStorageBlock2}
mockBlockService *mocks.MockIPFSBlockService
mockCIDWrapper = ipfs.CIDWrapper{
BlockNumber: big.NewInt(9000),
Headers: []string{mockHeaderBlock.Cid().String()},
Uncles: []string{mockUncleBlock.Cid().String()},
Transactions: []string{mockTrxBlock.Cid().String()},
Receipts: []string{mockReceiptBlock.Cid().String()},
StateNodes: []ipfs.StateNodeCID{{
CID: mockStateBlock.Cid().String(),
Leaf: true,
Key: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
}},
StorageNodes: []ipfs.StorageNodeCID{{
CID: mockStorageBlock1.Cid().String(),
Leaf: true,
StateKey: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
Key: "0000000000000000000000000000000000000000000000000000000000000001",
},
{
CID: mockStorageBlock2.Cid().String(),
Leaf: true,
StateKey: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
Key: "0000000000000000000000000000000000000000000000000000000000000002",
}},
}
)

var _ = Describe("Fetcher", func() {
Describe("FetchCIDs", func() {
BeforeEach(func() {
mockBlockService = new(mocks.MockIPFSBlockService)
err := mockBlockService.AddBlocks(mockBlocks)
Expect(err).ToNot(HaveOccurred())
Expect(len(mockBlockService.Blocks)).To(Equal(7))
})

It("Fetches and returns IPLDs for the CIDs provided in the CIDWrapper", func() {
fetcher := new(ipfs.EthIPLDFetcher)
fetcher.BlockService = mockBlockService
iplds, err := fetcher.FetchIPLDs(mockCIDWrapper)
Expect(err).ToNot(HaveOccurred())
Expect(iplds.BlockNumber).To(Equal(mockCIDWrapper.BlockNumber))
Expect(len(iplds.Headers)).To(Equal(1))
Expect(iplds.Headers[0]).To(Equal(mockHeaderBlock))
Expect(len(iplds.Uncles)).To(Equal(1))
Expect(iplds.Uncles[0]).To(Equal(mockUncleBlock))
Expect(len(iplds.Transactions)).To(Equal(1))
Expect(iplds.Transactions[0]).To(Equal(mockTrxBlock))
Expect(len(iplds.Receipts)).To(Equal(1))
Expect(iplds.Receipts[0]).To(Equal(mockReceiptBlock))
Expect(len(iplds.StateNodes)).To(Equal(1))
stateNode, ok := iplds.StateNodes[common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")]
Expect(ok).To(BeTrue())
Expect(stateNode).To(Equal(mockStateBlock))
Expect(len(iplds.StorageNodes)).To(Equal(1))
storageNodes := iplds.StorageNodes[common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")]
Expect(len(storageNodes)).To(Equal(2))
storageNode1, ok := storageNodes[common.HexToHash("0000000000000000000000000000000000000000000000000000000000000001")]
Expect(ok).To(BeTrue())
Expect(storageNode1).To(Equal(mockStorageBlock1))
storageNode2, ok := storageNodes[common.HexToHash("0000000000000000000000000000000000000000000000000000000000000002")]
Expect(storageNode2).To(Equal(mockStorageBlock2))
Expect(ok).To(BeTrue())
})
})
})
79
pkg/ipfs/helpers.go
Normal file
@ -0,0 +1,79 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ipfs

import (
"context"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-ipfs/core"
"github.com/ipfs/go-ipfs/plugin/loader"
"github.com/ipfs/go-ipfs/repo/fsrepo"
)

// InitIPFSPlugins is used to initialize IPFS plugins before creating a new IPFS node
// This should only be called once
func InitIPFSPlugins() error {
l, err := loader.NewPluginLoader("")
if err != nil {
return err
}
err = l.Initialize()
if err != nil {
return err
}
return l.Inject()
}

// InitIPFSBlockService is used to configure and return a BlockService using an ipfs repo path (e.g. ~/.ipfs)
func InitIPFSBlockService(ipfsPath string) (blockservice.BlockService, error) {
r, openErr := fsrepo.Open(ipfsPath)
if openErr != nil {
return nil, openErr
}
ctx := context.Background()
cfg := &core.BuildCfg{
Online: false,
Repo: r,
}
ipfsNode, newNodeErr := core.NewNode(ctx, cfg)
if newNodeErr != nil {
return nil, newNodeErr
}
return ipfsNode.Blocks, nil
}

// AddressToKey hashes an address
func AddressToKey(address common.Address) common.Hash {
return crypto.Keccak256Hash(address[:])
}

// HexToKey hashes a hex (0x leading or not) string
func HexToKey(hex string) common.Hash {
addr := common.FromHex(hex)
return crypto.Keccak256Hash(addr[:])
}

// EmptyCIDWrapper returns whether or not the provided CIDWrapper has any Cids we need to process
func EmptyCIDWrapper(cids CIDWrapper) bool {
if len(cids.Transactions) > 0 || len(cids.Headers) > 0 || len(cids.Uncles) > 0 || len(cids.Receipts) > 0 || len(cids.StateNodes) > 0 || len(cids.StorageNodes) > 0 {
return false
}
return true
}
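
The two hashing helpers above produce the leaf keys used throughout this package: state leaves are keyed by the keccak256 of the account address, storage leaves by the keccak256 of the 32-byte slot. A small illustrative sketch (not part of the diff; the address and slot are arbitrary examples):

package ipldexample

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)

// printKeys demonstrates deriving a state leaf key from an address and a
// storage leaf key from a storage slot.
func printKeys() {
	addr := common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
	fmt.Println("state key:", ipfs.AddressToKey(addr).Hex())
	fmt.Println("storage key:", ipfs.HexToKey("0x0000000000000000000000000000000000000000000000000000000000000001").Hex())
}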
35
pkg/ipfs/ipfs_suite_test.go
Normal file
@ -0,0 +1,35 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package ipfs_test

import (
"io/ioutil"
"testing"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
)

func TestIPFS(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "IPFS Suite Test")
}

var _ = BeforeSuite(func() {
logrus.SetOutput(ioutil.Discard)
})
86
pkg/ipfs/mocks/blockservice.go
Normal file
@ -0,0 +1,86 @@
package mocks

import (
"context"
"errors"

"github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-ipfs-blockstore"
"github.com/ipfs/go-ipfs-exchange-interface"
)

// MockIPFSBlockService is a mock for testing the ipfs fetcher
type MockIPFSBlockService struct {
Blocks map[cid.Cid]blocks.Block
}

// GetBlock is used to retrieve a block from the mock BlockService
func (bs *MockIPFSBlockService) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) {
if bs.Blocks == nil {
return nil, errors.New("BlockService has not been initialized")
}
blk, ok := bs.Blocks[c]
if ok {
return blk, nil
}
return nil, nil
}

// GetBlocks is used to retrieve a set of blocks from the mock BlockService
func (bs *MockIPFSBlockService) GetBlocks(ctx context.Context, cs []cid.Cid) <-chan blocks.Block {
if bs.Blocks == nil {
panic("BlockService has not been initialized")
}
blkChan := make(chan blocks.Block)
go func() {
for _, c := range cs {
blk, ok := bs.Blocks[c]
if ok {
blkChan <- blk
}
}
close(blkChan)
}()
return blkChan
}

// AddBlock adds a block to the mock BlockService
func (bs *MockIPFSBlockService) AddBlock(blk blocks.Block) error {
if bs.Blocks == nil {
bs.Blocks = make(map[cid.Cid]blocks.Block)
}
bs.Blocks[blk.Cid()] = blk
return nil
}

// AddBlocks adds a set of blocks to the mock BlockService
func (bs *MockIPFSBlockService) AddBlocks(blks []blocks.Block) error {
if bs.Blocks == nil {
bs.Blocks = make(map[cid.Cid]blocks.Block)
}
for _, block := range blks {
bs.Blocks[block.Cid()] = block
}
return nil
}

// Close is here to satisfy the interface
func (*MockIPFSBlockService) Close() error {
panic("implement me")
}

// Blockstore is here to satisfy the interface
func (*MockIPFSBlockService) Blockstore() blockstore.Blockstore {
panic("implement me")
}

// DeleteBlock is here to satisfy the interface
func (*MockIPFSBlockService) DeleteBlock(c cid.Cid) error {
panic("implement me")
}

// Exchange is here to satisfy the interface
func (*MockIPFSBlockService) Exchange() exchange.Interface {
panic("implement me")
}
57
pkg/ipfs/mocks/converter.go
Normal file
@ -0,0 +1,57 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package mocks

import (
"fmt"

"github.com/ethereum/go-ethereum/statediff"

"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)

// PayloadConverter is the underlying struct for the Converter interface
type PayloadConverter struct {
PassedStatediffPayload statediff.Payload
ReturnIPLDPayload *ipfs.IPLDPayload
ReturnErr error
}

// Convert method is used to convert a geth statediff.Payload to an IPLDPayload
func (pc *PayloadConverter) Convert(payload statediff.Payload) (*ipfs.IPLDPayload, error) {
pc.PassedStatediffPayload = payload
return pc.ReturnIPLDPayload, pc.ReturnErr
}

// IterativePayloadConverter is the underlying struct for the Converter interface
type IterativePayloadConverter struct {
PassedStatediffPayload []statediff.Payload
ReturnIPLDPayload []*ipfs.IPLDPayload
ReturnErr error
iteration int
}

// Convert method is used to convert a geth statediff.Payload to an IPLDPayload
func (pc *IterativePayloadConverter) Convert(payload statediff.Payload) (*ipfs.IPLDPayload, error) {
pc.PassedStatediffPayload = append(pc.PassedStatediffPayload, payload)
if len(pc.PassedStatediffPayload) < pc.iteration+1 {
return nil, fmt.Errorf("IterativePayloadConverter does not have a payload to return at iteration %d", pc.iteration)
}
returnPayload := pc.ReturnIPLDPayload[pc.iteration]
pc.iteration++
return returnPayload, pc.ReturnErr
}
53
pkg/ipfs/mocks/dag_putters.go
Normal file
@ -0,0 +1,53 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package mocks

import (
"errors"

"github.com/ethereum/go-ethereum/common"
)

// DagPutter is a mock for testing the ipfs publisher
type DagPutter struct {
CIDsToReturn []string
ErrToReturn error
}

// DagPut returns the pre-loaded CIDs or error
func (dp *DagPutter) DagPut(raw interface{}) ([]string, error) {
return dp.CIDsToReturn, dp.ErrToReturn
}

// MappedDagPutter is a mock for testing the ipfs publisher
type MappedDagPutter struct {
CIDsToReturn map[common.Hash][]string
ErrToReturn error
}

// DagPut returns the pre-loaded CIDs or error
func (mdp *MappedDagPutter) DagPut(raw interface{}) ([]string, error) {
if mdp.CIDsToReturn == nil {
return nil, errors.New("mapped dag putter needs to be initialized with a map of cids to return")
}
by, ok := raw.([]byte)
if !ok {
return nil, errors.New("mapped dag putters can only dag put []byte values")
}
hash := common.BytesToHash(by)
return mdp.CIDsToReturn[hash], nil
}
55
pkg/ipfs/mocks/publisher.go
Normal file
@ -0,0 +1,55 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package mocks

import (
"fmt"

"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)

// IPLDPublisher is the underlying struct for the Publisher interface
type IPLDPublisher struct {
PassedIPLDPayload *ipfs.IPLDPayload
ReturnCIDPayload *ipfs.CIDPayload
ReturnErr error
}

// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
func (pub *IPLDPublisher) Publish(payload *ipfs.IPLDPayload) (*ipfs.CIDPayload, error) {
pub.PassedIPLDPayload = payload
return pub.ReturnCIDPayload, pub.ReturnErr
}

// IterativeIPLDPublisher is the underlying struct for the Publisher interface; used in testing
type IterativeIPLDPublisher struct {
PassedIPLDPayload []*ipfs.IPLDPayload
ReturnCIDPayload []*ipfs.CIDPayload
ReturnErr error
iteration int
}

// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
func (pub *IterativeIPLDPublisher) Publish(payload *ipfs.IPLDPayload) (*ipfs.CIDPayload, error) {
pub.PassedIPLDPayload = append(pub.PassedIPLDPayload, payload)
if len(pub.ReturnCIDPayload) < pub.iteration+1 {
return nil, fmt.Errorf("IterativeIPLDPublisher does not have a payload to return at iteration %d", pub.iteration)
}
returnPayload := pub.ReturnCIDPayload[pub.iteration]
pub.iteration++
return returnPayload, pub.ReturnErr
}
370
pkg/ipfs/mocks/test_data.go
Normal file
@ -0,0 +1,370 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package mocks

import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"math/big"
rand2 "math/rand"

"github.com/ipfs/go-block-format"
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff"
log "github.com/sirupsen/logrus"

"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)

// Test variables
var (
// block data
BlockNumber = big.NewInt(rand2.Int63())
MockHeader = types.Header{
Time: 0,
Number: BlockNumber,
Root: common.HexToHash("0x0"),
TxHash: common.HexToHash("0x0"),
ReceiptHash: common.HexToHash("0x0"),
}
MockTransactions, MockReceipts, senderAddr = createTransactionsAndReceipts()
ReceiptsRlp, _ = rlp.EncodeToBytes(MockReceipts)
MockBlock = types.NewBlock(&MockHeader, MockTransactions, nil, MockReceipts)
MockBlockRlp, _ = rlp.EncodeToBytes(MockBlock)
MockHeaderRlp, _ = rlp.EncodeToBytes(MockBlock.Header())
MockTrxMeta = []*ipfs.TrxMetaData{
{
CID: "", // This is empty until we go to publish to ipfs
Src: senderAddr.Hex(),
Dst: "0x0000000000000000000000000000000000000000",
},
{
CID: "",
Src: senderAddr.Hex(),
Dst: "0x0000000000000000000000000000000000000001",
},
}
MockRctMeta = []*ipfs.ReceiptMetaData{
{
CID: "",
Topic0s: []string{
"0x0000000000000000000000000000000000000000000000000000000000000004",
},
ContractAddress: "0x0000000000000000000000000000000000000000",
},
{
CID: "",
Topic0s: []string{
"0x0000000000000000000000000000000000000000000000000000000000000005",
},
ContractAddress: "0x0000000000000000000000000000000000000001",
},
}

// statediff data
CodeHash = common.Hex2Bytes("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
NonceValue = rand2.Uint64()
anotherNonceValue = rand2.Uint64()
BalanceValue = rand2.Int63()
anotherBalanceValue = rand2.Int63()
ContractRoot = common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
StoragePath = common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").Bytes()
StorageKey = common.HexToHash("0000000000000000000000000000000000000000000000000000000000000001").Bytes()
StorageValue = common.Hex2Bytes("0x03")
storage = []statediff.StorageDiff{{
Key: StorageKey,
Value: StorageValue,
Path: StoragePath,
Proof: [][]byte{},
Leaf: true,
}}
emptyStorage = make([]statediff.StorageDiff, 0)
Address = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
ContractLeafKey = ipfs.AddressToKey(Address)
AnotherAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
AnotherContractLeafKey = ipfs.AddressToKey(AnotherAddress)
testAccount = state.Account{
Nonce: NonceValue,
Balance: big.NewInt(BalanceValue),
Root: ContractRoot,
CodeHash: CodeHash,
}
anotherTestAccount = state.Account{
Nonce: anotherNonceValue,
Balance: big.NewInt(anotherBalanceValue),
Root: common.HexToHash("0x"),
CodeHash: nil,
}
ValueBytes, _ = rlp.EncodeToBytes(testAccount)
AnotherValueBytes, _ = rlp.EncodeToBytes(anotherTestAccount)
CreatedAccountDiffs = []statediff.AccountDiff{
{
Key: ContractLeafKey.Bytes(),
Value: ValueBytes,
Storage: storage,
Leaf: true,
},
{
Key: AnotherContractLeafKey.Bytes(),
Value: AnotherValueBytes,
Storage: emptyStorage,
Leaf: true,
},
}

MockStateDiff = statediff.StateDiff{
BlockNumber: BlockNumber,
BlockHash: MockBlock.Hash(),
CreatedAccounts: CreatedAccountDiffs,
}
MockStateDiffBytes, _ = rlp.EncodeToBytes(MockStateDiff)
MockStateNodes = map[common.Hash]ipfs.StateNode{
ContractLeafKey: {
Value: ValueBytes,
Leaf: true,
},
AnotherContractLeafKey: {
Value: AnotherValueBytes,
Leaf: true,
},
}
MockStorageNodes = map[common.Hash][]ipfs.StorageNode{
ContractLeafKey: {
{
Key: common.BytesToHash(StorageKey),
Value: StorageValue,
Leaf: true,
},
},
}

// aggregate payloads
MockStateDiffPayload = statediff.Payload{
BlockRlp: MockBlockRlp,
StateDiffRlp: MockStateDiffBytes,
ReceiptsRlp: ReceiptsRlp,
}

MockIPLDPayload = &ipfs.IPLDPayload{
BlockNumber: big.NewInt(1),
BlockHash: MockBlock.Hash(),
Receipts: MockReceipts,
HeaderRLP: MockHeaderRlp,
BlockBody: MockBlock.Body(),
TrxMetaData: []*ipfs.TrxMetaData{
{
CID: "",
Src: senderAddr.Hex(),
Dst: "0x0000000000000000000000000000000000000000",
},
{
CID: "",
Src: senderAddr.Hex(),
Dst: "0x0000000000000000000000000000000000000001",
},
},
ReceiptMetaData: []*ipfs.ReceiptMetaData{
{
CID: "",
Topic0s: []string{
"0x0000000000000000000000000000000000000000000000000000000000000004",
},
ContractAddress: "0x0000000000000000000000000000000000000000",
},
{
CID: "",
Topic0s: []string{
"0x0000000000000000000000000000000000000000000000000000000000000005",
},
ContractAddress: "0x0000000000000000000000000000000000000001",
},
},
StorageNodes: MockStorageNodes,
StateNodes: MockStateNodes,
}

MockCIDPayload = &ipfs.CIDPayload{
BlockNumber: "1",
BlockHash: MockBlock.Hash(),
HeaderCID: "mockHeaderCID",
UncleCIDs: make(map[common.Hash]string),
TransactionCIDs: map[common.Hash]*ipfs.TrxMetaData{
MockTransactions[0].Hash(): {
CID: "mockTrxCID1",
Dst: "0x0000000000000000000000000000000000000000",
Src: senderAddr.Hex(),
},
MockTransactions[1].Hash(): {
CID: "mockTrxCID2",
Dst: "0x0000000000000000000000000000000000000001",
Src: senderAddr.Hex(),
},
},
ReceiptCIDs: map[common.Hash]*ipfs.ReceiptMetaData{
MockTransactions[0].Hash(): {
CID: "mockRctCID1",
Topic0s: []string{"0x0000000000000000000000000000000000000000000000000000000000000004"},
ContractAddress: "0x0000000000000000000000000000000000000000",
},
MockTransactions[1].Hash(): {
CID: "mockRctCID2",
Topic0s: []string{"0x0000000000000000000000000000000000000000000000000000000000000005"},
ContractAddress: "0x0000000000000000000000000000000000000001",
},
},
StateNodeCIDs: map[common.Hash]ipfs.StateNodeCID{
ContractLeafKey: {
CID: "mockStateCID1",
Leaf: true,
Key: "",
},
AnotherContractLeafKey: {
CID: "mockStateCID2",
Leaf: true,
Key: "",
},
},
StorageNodeCIDs: map[common.Hash][]ipfs.StorageNodeCID{
ContractLeafKey: {
{
CID: "mockStorageCID",
Key: "0x0000000000000000000000000000000000000000000000000000000000000001",
Leaf: true,
StateKey: "",
},
},
},
}

MockCIDWrapper = &ipfs.CIDWrapper{
BlockNumber: big.NewInt(1),
Headers: []string{"mockHeaderCID"},
Transactions: []string{"mockTrxCID1", "mockTrxCID2"},
Receipts: []string{"mockRctCID1", "mockRctCID2"},
Uncles: []string{},
StateNodes: []ipfs.StateNodeCID{
{
CID: "mockStateCID1",
Leaf: true,
Key: ContractLeafKey.Hex(),
},
{
CID: "mockStateCID2",
Leaf: true,
Key: AnotherContractLeafKey.Hex(),
},
},
StorageNodes: []ipfs.StorageNodeCID{
{
CID: "mockStorageCID",
Leaf: true,
StateKey: ContractLeafKey.Hex(),
Key: "0x0000000000000000000000000000000000000000000000000000000000000001",
},
},
}

MockIPLDWrapper = ipfs.IPLDWrapper{
BlockNumber: big.NewInt(1),
Headers: []blocks.Block{
blocks.NewBlock(MockHeaderRlp),
},
Transactions: []blocks.Block{
blocks.NewBlock(MockTransactions.GetRlp(0)),
blocks.NewBlock(MockTransactions.GetRlp(1)),
},
Receipts: []blocks.Block{
blocks.NewBlock(MockReceipts.GetRlp(0)),
blocks.NewBlock(MockReceipts.GetRlp(1)),
},
StateNodes: map[common.Hash]blocks.Block{
ContractLeafKey: blocks.NewBlock(ValueBytes),
AnotherContractLeafKey: blocks.NewBlock(AnotherValueBytes),
},
StorageNodes: map[common.Hash]map[common.Hash]blocks.Block{
ContractLeafKey: {
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): blocks.NewBlock(StorageValue),
},
},
}

MockSeeNodePayload = streamer.SuperNodePayload{
BlockNumber: big.NewInt(1),
HeadersRlp: [][]byte{MockHeaderRlp},
TransactionsRlp: [][]byte{MockTransactions.GetRlp(0), MockTransactions.GetRlp(1)},
ReceiptsRlp: [][]byte{MockTransactions.GetRlp(0), MockTransactions.GetRlp(1)},
StateNodesRlp: map[common.Hash][]byte{
ContractLeafKey: ValueBytes,
AnotherContractLeafKey: AnotherValueBytes,
},
StorageNodesRlp: map[common.Hash]map[common.Hash][]byte{
ContractLeafKey: {
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"): StorageValue,
},
},
}
)

// createTransactionsAndReceipts is a helper function to generate signed mock transactions and mock receipts with mock logs
func createTransactionsAndReceipts() (types.Transactions, types.Receipts, common.Address) {
// make transactions
trx1 := types.NewTransaction(0, common.HexToAddress("0x0"), big.NewInt(1000), 50, big.NewInt(100), nil)
trx2 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(2000), 100, big.NewInt(200), nil)
transactionSigner := types.MakeSigner(params.MainnetChainConfig, BlockNumber)
mockCurve := elliptic.P256()
mockPrvKey, err := ecdsa.GenerateKey(mockCurve, rand.Reader)
if err != nil {
log.Fatal(err)
}
signedTrx1, err := types.SignTx(trx1, transactionSigner, mockPrvKey)
if err != nil {
log.Fatal(err)
}
signedTrx2, err := types.SignTx(trx2, transactionSigner, mockPrvKey)
if err != nil {
log.Fatal(err)
}
senderAddr, err := types.Sender(transactionSigner, signedTrx1) // same for both trx
if err != nil {
log.Fatal(err)
}
// make receipts
mockTopic1 := common.HexToHash("0x04")
mockReceipt1 := types.NewReceipt(common.HexToHash("0x0").Bytes(), false, 50)
mockReceipt1.ContractAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476592")
mockLog1 := &types.Log{
Topics: []common.Hash{mockTopic1},
}
mockReceipt1.Logs = []*types.Log{mockLog1}
mockReceipt1.TxHash = signedTrx1.Hash()
mockTopic2 := common.HexToHash("0x05")
mockReceipt2 := types.NewReceipt(common.HexToHash("0x1").Bytes(), false, 100)
mockReceipt2.ContractAddress = common.HexToAddress("0xaE9BEa628c4Ce503DcFD7E305CaB4e29E7476593")
mockLog2 := &types.Log{
Topics: []common.Hash{mockTopic2},
}
mockReceipt2.Logs = []*types.Log{mockLog2}
mockReceipt2.TxHash = signedTrx2.Hash()
return types.Transactions{signedTrx1, signedTrx2}, types.Receipts{mockReceipt1, mockReceipt2}, senderAddr
}
204
pkg/ipfs/publisher.go
Normal file
204
pkg/ipfs/publisher.go
Normal file
@ -0,0 +1,204 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipfs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/vulcanize/eth-block-extractor/pkg/ipfs"
|
||||
"github.com/vulcanize/eth-block-extractor/pkg/ipfs/eth_block_header"
|
||||
"github.com/vulcanize/eth-block-extractor/pkg/ipfs/eth_block_receipts"
|
||||
"github.com/vulcanize/eth-block-extractor/pkg/ipfs/eth_block_transactions"
|
||||
"github.com/vulcanize/eth-block-extractor/pkg/ipfs/eth_state_trie"
|
||||
"github.com/vulcanize/eth-block-extractor/pkg/ipfs/eth_storage_trie"
|
||||
rlp2 "github.com/vulcanize/eth-block-extractor/pkg/wrappers/rlp"
|
||||
)
|
||||
|
||||
// IPLDPublisher is the interface for publishing an IPLD payload
|
||||
type IPLDPublisher interface {
|
||||
Publish(payload *IPLDPayload) (*CIDPayload, error)
|
||||
}
|
||||
|
||||
// Publisher is the underlying struct for the IPLDPublisher interface
|
||||
type Publisher struct {
|
||||
HeaderPutter ipfs.DagPutter
|
||||
TransactionPutter ipfs.DagPutter
|
||||
ReceiptPutter ipfs.DagPutter
|
||||
StatePutter ipfs.DagPutter
|
||||
StoragePutter ipfs.DagPutter
|
||||
}
|
||||
|
||||
// NewIPLDPublisher creates a pointer to a new Publisher which satisfies the IPLDPublisher interface
|
||||
func NewIPLDPublisher(ipfsPath string) (*Publisher, error) {
|
||||
node, err := ipfs.InitIPFSNode(ipfsPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Publisher{
|
||||
HeaderPutter: eth_block_header.NewBlockHeaderDagPutter(node, rlp2.RlpDecoder{}),
|
||||
TransactionPutter: eth_block_transactions.NewBlockTransactionsDagPutter(node),
|
||||
ReceiptPutter: eth_block_receipts.NewEthBlockReceiptDagPutter(node),
|
||||
StatePutter: eth_state_trie.NewStateTrieDagPutter(node),
|
||||
StoragePutter: eth_storage_trie.NewStorageTrieDagPutter(node),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Publish publishes an IPLDPayload to IPFS and returns the corresponding CIDPayload
|
||||
func (pub *Publisher) Publish(payload *IPLDPayload) (*CIDPayload, error) {
|
||||
// Process and publish headers
|
||||
headerCid, headersErr := pub.publishHeaders(payload.HeaderRLP)
|
||||
if headersErr != nil {
|
||||
return nil, headersErr
|
||||
}
|
||||
|
||||
// Process and publish uncles
|
||||
uncleCids := make(map[common.Hash]string)
|
||||
for _, uncle := range payload.BlockBody.Uncles {
|
||||
uncleRlp, encodeErr := rlp.EncodeToBytes(uncle)
|
||||
if encodeErr != nil {
|
||||
return nil, encodeErr
|
||||
}
|
||||
cid, unclesErr := pub.publishHeaders(uncleRlp)
|
||||
if unclesErr != nil {
|
||||
return nil, unclesErr
|
||||
}
|
||||
uncleCids[uncle.Hash()] = cid
|
||||
}
|
||||
|
||||
// Process and publish transactions
|
||||
transactionCids, trxsErr := pub.publishTransactions(payload.BlockBody, payload.TrxMetaData)
|
||||
if trxsErr != nil {
|
||||
return nil, trxsErr
|
||||
}
|
||||
|
||||
// Process and publish receipts
|
||||
receiptsCids, rctsErr := pub.publishReceipts(payload.Receipts, payload.ReceiptMetaData)
|
||||
if rctsErr != nil {
|
||||
return nil, rctsErr
|
||||
}
|
||||
|
||||
// Process and publish state leafs
|
||||
stateNodeCids, stateErr := pub.publishStateNodes(payload.StateNodes)
|
||||
if stateErr != nil {
|
||||
return nil, stateErr
|
||||
}
|
||||
|
||||
// Process and publish storage leafs
|
||||
storageNodeCids, storageErr := pub.publishStorageNodes(payload.StorageNodes)
|
||||
if storageErr != nil {
|
||||
return nil, storageErr
|
||||
}
|
||||
|
||||
// Package CIDs and their metadata into a single struct
|
||||
return &CIDPayload{
|
||||
BlockHash: payload.BlockHash,
|
||||
BlockNumber: payload.BlockNumber.String(),
|
||||
HeaderCID: headerCid,
|
||||
UncleCIDs: uncleCids,
|
||||
TransactionCIDs: transactionCids,
|
||||
ReceiptCIDs: receiptsCids,
|
||||
StateNodeCIDs: stateNodeCids,
|
||||
StorageNodeCIDs: storageNodeCids,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (pub *Publisher) publishHeaders(headerRLP []byte) (string, error) {
|
||||
headerCids, err := pub.HeaderPutter.DagPut(headerRLP)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(headerCids) != 1 {
|
||||
return "", errors.New("single CID expected to be returned for header")
|
||||
}
|
||||
return headerCids[0], nil
|
||||
}
|
||||
|
||||
func (pub *Publisher) publishTransactions(blockBody *types.Body, trxMeta []*TrxMetaData) (map[common.Hash]*TrxMetaData, error) {
|
||||
transactionCids, err := pub.TransactionPutter.DagPut(blockBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(transactionCids) != len(blockBody.Transactions) {
|
||||
return nil, errors.New("expected one CID for each transaction")
|
||||
}
|
||||
mappedTrxCids := make(map[common.Hash]*TrxMetaData, len(transactionCids))
|
||||
for i, trx := range blockBody.Transactions {
|
||||
mappedTrxCids[trx.Hash()] = trxMeta[i]
|
||||
mappedTrxCids[trx.Hash()].CID = transactionCids[i]
|
||||
}
|
||||
return mappedTrxCids, nil
|
||||
}
|
||||
|
||||
func (pub *Publisher) publishReceipts(receipts types.Receipts, receiptMeta []*ReceiptMetaData) (map[common.Hash]*ReceiptMetaData, error) {
|
||||
receiptsCids, err := pub.ReceiptPutter.DagPut(receipts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(receiptsCids) != len(receipts) {
|
||||
return nil, errors.New("expected one CID for each receipt")
|
||||
}
|
||||
// Keep receipts associated with their transaction
|
||||
mappedRctCids := make(map[common.Hash]*ReceiptMetaData, len(receiptsCids))
|
||||
for i, rct := range receipts {
|
||||
mappedRctCids[rct.TxHash] = receiptMeta[i]
|
||||
mappedRctCids[rct.TxHash].CID = receiptsCids[i]
|
||||
}
|
||||
return mappedRctCids, nil
|
||||
}
|
||||
|
||||
func (pub *Publisher) publishStateNodes(stateNodes map[common.Hash]StateNode) (map[common.Hash]StateNodeCID, error) {
|
||||
stateNodeCids := make(map[common.Hash]StateNodeCID)
|
||||
for addrKey, node := range stateNodes {
|
||||
stateNodeCid, err := pub.StatePutter.DagPut(node.Value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(stateNodeCid) != 1 {
|
||||
return nil, errors.New("single CID expected to be returned for state leaf")
|
||||
}
|
||||
stateNodeCids[addrKey] = StateNodeCID{
|
||||
CID: stateNodeCid[0],
|
||||
Leaf: node.Leaf,
|
||||
}
|
||||
}
|
||||
return stateNodeCids, nil
|
||||
}
|
||||
|
||||
func (pub *Publisher) publishStorageNodes(storageNodes map[common.Hash][]StorageNode) (map[common.Hash][]StorageNodeCID, error) {
|
||||
storageLeafCids := make(map[common.Hash][]StorageNodeCID)
|
||||
for addrKey, storageTrie := range storageNodes {
|
||||
storageLeafCids[addrKey] = make([]StorageNodeCID, 0, len(storageTrie))
|
||||
for _, node := range storageTrie {
|
||||
storageNodeCid, err := pub.StoragePutter.DagPut(node.Value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(storageNodeCid) != 1 {
|
||||
return nil, errors.New("single CID expected to be returned for storage leaf")
|
||||
}
|
||||
storageLeafCids[addrKey] = append(storageLeafCids[addrKey], StorageNodeCID{
|
||||
Key: node.Key.Hex(),
|
||||
CID: storageNodeCid[0],
|
||||
Leaf: node.Leaf,
|
||||
})
|
||||
}
|
||||
}
|
||||
return storageLeafCids, nil
|
||||
}
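For orientation, a minimal sketch of how the publisher is meant to be used downstream of the payload converter; the helper function, its ipfsPath argument, and the logging are assumptions for illustration, not part of the package:

package example // hypothetical package, for illustration

import (
	log "github.com/sirupsen/logrus"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)

// publishPayload constructs a Publisher against the given IPFS repo path and
// publishes one converted payload, returning the CIDs that should then be handed
// to the CID indexer.
func publishPayload(ipfsPath string, payload *ipfs.IPLDPayload) (*ipfs.CIDPayload, error) {
	publisher, err := ipfs.NewIPLDPublisher(ipfsPath)
	if err != nil {
		return nil, err
	}
	cids, err := publisher.Publish(payload)
	if err != nil {
		return nil, err
	}
	log.Infof("published header CID %s for block %s", cids.HeaderCID, cids.BlockNumber)
	return cids, nil
}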
|
82
pkg/ipfs/publisher_test.go
Normal file
@ -0,0 +1,82 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipfs_test
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
|
||||
)
|
||||
|
||||
var (
|
||||
mockHeaderDagPutter *mocks.DagPutter
|
||||
mockTrxDagPutter *mocks.DagPutter
|
||||
mockRctDagPutter *mocks.DagPutter
|
||||
mockStateDagPutter *mocks.MappedDagPutter
|
||||
mockStorageDagPutter *mocks.DagPutter
|
||||
)
|
||||
|
||||
var _ = Describe("Publisher", func() {
|
||||
BeforeEach(func() {
|
||||
mockHeaderDagPutter = new(mocks.DagPutter)
|
||||
mockTrxDagPutter = new(mocks.DagPutter)
|
||||
mockRctDagPutter = new(mocks.DagPutter)
|
||||
mockStateDagPutter = new(mocks.MappedDagPutter)
|
||||
mockStorageDagPutter = new(mocks.DagPutter)
|
||||
})
|
||||
|
||||
Describe("Publish", func() {
|
||||
It("Publishes the passed IPLDPayload objects to IPFS and returns a CIDPayload for indexing", func() {
|
||||
mockHeaderDagPutter.CIDsToReturn = []string{"mockHeaderCID"}
|
||||
mockTrxDagPutter.CIDsToReturn = []string{"mockTrxCID1", "mockTrxCID2"}
|
||||
mockRctDagPutter.CIDsToReturn = []string{"mockRctCID1", "mockRctCID2"}
|
||||
val1 := common.BytesToHash(mocks.MockIPLDPayload.StateNodes[mocks.ContractLeafKey].Value)
|
||||
val2 := common.BytesToHash(mocks.MockIPLDPayload.StateNodes[mocks.AnotherContractLeafKey].Value)
|
||||
mockStateDagPutter.CIDsToReturn = map[common.Hash][]string{
|
||||
val1: {"mockStateCID1"},
|
||||
val2: {"mockStateCID2"},
|
||||
}
|
||||
mockStorageDagPutter.CIDsToReturn = []string{"mockStorageCID"}
|
||||
publisher := ipfs.Publisher{
|
||||
HeaderPutter: mockHeaderDagPutter,
|
||||
TransactionPutter: mockTrxDagPutter,
|
||||
ReceiptPutter: mockRctDagPutter,
|
||||
StatePutter: mockStateDagPutter,
|
||||
StoragePutter: mockStorageDagPutter,
|
||||
}
|
||||
cidPayload, err := publisher.Publish(mocks.MockIPLDPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(cidPayload.BlockNumber).To(Equal(mocks.MockCIDPayload.BlockNumber))
|
||||
Expect(cidPayload.BlockHash).To(Equal(mocks.MockCIDPayload.BlockHash))
|
||||
Expect(cidPayload.UncleCIDs).To(Equal(mocks.MockCIDPayload.UncleCIDs))
|
||||
Expect(cidPayload.HeaderCID).To(Equal(mocks.MockCIDPayload.HeaderCID))
|
||||
Expect(len(cidPayload.TransactionCIDs)).To(Equal(2))
|
||||
Expect(cidPayload.TransactionCIDs[mocks.MockTransactions[0].Hash()]).To(Equal(mocks.MockCIDPayload.TransactionCIDs[mocks.MockTransactions[0].Hash()]))
|
||||
Expect(cidPayload.TransactionCIDs[mocks.MockTransactions[1].Hash()]).To(Equal(mocks.MockCIDPayload.TransactionCIDs[mocks.MockTransactions[1].Hash()]))
|
||||
Expect(len(cidPayload.ReceiptCIDs)).To(Equal(2))
|
||||
Expect(cidPayload.ReceiptCIDs[mocks.MockTransactions[0].Hash()]).To(Equal(mocks.MockCIDPayload.ReceiptCIDs[mocks.MockTransactions[0].Hash()]))
|
||||
Expect(cidPayload.ReceiptCIDs[mocks.MockTransactions[1].Hash()]).To(Equal(mocks.MockCIDPayload.ReceiptCIDs[mocks.MockTransactions[1].Hash()]))
|
||||
Expect(len(cidPayload.StateNodeCIDs)).To(Equal(2))
|
||||
Expect(cidPayload.StateNodeCIDs[mocks.ContractLeafKey]).To(Equal(mocks.MockCIDPayload.StateNodeCIDs[mocks.ContractLeafKey]))
|
||||
Expect(cidPayload.StateNodeCIDs[mocks.AnotherContractLeafKey]).To(Equal(mocks.MockCIDPayload.StateNodeCIDs[mocks.AnotherContractLeafKey]))
|
||||
Expect(cidPayload.StorageNodeCIDs).To(Equal(mocks.MockCIDPayload.StorageNodeCIDs))
|
||||
})
|
||||
})
|
||||
})
|
97
pkg/ipfs/resolver.go
Normal file
@ -0,0 +1,97 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipfs
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ipfs/go-block-format"
|
||||
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
|
||||
)
|
||||
|
||||
// IPLDResolver is the interface for resolving IPLDs
|
||||
type IPLDResolver interface {
|
||||
ResolveIPLDs(ipfsBlocks IPLDWrapper) streamer.SuperNodePayload
|
||||
}
|
||||
|
||||
// EthIPLDResolver is the underlying struct to support the IPLDResolver interface
|
||||
type EthIPLDResolver struct{}
|
||||
|
||||
// NewIPLDResolver returns a pointer to an EthIPLDResolver which satisfies the IPLDResolver interface
|
||||
func NewIPLDResolver() *EthIPLDResolver {
|
||||
return &EthIPLDResolver{}
|
||||
}
|
||||
|
||||
// ResolveIPLDs is the exported method for resolving all of the ETH IPLDs packaged in an IPLDWrapper
|
||||
func (eir *EthIPLDResolver) ResolveIPLDs(ipfsBlocks IPLDWrapper) streamer.SuperNodePayload {
|
||||
response := &streamer.SuperNodePayload{
|
||||
BlockNumber: ipfsBlocks.BlockNumber,
|
||||
StateNodesRlp: make(map[common.Hash][]byte),
|
||||
StorageNodesRlp: make(map[common.Hash]map[common.Hash][]byte),
|
||||
}
|
||||
eir.resolveHeaders(ipfsBlocks.Headers, response)
|
||||
eir.resolveUncles(ipfsBlocks.Uncles, response)
|
||||
eir.resolveTransactions(ipfsBlocks.Transactions, response)
|
||||
eir.resolveReceipts(ipfsBlocks.Receipts, response)
|
||||
eir.resolveState(ipfsBlocks.StateNodes, response)
|
||||
eir.resolveStorage(ipfsBlocks.StorageNodes, response)
|
||||
return *response
|
||||
}
|
||||
|
||||
func (eir *EthIPLDResolver) resolveHeaders(blocks []blocks.Block, response *streamer.SuperNodePayload) {
|
||||
for _, block := range blocks {
|
||||
raw := block.RawData()
|
||||
response.HeadersRlp = append(response.HeadersRlp, raw)
|
||||
}
|
||||
}
|
||||
|
||||
func (eir *EthIPLDResolver) resolveUncles(blocks []blocks.Block, response *streamer.SuperNodePayload) {
|
||||
for _, block := range blocks {
|
||||
raw := block.RawData()
|
||||
response.UnclesRlp = append(response.UnclesRlp, raw)
|
||||
}
|
||||
}
|
||||
|
||||
func (eir *EthIPLDResolver) resolveTransactions(blocks []blocks.Block, response *streamer.SuperNodePayload) {
|
||||
for _, block := range blocks {
|
||||
raw := block.RawData()
|
||||
response.TransactionsRlp = append(response.TransactionsRlp, raw)
|
||||
}
|
||||
}
|
||||
|
||||
func (eir *EthIPLDResolver) resolveReceipts(blocks []blocks.Block, response *streamer.SuperNodePayload) {
|
||||
for _, block := range blocks {
|
||||
raw := block.RawData()
|
||||
response.ReceiptsRlp = append(response.ReceiptsRlp, raw)
|
||||
}
|
||||
}
|
||||
|
||||
func (eir *EthIPLDResolver) resolveState(blocks map[common.Hash]blocks.Block, response *streamer.SuperNodePayload) {
|
||||
for key, block := range blocks {
|
||||
raw := block.RawData()
|
||||
response.StateNodesRlp[key] = raw
|
||||
}
|
||||
}
|
||||
|
||||
func (eir *EthIPLDResolver) resolveStorage(blocks map[common.Hash]map[common.Hash]blocks.Block, response *streamer.SuperNodePayload) {
|
||||
for stateKey, storageBlocks := range blocks {
|
||||
response.StorageNodesRlp[stateKey] = make(map[common.Hash][]byte)
|
||||
for storageKey, storageVal := range storageBlocks {
|
||||
raw := storageVal.RawData()
|
||||
response.StorageNodesRlp[stateKey][storageKey] = raw
|
||||
}
|
||||
}
|
||||
}
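As a usage sketch (the wrapper value is assumed to come from an IPLD fetcher elsewhere in the pipeline; the helper name is hypothetical):

package example // hypothetical package, for illustration

import (
	"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)

// resolveForSubscribers turns a batch of fetched IPFS blocks back into the raw RLP
// byte slices that are relayed to subscribing transformers.
func resolveForSubscribers(wrapper ipfs.IPLDWrapper) streamer.SuperNodePayload {
	resolver := ipfs.NewIPLDResolver()
	return resolver.ResolveIPLDs(wrapper)
}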
|
52
pkg/ipfs/resolver_test.go
Normal file
@ -0,0 +1,52 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipfs_test
|
||||
|
||||
import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/super_node"
|
||||
)
|
||||
|
||||
var (
|
||||
resolver ipfs.IPLDResolver
|
||||
)
|
||||
|
||||
var _ = Describe("Resolver", func() {
|
||||
Describe("ResolveIPLDs", func() {
|
||||
BeforeEach(func() {
|
||||
resolver = ipfs.NewIPLDResolver()
|
||||
})
|
||||
It("Resolves IPLD data to their correct geth data types and packages them to send to requesting transformers", func() {
|
||||
superNodePayload := resolver.ResolveIPLDs(mocks.MockIPLDWrapper)
|
||||
Expect(superNodePayload.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
|
||||
Expect(superNodePayload.HeadersRlp).To(Equal(mocks.MockSeeNodePayload.HeadersRlp))
|
||||
Expect(superNodePayload.UnclesRlp).To(Equal(mocks.MockSeeNodePayload.UnclesRlp))
|
||||
Expect(len(superNodePayload.TransactionsRlp)).To(Equal(2))
|
||||
Expect(super_node.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
|
||||
Expect(super_node.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
|
||||
Expect(len(superNodePayload.ReceiptsRlp)).To(Equal(2))
|
||||
Expect(super_node.ListContainsBytes(superNodePayload.ReceiptsRlp, mocks.MockReceipts.GetRlp(0))).To(BeTrue())
|
||||
Expect(super_node.ListContainsBytes(superNodePayload.ReceiptsRlp, mocks.MockReceipts.GetRlp(1))).To(BeTrue())
|
||||
Expect(len(superNodePayload.StateNodesRlp)).To(Equal(2))
|
||||
Expect(superNodePayload.StorageNodesRlp).To(Equal(mocks.MockSeeNodePayload.StorageNodesRlp))
|
||||
})
|
||||
})
|
||||
})
|
114
pkg/ipfs/types.go
Normal file
@ -0,0 +1,114 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ipfs
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ipfs/go-block-format"
|
||||
)
|
||||
|
||||
// CIDWrapper is used to package CIDs retrieved from the local Postgres cache for direct fetching of the corresponding IPLDs
|
||||
type CIDWrapper struct {
|
||||
BlockNumber *big.Int
|
||||
Headers []string
|
||||
Uncles []string
|
||||
Transactions []string
|
||||
Receipts []string
|
||||
StateNodes []StateNodeCID
|
||||
StorageNodes []StorageNodeCID
|
||||
}
|
||||
|
||||
// IPLDWrapper is used to package raw IPLD block data fetched from IPFS
|
||||
type IPLDWrapper struct {
|
||||
BlockNumber *big.Int
|
||||
Headers []blocks.Block
|
||||
Uncles []blocks.Block
|
||||
Transactions []blocks.Block
|
||||
Receipts []blocks.Block
|
||||
StateNodes map[common.Hash]blocks.Block
|
||||
StorageNodes map[common.Hash]map[common.Hash]blocks.Block
|
||||
}
|
||||
|
||||
// IPLDPayload is a custom type which packages raw ETH data for the IPFS publisher
|
||||
type IPLDPayload struct {
|
||||
HeaderRLP []byte
|
||||
BlockNumber *big.Int
|
||||
BlockHash common.Hash
|
||||
BlockBody *types.Body
|
||||
TrxMetaData []*TrxMetaData
|
||||
Receipts types.Receipts
|
||||
ReceiptMetaData []*ReceiptMetaData
|
||||
StateNodes map[common.Hash]StateNode
|
||||
StorageNodes map[common.Hash][]StorageNode
|
||||
}
|
||||
|
||||
// StateNode struct used to flag node as leaf or not
|
||||
type StateNode struct {
|
||||
Value []byte
|
||||
Leaf bool
|
||||
}
|
||||
|
||||
// StorageNode struct used to flag node as leaf or not
|
||||
type StorageNode struct {
|
||||
Key common.Hash
|
||||
Value []byte
|
||||
Leaf bool
|
||||
}
|
||||
|
||||
// CIDPayload is a struct to hold all the CIDs and their metadata
|
||||
type CIDPayload struct {
|
||||
BlockNumber string
|
||||
BlockHash common.Hash
|
||||
HeaderCID string
|
||||
UncleCIDs map[common.Hash]string
|
||||
TransactionCIDs map[common.Hash]*TrxMetaData
|
||||
ReceiptCIDs map[common.Hash]*ReceiptMetaData
|
||||
StateNodeCIDs map[common.Hash]StateNodeCID
|
||||
StorageNodeCIDs map[common.Hash][]StorageNodeCID
|
||||
}
|
||||
|
||||
// StateNodeCID is used to associate a leaf flag with a state node cid
|
||||
type StateNodeCID struct {
|
||||
CID string
|
||||
Leaf bool
|
||||
Key string `db:"state_key"`
|
||||
}
|
||||
|
||||
// StorageNodeCID is used to associate a leaf flag with a storage node cid
|
||||
type StorageNodeCID struct {
|
||||
Key string `db:"storage_key"`
|
||||
CID string
|
||||
Leaf bool
|
||||
StateKey string `db:"state_key"`
|
||||
}
|
||||
|
||||
// ReceiptMetaData wraps some additional data around our receipt CIDs for indexing
|
||||
type ReceiptMetaData struct {
|
||||
CID string
|
||||
Topic0s []string
|
||||
ContractAddress string
|
||||
}
|
||||
|
||||
// TrxMetaData wraps some additional data around our transaction CID for indexing
|
||||
type TrxMetaData struct {
|
||||
CID string
|
||||
Src string
|
||||
Dst string
|
||||
}
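To make the keying of these maps concrete, here is an illustrative value for a block with a single transaction; every hash and CID literal below is hypothetical:

package example // hypothetical package, for illustration

import (
	"github.com/ethereum/go-ethereum/common"

	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
)

// exampleCIDPayload shows how the maps are keyed: transaction and receipt metadata
// share the transaction hash as their key, state node CIDs are keyed by state leaf
// key, and storage node CIDs are grouped under the leaf key of the owning account.
func exampleCIDPayload() ipfs.CIDPayload {
	txHash := common.HexToHash("0x01")       // hypothetical
	stateLeafKey := common.HexToHash("0x02") // hypothetical
	return ipfs.CIDPayload{
		BlockNumber: "1",
		BlockHash:   common.HexToHash("0x03"),
		HeaderCID:   "headerCID",
		UncleCIDs:   map[common.Hash]string{},
		TransactionCIDs: map[common.Hash]*ipfs.TrxMetaData{
			txHash: {CID: "trxCID", Src: "0xSenderAddress", Dst: "0xRecipientAddress"},
		},
		ReceiptCIDs: map[common.Hash]*ipfs.ReceiptMetaData{
			txHash: {CID: "rctCID", ContractAddress: "0xContractAddress"},
		},
		StateNodeCIDs: map[common.Hash]ipfs.StateNodeCID{
			stateLeafKey: {CID: "stateCID", Leaf: true},
		},
		StorageNodeCIDs: map[common.Hash][]ipfs.StorageNodeCID{
			stateLeafKey: {{Key: "0x04", CID: "storageCID", Leaf: true}},
		},
	}
}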
|
@ -18,6 +18,7 @@ package plugin
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/config"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/plugin/builder"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/plugin/manager"
|
||||
|
@ -19,13 +19,15 @@ package manager
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"github.com/lib/pq"
|
||||
"github.com/pressly/goose"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/config"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/plugin/helpers"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/lib/pq"
|
||||
"github.com/pressly/goose"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/config"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/plugin/helpers"
|
||||
)
|
||||
|
||||
// Interface for managing the db migrations for plugin transformers
|
||||
|
90
pkg/super_node/api.go
Normal file
@ -0,0 +1,90 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package super_node
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/config"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
)
|
||||
|
||||
// APIName is the namespace used for the state diffing service API
|
||||
const APIName = "vdb"
|
||||
|
||||
// APIVersion is the version of the state diffing service API
|
||||
const APIVersion = "0.0.1"
|
||||
|
||||
// PublicSuperNodeAPI is the public api for the super node
|
||||
type PublicSuperNodeAPI struct {
|
||||
sni NodeInterface
|
||||
}
|
||||
|
||||
// NewPublicSuperNodeAPI creates a new PublicSuperNodeAPI with the provided underlying SyncPublishScreenAndServe process
|
||||
func NewPublicSuperNodeAPI(superNodeInterface NodeInterface) *PublicSuperNodeAPI {
|
||||
return &PublicSuperNodeAPI{
|
||||
sni: superNodeInterface,
|
||||
}
|
||||
}
|
||||
|
||||
// Stream is the public method to set up a subscription that fires off SyncPublishScreenAndServe payloads as they are created
|
||||
func (api *PublicSuperNodeAPI) Stream(ctx context.Context, streamFilters config.Subscription) (*rpc.Subscription, error) {
|
||||
// ensure that the RPC connection supports subscriptions
|
||||
notifier, supported := rpc.NotifierFromContext(ctx)
|
||||
if !supported {
|
||||
return nil, rpc.ErrNotificationsUnsupported
|
||||
}
|
||||
|
||||
// create subscription and start waiting for statediff events
|
||||
rpcSub := notifier.CreateSubscription()
|
||||
|
||||
go func() {
|
||||
// subscribe to events from the SyncPublishScreenAndServe service
|
||||
payloadChannel := make(chan streamer.SuperNodePayload, payloadChanBufferSize)
|
||||
quitChan := make(chan bool, 1)
|
||||
go api.sni.Subscribe(rpcSub.ID, payloadChannel, quitChan, streamFilters)
|
||||
|
||||
// loop and await state diff payloads and relay them to the subscriber with the notifier
|
||||
for {
|
||||
select {
|
||||
case packet := <-payloadChannel:
|
||||
if notifyErr := notifier.Notify(rpcSub.ID, packet); notifyErr != nil {
|
||||
log.Error("Failed to send state diff packet", "err", notifyErr)
|
||||
api.sni.Unsubscribe(rpcSub.ID)
|
||||
return
|
||||
}
|
||||
case <-rpcSub.Err():
|
||||
api.sni.Unsubscribe(rpcSub.ID)
|
||||
return
|
||||
case <-quitChan:
|
||||
// don't need to unsubscribe, SyncPublishScreenAndServe service does so before sending the quit signal
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return rpcSub, nil
|
||||
}
|
||||
|
||||
// Node is a public rpc method to allow transformers to fetch the Geth node info for the super node
|
||||
func (api *PublicSuperNodeAPI) Node() core.Node {
|
||||
return api.sni.Node()
|
||||
}
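For reference, a sketch of how a transformer might subscribe to this API from the client side; the endpoint is supplied by the caller (a hypothetical websocket address), and the example assumes the super node registers this API under the "vdb" namespace shown above:

package example // hypothetical package, for illustration

import (
	"context"

	"github.com/ethereum/go-ethereum/rpc"
	log "github.com/sirupsen/logrus"

	"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
	"github.com/vulcanize/vulcanizedb/pkg/config"
)

// streamPayloads dials the super node and relays SuperNodePayloads until the
// subscription errors out; the endpoint and filter settings come from the caller.
func streamPayloads(endpoint string, filters config.Subscription) error {
	client, err := rpc.Dial(endpoint) // e.g. a ws:// address exposed by the super node
	if err != nil {
		return err
	}
	payloads := make(chan streamer.SuperNodePayload)
	// "vdb" matches APIName; "stream" selects the Stream subscription method.
	sub, err := client.Subscribe(context.Background(), "vdb", payloads, "stream", filters)
	if err != nil {
		return err
	}
	for {
		select {
		case payload := <-payloads:
			log.Infof("received super node payload for block %s", payload.BlockNumber.String())
		case subErr := <-sub.Err():
			return subErr
		}
	}
}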
|
219
pkg/super_node/backfiller.go
Normal file
@ -0,0 +1,219 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package super_node
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/libraries/shared/fetcher"
|
||||
"github.com/vulcanize/vulcanizedb/libraries/shared/storage/utils"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultMaxBatchSize uint64 = 100
|
||||
defaultMaxBatchNumber int64 = 10
|
||||
)
|
||||
|
||||
// BackFillInterface for filling in gaps in the super node
|
||||
type BackFillInterface interface {
|
||||
// Method for the super node to periodically check for and fill in gaps in its data using an archival node
|
||||
FillGaps(wg *sync.WaitGroup, quitChan <-chan bool)
|
||||
}
|
||||
|
||||
// BackFillService for filling in gaps in the super node
|
||||
type BackFillService struct {
|
||||
// Interface for converting statediff payloads into ETH-IPLD object payloads
|
||||
Converter ipfs.PayloadConverter
|
||||
// Interface for publishing the ETH-IPLD payloads to IPFS
|
||||
Publisher ipfs.IPLDPublisher
|
||||
// Interface for indexing the CIDs of the published ETH-IPLDs in Postgres
|
||||
Repository CIDRepository
|
||||
// Interface for searching and retrieving CIDs from Postgres index
|
||||
Retriever CIDRetriever
|
||||
// State-diff fetcher; needs to be configured with an archival core.RPCClient
|
||||
Fetcher fetcher.StateDiffFetcher
|
||||
// Check frequency
|
||||
GapCheckFrequency time.Duration
|
||||
// size of batch fetches
|
||||
BatchSize uint64
|
||||
}
|
||||
|
||||
// NewBackFillService returns a new BackFillInterface
|
||||
func NewBackFillService(ipfsPath string, db *postgres.DB, archivalNodeRPCClient core.RPCClient, freq time.Duration, batchSize uint64) (BackFillInterface, error) {
|
||||
publisher, err := ipfs.NewIPLDPublisher(ipfsPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &BackFillService{
|
||||
Repository: NewCIDRepository(db),
|
||||
Converter: ipfs.NewPayloadConverter(params.MainnetChainConfig),
|
||||
Publisher: publisher,
|
||||
Retriever: NewCIDRetriever(db),
|
||||
Fetcher: fetcher.NewStateDiffFetcher(archivalNodeRPCClient),
|
||||
GapCheckFrequency: freq,
|
||||
BatchSize: batchSize,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// FillGaps periodically checks for and fills in gaps in the super node db
|
||||
// this requires a core.RPCClient that is pointed at an archival node with the StateDiffAt method exposed
|
||||
func (bfs *BackFillService) FillGaps(wg *sync.WaitGroup, quitChan <-chan bool) {
|
||||
ticker := time.NewTicker(bfs.GapCheckFrequency)
|
||||
wg.Add(1)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-quitChan:
|
||||
log.Info("quiting FillGaps process")
|
||||
wg.Done()
|
||||
return
|
||||
case <-ticker.C:
|
||||
log.Info("searching for gaps in the super node database")
|
||||
startingBlock, firstBlockErr := bfs.Retriever.RetrieveFirstBlockNumber()
|
||||
if firstBlockErr != nil {
|
||||
log.Error(firstBlockErr)
|
||||
continue
|
||||
}
|
||||
if startingBlock != 1 {
|
||||
log.Info("found gap at the beginning of the sync")
|
||||
bfs.fillGaps(1, uint64(startingBlock-1))
|
||||
}
|
||||
|
||||
gaps, gapErr := bfs.Retriever.RetrieveGapsInData()
|
||||
if gapErr != nil {
|
||||
log.Error(gapErr)
|
||||
continue
|
||||
}
|
||||
for _, gap := range gaps {
|
||||
bfs.fillGaps(gap[0], gap[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
log.Info("fillGaps goroutine successfully spun up")
|
||||
}
|
||||
|
||||
func (bfs *BackFillService) fillGaps(startingBlock, endingBlock uint64) {
|
||||
log.Infof("going to fill in gap from %d to %d", startingBlock, endingBlock)
|
||||
errChan := make(chan error)
|
||||
done := make(chan bool)
|
||||
backFillInitErr := bfs.backFill(startingBlock, endingBlock, errChan, done)
|
||||
if backFillInitErr != nil {
|
||||
log.Error(backFillInitErr)
|
||||
return
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case err := <-errChan:
|
||||
log.Error(err)
|
||||
case <-done:
|
||||
log.Infof("finished filling in gap from %d to %d", startingBlock, endingBlock)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// backFill fetches statediff payloads over a range of blocks, then converts, publishes, and indexes them
|
||||
// It splits a large range up into smaller chunks, batch fetching and processing those chunks concurrently
|
||||
func (bfs *BackFillService) backFill(startingBlock, endingBlock uint64, errChan chan error, done chan bool) error {
|
||||
if endingBlock < startingBlock {
|
||||
return errors.New("backfill: ending block number needs to be greater than starting block number")
|
||||
}
|
||||
// break the range up into bins of smaller ranges
|
||||
blockRangeBins, err := utils.GetBlockHeightBins(startingBlock, endingBlock, bfs.BatchSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// int64 for atomic incrementing and decrementing to track the number of active processing goroutines we have
|
||||
var activeCount int64
|
||||
// channel for processing goroutines to signal when they are done
|
||||
processingDone := make(chan [2]uint64)
|
||||
forwardDone := make(chan bool)
|
||||
|
||||
// for each block range bin spin up a goroutine to batch fetch and process state diffs for that range
|
||||
go func() {
|
||||
for _, blockHeights := range blockRangeBins {
|
||||
// if we have reached our limit of active goroutines
|
||||
// wait for one to finish before starting the next
|
||||
if atomic.AddInt64(&activeCount, 1) > defaultMaxBatchNumber {
|
||||
// this blocks until a process signals it has finished
|
||||
<-forwardDone
|
||||
}
|
||||
go func(blockHeights []uint64) {
|
||||
payloads, fetchErr := bfs.Fetcher.FetchStateDiffsAt(blockHeights)
|
||||
if fetchErr != nil {
|
||||
errChan <- fetchErr
|
||||
}
|
||||
for _, payload := range payloads {
|
||||
ipldPayload, convertErr := bfs.Converter.Convert(payload)
|
||||
if convertErr != nil {
|
||||
errChan <- convertErr
|
||||
continue
|
||||
}
|
||||
cidPayload, publishErr := bfs.Publisher.Publish(ipldPayload)
|
||||
if publishErr != nil {
|
||||
errChan <- publishErr
|
||||
continue
|
||||
}
|
||||
indexErr := bfs.Repository.Index(cidPayload)
|
||||
if indexErr != nil {
|
||||
errChan <- indexErr
|
||||
}
|
||||
}
|
||||
// when this goroutine is done, send out a signal
|
||||
processingDone <- [2]uint64{blockHeights[0], blockHeights[len(blockHeights)-1]}
|
||||
}(blockHeights)
|
||||
}
|
||||
}()
|
||||
|
||||
// goroutine that listens on the processingDone chan
|
||||
// keeps track of the number of processing goroutines that have finished
|
||||
// when they have all finished, sends the final signal out
|
||||
go func() {
|
||||
goroutinesFinished := 0
|
||||
for {
|
||||
select {
|
||||
case doneWithHeights := <-processingDone:
|
||||
atomic.AddInt64(&activeCount, -1)
|
||||
select {
|
||||
// if we are waiting for a process to finish, signal that one has
|
||||
case forwardDone <- true:
|
||||
default:
|
||||
}
|
||||
log.Infof("finished filling in gap sub-bin from %d to %d", doneWithHeights[0], doneWithHeights[1])
|
||||
goroutinesFinished++
|
||||
if goroutinesFinished >= len(blockRangeBins) {
|
||||
done <- true
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
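A minimal wiring sketch; the IPFS path, Postgres handle, and archival RPC client are assumed to be configured elsewhere, and the one-minute check frequency is arbitrary:

package example // hypothetical package, for illustration

import (
	"sync"
	"time"

	"github.com/vulcanize/vulcanizedb/pkg/core"
	"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
	"github.com/vulcanize/vulcanizedb/pkg/super_node"
)

// runBackFill spins up the periodic gap-check process and blocks until a quit
// signal is sent on the provided channel.
func runBackFill(ipfsPath string, db *postgres.DB, archivalClient core.RPCClient, quit chan bool) error {
	backFiller, err := super_node.NewBackFillService(ipfsPath, db, archivalClient, time.Minute, super_node.DefaultMaxBatchSize)
	if err != nil {
		return err
	}
	wg := &sync.WaitGroup{}
	backFiller.FillGaps(wg, quit)
	wg.Wait()
	return nil
}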
|
190
pkg/super_node/backfiller_test.go
Normal file
@ -0,0 +1,190 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package super_node_test
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/statediff"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
mocks2 "github.com/vulcanize/vulcanizedb/libraries/shared/mocks"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/super_node"
|
||||
mocks3 "github.com/vulcanize/vulcanizedb/pkg/super_node/mocks"
|
||||
)
|
||||
|
||||
var _ = Describe("BackFiller", func() {
|
||||
Describe("FillGaps", func() {
|
||||
It("Periodically checks for and fills in gaps in the super node's data", func() {
|
||||
mockCidRepo := &mocks3.CIDRepository{
|
||||
ReturnErr: nil,
|
||||
}
|
||||
mockPublisher := &mocks.IterativeIPLDPublisher{
|
||||
ReturnCIDPayload: []*ipfs.CIDPayload{mocks.MockCIDPayload, mocks.MockCIDPayload},
|
||||
ReturnErr: nil,
|
||||
}
|
||||
mockConverter := &mocks.IterativePayloadConverter{
|
||||
ReturnIPLDPayload: []*ipfs.IPLDPayload{mocks.MockIPLDPayload, mocks.MockIPLDPayload},
|
||||
ReturnErr: nil,
|
||||
}
|
||||
mockRetriever := &mocks3.MockCIDRetriever{
|
||||
FirstBlockNumberToReturn: 1,
|
||||
GapsToRetrieve: [][2]uint64{
|
||||
{
|
||||
100, 101,
|
||||
},
|
||||
},
|
||||
}
|
||||
mockFetcher := &mocks2.StateDiffFetcher{
|
||||
PayloadsToReturn: map[uint64]statediff.Payload{
|
||||
100: mocks.MockStateDiffPayload,
|
||||
101: mocks.MockStateDiffPayload,
|
||||
},
|
||||
}
|
||||
backfiller := &super_node.BackFillService{
|
||||
Repository: mockCidRepo,
|
||||
Publisher: mockPublisher,
|
||||
Converter: mockConverter,
|
||||
Fetcher: mockFetcher,
|
||||
Retriever: mockRetriever,
|
||||
GapCheckFrequency: time.Second * 2,
|
||||
BatchSize: super_node.DefaultMaxBatchSize,
|
||||
}
|
||||
wg := &sync.WaitGroup{}
|
||||
quitChan := make(chan bool, 1)
|
||||
backfiller.FillGaps(wg, quitChan)
|
||||
time.Sleep(time.Second * 3)
|
||||
quitChan <- true
|
||||
Expect(len(mockCidRepo.PassedCIDPayload)).To(Equal(2))
|
||||
Expect(mockCidRepo.PassedCIDPayload[0]).To(Equal(mocks.MockCIDPayload))
|
||||
Expect(mockCidRepo.PassedCIDPayload[1]).To(Equal(mocks.MockCIDPayload))
|
||||
Expect(len(mockPublisher.PassedIPLDPayload)).To(Equal(2))
|
||||
Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockIPLDPayload))
|
||||
Expect(mockPublisher.PassedIPLDPayload[1]).To(Equal(mocks.MockIPLDPayload))
|
||||
Expect(len(mockConverter.PassedStatediffPayload)).To(Equal(2))
|
||||
Expect(mockConverter.PassedStatediffPayload[0]).To(Equal(mocks.MockStateDiffPayload))
|
||||
Expect(mockConverter.PassedStatediffPayload[1]).To(Equal(mocks.MockStateDiffPayload))
|
||||
Expect(mockRetriever.CalledTimes).To(Equal(1))
|
||||
Expect(len(mockFetcher.CalledAtBlockHeights)).To(Equal(1))
|
||||
Expect(mockFetcher.CalledAtBlockHeights[0]).To(Equal([]uint64{100, 101}))
|
||||
})
|
||||
|
||||
It("Works for single block `ranges`", func() {
|
||||
mockCidRepo := &mocks3.CIDRepository{
|
||||
ReturnErr: nil,
|
||||
}
|
||||
mockPublisher := &mocks.IterativeIPLDPublisher{
|
||||
ReturnCIDPayload: []*ipfs.CIDPayload{mocks.MockCIDPayload},
|
||||
ReturnErr: nil,
|
||||
}
|
||||
mockConverter := &mocks.IterativePayloadConverter{
|
||||
ReturnIPLDPayload: []*ipfs.IPLDPayload{mocks.MockIPLDPayload},
|
||||
ReturnErr: nil,
|
||||
}
|
||||
mockRetriever := &mocks3.MockCIDRetriever{
|
||||
FirstBlockNumberToReturn: 1,
|
||||
GapsToRetrieve: [][2]uint64{
|
||||
{
|
||||
100, 100,
|
||||
},
|
||||
},
|
||||
}
|
||||
mockFetcher := &mocks2.StateDiffFetcher{
|
||||
PayloadsToReturn: map[uint64]statediff.Payload{
|
||||
100: mocks.MockStateDiffPayload,
|
||||
},
|
||||
}
|
||||
backfiller := &super_node.BackFillService{
|
||||
Repository: mockCidRepo,
|
||||
Publisher: mockPublisher,
|
||||
Converter: mockConverter,
|
||||
Fetcher: mockFetcher,
|
||||
Retriever: mockRetriever,
|
||||
GapCheckFrequency: time.Second * 2,
|
||||
BatchSize: super_node.DefaultMaxBatchSize,
|
||||
}
|
||||
wg := &sync.WaitGroup{}
|
||||
quitChan := make(chan bool, 1)
|
||||
backfiller.FillGaps(wg, quitChan)
|
||||
time.Sleep(time.Second * 3)
|
||||
quitChan <- true
|
||||
Expect(len(mockCidRepo.PassedCIDPayload)).To(Equal(1))
|
||||
Expect(mockCidRepo.PassedCIDPayload[0]).To(Equal(mocks.MockCIDPayload))
|
||||
Expect(len(mockPublisher.PassedIPLDPayload)).To(Equal(1))
|
||||
Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockIPLDPayload))
|
||||
Expect(len(mockConverter.PassedStatediffPayload)).To(Equal(1))
|
||||
Expect(mockConverter.PassedStatediffPayload[0]).To(Equal(mocks.MockStateDiffPayload))
|
||||
Expect(mockRetriever.CalledTimes).To(Equal(1))
|
||||
Expect(len(mockFetcher.CalledAtBlockHeights)).To(Equal(1))
|
||||
Expect(mockFetcher.CalledAtBlockHeights[0]).To(Equal([]uint64{100}))
|
||||
})
|
||||
|
||||
It("Finds beginning gap", func() {
|
||||
mockCidRepo := &mocks3.CIDRepository{
|
||||
ReturnErr: nil,
|
||||
}
|
||||
mockPublisher := &mocks.IterativeIPLDPublisher{
|
||||
ReturnCIDPayload: []*ipfs.CIDPayload{mocks.MockCIDPayload, mocks.MockCIDPayload},
|
||||
ReturnErr: nil,
|
||||
}
|
||||
mockConverter := &mocks.IterativePayloadConverter{
|
||||
ReturnIPLDPayload: []*ipfs.IPLDPayload{mocks.MockIPLDPayload, mocks.MockIPLDPayload},
|
||||
ReturnErr: nil,
|
||||
}
|
||||
mockRetriever := &mocks3.MockCIDRetriever{
|
||||
FirstBlockNumberToReturn: 3,
|
||||
GapsToRetrieve: [][2]uint64{},
|
||||
}
|
||||
mockFetcher := &mocks2.StateDiffFetcher{
|
||||
PayloadsToReturn: map[uint64]statediff.Payload{
|
||||
1: mocks.MockStateDiffPayload,
|
||||
2: mocks.MockStateDiffPayload,
|
||||
},
|
||||
}
|
||||
backfiller := &super_node.BackFillService{
|
||||
Repository: mockCidRepo,
|
||||
Publisher: mockPublisher,
|
||||
Converter: mockConverter,
|
||||
Fetcher: mockFetcher,
|
||||
Retriever: mockRetriever,
|
||||
GapCheckFrequency: time.Second * 2,
|
||||
BatchSize: super_node.DefaultMaxBatchSize,
|
||||
}
|
||||
wg := &sync.WaitGroup{}
|
||||
quitChan := make(chan bool, 1)
|
||||
backfiller.FillGaps(wg, quitChan)
|
||||
time.Sleep(time.Second * 3)
|
||||
quitChan <- true
|
||||
Expect(len(mockCidRepo.PassedCIDPayload)).To(Equal(2))
|
||||
Expect(mockCidRepo.PassedCIDPayload[0]).To(Equal(mocks.MockCIDPayload))
|
||||
Expect(mockCidRepo.PassedCIDPayload[1]).To(Equal(mocks.MockCIDPayload))
|
||||
Expect(len(mockPublisher.PassedIPLDPayload)).To(Equal(2))
|
||||
Expect(mockPublisher.PassedIPLDPayload[0]).To(Equal(mocks.MockIPLDPayload))
|
||||
Expect(mockPublisher.PassedIPLDPayload[1]).To(Equal(mocks.MockIPLDPayload))
|
||||
Expect(len(mockConverter.PassedStatediffPayload)).To(Equal(2))
|
||||
Expect(mockConverter.PassedStatediffPayload[0]).To(Equal(mocks.MockStateDiffPayload))
|
||||
Expect(mockConverter.PassedStatediffPayload[1]).To(Equal(mocks.MockStateDiffPayload))
|
||||
Expect(mockRetriever.CalledTimes).To(Equal(1))
|
||||
Expect(len(mockFetcher.CalledAtBlockHeights)).To(Equal(1))
|
||||
Expect(mockFetcher.CalledAtBlockHeights[0]).To(Equal([]uint64{1, 2}))
|
||||
})
|
||||
})
|
||||
})
|
246
pkg/super_node/filterer.go
Normal file
@ -0,0 +1,246 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package super_node
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/config"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/ipfs"
|
||||
)
|
||||
|
||||
// ResponseFilterer is the interface used to screen eth data and package appropriate data into a response payload
|
||||
type ResponseFilterer interface {
|
||||
FilterResponse(streamFilters config.Subscription, payload ipfs.IPLDPayload) (streamer.SuperNodePayload, error)
|
||||
}
|
||||
|
||||
// Filterer is the underlying struct for the ResponseFilterer interface
|
||||
type Filterer struct{}
|
||||
|
||||
// NewResponseFilterer creates a new Filterer satisfying the ResponseFilterer interface
|
||||
func NewResponseFilterer() *Filterer {
|
||||
return &Filterer{}
|
||||
}
|
||||
|
||||
// FilterResponse is used to filter through eth data to extract and package requested data into a Payload
|
||||
func (s *Filterer) FilterResponse(streamFilters config.Subscription, payload ipfs.IPLDPayload) (streamer.SuperNodePayload, error) {
|
||||
response := new(streamer.SuperNodePayload)
|
||||
headersErr := s.filterHeaders(streamFilters, response, payload)
|
||||
if headersErr != nil {
|
||||
return streamer.SuperNodePayload{}, headersErr
|
||||
}
|
||||
txHashes, trxsErr := s.filterTransactions(streamFilters, response, payload)
|
||||
if trxsErr != nil {
|
||||
return streamer.SuperNodePayload{}, trxsErr
|
||||
}
|
||||
rctsErr := s.filterReceipts(streamFilters, response, payload, txHashes)
|
||||
if rctsErr != nil {
|
||||
return streamer.SuperNodePayload{}, rctsErr
|
||||
}
|
||||
stateErr := s.filterState(streamFilters, response, payload)
|
||||
if stateErr != nil {
|
||||
return streamer.SuperNodePayload{}, stateErr
|
||||
}
|
||||
storageErr := s.filterStorage(streamFilters, response, payload)
|
||||
if storageErr != nil {
|
||||
return streamer.SuperNodePayload{}, storageErr
|
||||
}
|
||||
response.BlockNumber = payload.BlockNumber
|
||||
return *response, nil
|
||||
}
|
||||
|
||||
func (s *Filterer) filterHeaders(streamFilters config.Subscription, response *streamer.SuperNodePayload, payload ipfs.IPLDPayload) error {
|
||||
if !streamFilters.HeaderFilter.Off && checkRange(streamFilters.StartingBlock.Int64(), streamFilters.EndingBlock.Int64(), payload.BlockNumber.Int64()) {
|
||||
response.HeadersRlp = append(response.HeadersRlp, payload.HeaderRLP)
|
||||
if streamFilters.HeaderFilter.Uncles {
|
||||
for _, uncle := range payload.BlockBody.Uncles {
|
||||
uncleRlp, err := rlp.EncodeToBytes(uncle)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.UnclesRlp = append(response.UnclesRlp, uncleRlp)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
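// checkRange treats a non-positive end as an open-ended upper bound, so a subscription
// with EndingBlock <= 0 matches every block at or after StartingBlock.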
func checkRange(start, end, actual int64) bool {
|
||||
if (end <= 0 || end >= actual) && start <= actual {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *Filterer) filterTransactions(streamFilters config.Subscription, response *streamer.SuperNodePayload, payload ipfs.IPLDPayload) ([]common.Hash, error) {
|
||||
trxHashes := make([]common.Hash, 0, len(payload.BlockBody.Transactions))
|
||||
if !streamFilters.TrxFilter.Off && checkRange(streamFilters.StartingBlock.Int64(), streamFilters.EndingBlock.Int64(), payload.BlockNumber.Int64()) {
|
||||
for i, trx := range payload.BlockBody.Transactions {
|
||||
if checkTransactions(streamFilters.TrxFilter.Src, streamFilters.TrxFilter.Dst, payload.TrxMetaData[i].Src, payload.TrxMetaData[i].Dst) {
|
||||
trxBuffer := new(bytes.Buffer)
|
||||
err := trx.EncodeRLP(trxBuffer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
trxHashes = append(trxHashes, trx.Hash())
|
||||
response.TransactionsRlp = append(response.TransactionsRlp, trxBuffer.Bytes())
|
||||
}
|
||||
}
|
||||
}
|
||||
return trxHashes, nil
|
||||
}
|
||||
|
||||
func checkTransactions(wantedSrc, wantedDst []string, actualSrc, actualDst string) bool {
|
||||
// If we aren't filtering for any addresses, every transaction is a go
|
||||
if len(wantedDst) == 0 && len(wantedSrc) == 0 {
|
||||
return true
|
||||
}
|
||||
for _, src := range wantedSrc {
|
||||
if src == actualSrc {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for _, dst := range wantedDst {
|
||||
if dst == actualDst {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *Filterer) filterReceipts(streamFilters config.Subscription, response *streamer.SuperNodePayload, payload ipfs.IPLDPayload, trxHashes []common.Hash) error {
|
||||
if !streamFilters.ReceiptFilter.Off && checkRange(streamFilters.StartingBlock.Int64(), streamFilters.EndingBlock.Int64(), payload.BlockNumber.Int64()) {
|
||||
for i, receipt := range payload.Receipts {
|
||||
if checkReceipts(receipt, streamFilters.ReceiptFilter.Topic0s, payload.ReceiptMetaData[i].Topic0s, streamFilters.ReceiptFilter.Contracts, payload.ReceiptMetaData[i].ContractAddress, trxHashes) {
|
||||
receiptForStorage := (*types.ReceiptForStorage)(receipt)
|
||||
receiptBuffer := new(bytes.Buffer)
|
||||
err := receiptForStorage.EncodeRLP(receiptBuffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.ReceiptsRlp = append(response.ReceiptsRlp, receiptBuffer.Bytes())
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkReceipts(rct *types.Receipt, wantedTopics, actualTopics, wantedContracts []string, actualContract string, wantedTrxHashes []common.Hash) bool {
|
||||
// If we aren't filtering for any topics or contracts, all topics are a go
|
||||
if len(wantedTopics) == 0 && len(wantedContracts) == 0 {
|
||||
return true
|
||||
}
|
||||
// No matter what filters we have, we keep receipts for the trxs we are interested in
|
||||
for _, wantedTrxHash := range wantedTrxHashes {
|
||||
if bytes.Equal(wantedTrxHash.Bytes(), rct.TxHash.Bytes()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if len(wantedContracts) == 0 {
|
||||
// We keep all receipts that have logs we are interested in
|
||||
for _, wantedTopic := range wantedTopics {
|
||||
for _, actualTopic := range actualTopics {
|
||||
if wantedTopic == actualTopic {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
} else { // Otherwise, keep receipts that belong to one of the specified contracts; if topics are also specified, require a matching topic as well
|
||||
for _, wantedContract := range wantedContracts {
|
||||
if wantedContract == actualContract {
|
||||
if len(wantedTopics) == 0 {
|
||||
return true
|
||||
}
|
||||
// Or if we have contracts and topics to filter on we only keep receipts that satisfy both conditions
|
||||
for _, wantedTopic := range wantedTopics {
|
||||
for _, actualTopic := range actualTopics {
|
||||
if wantedTopic == actualTopic {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *Filterer) filterState(streamFilters config.Subscription, response *streamer.SuperNodePayload, payload ipfs.IPLDPayload) error {
|
||||
if !streamFilters.StateFilter.Off && checkRange(streamFilters.StartingBlock.Int64(), streamFilters.EndingBlock.Int64(), payload.BlockNumber.Int64()) {
|
||||
response.StateNodesRlp = make(map[common.Hash][]byte)
|
||||
keyFilters := make([]common.Hash, 0, len(streamFilters.StateFilter.Addresses))
|
||||
for _, addr := range streamFilters.StateFilter.Addresses {
|
||||
keyFilter := ipfs.AddressToKey(common.HexToAddress(addr))
|
||||
keyFilters = append(keyFilters, keyFilter)
|
||||
}
|
||||
for key, stateNode := range payload.StateNodes {
|
||||
if checkNodeKeys(keyFilters, key) {
|
||||
if stateNode.Leaf || streamFilters.StateFilter.IntermediateNodes {
|
||||
response.StateNodesRlp[key] = stateNode.Value
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkNodeKeys(wantedKeys []common.Hash, actualKey common.Hash) bool {
|
||||
// If we aren't filtering for any specific keys, all nodes are a go
|
||||
if len(wantedKeys) == 0 {
|
||||
return true
|
||||
}
|
||||
for _, key := range wantedKeys {
|
||||
if bytes.Equal(key.Bytes(), actualKey.Bytes()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *Filterer) filterStorage(streamFilters config.Subscription, response *streamer.SuperNodePayload, payload ipfs.IPLDPayload) error {
|
||||
if !streamFilters.StorageFilter.Off && checkRange(streamFilters.StartingBlock.Int64(), streamFilters.EndingBlock.Int64(), payload.BlockNumber.Int64()) {
|
||||
response.StorageNodesRlp = make(map[common.Hash]map[common.Hash][]byte)
|
||||
stateKeyFilters := make([]common.Hash, 0, len(streamFilters.StorageFilter.Addresses))
|
||||
for _, addr := range streamFilters.StorageFilter.Addresses {
|
||||
keyFilter := ipfs.AddressToKey(common.HexToAddress(addr))
|
||||
stateKeyFilters = append(stateKeyFilters, keyFilter)
|
||||
}
|
||||
storageKeyFilters := make([]common.Hash, 0, len(streamFilters.StorageFilter.StorageKeys))
|
||||
for _, store := range streamFilters.StorageFilter.StorageKeys {
|
||||
keyFilter := ipfs.HexToKey(store)
|
||||
storageKeyFilters = append(storageKeyFilters, keyFilter)
|
||||
}
|
||||
for stateKey, storageNodes := range payload.StorageNodes {
|
||||
if checkNodeKeys(stateKeyFilters, stateKey) {
|
||||
response.StorageNodesRlp[stateKey] = make(map[common.Hash][]byte)
|
||||
for _, storageNode := range storageNodes {
|
||||
if checkNodeKeys(storageKeyFilters, storageNode.Key) {
|
||||
response.StorageNodesRlp[stateKey][storageNode.Key] = storageNode.Value
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
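A usage sketch showing one way a receipt-only subscription might be filtered; it assumes StartingBlock and EndingBlock are *big.Int and that the nested filter structs expose the fields referenced above (the contract address is hypothetical):

package example // hypothetical package, for illustration

import (
	"math/big"

	"github.com/vulcanize/vulcanizedb/libraries/shared/streamer"
	"github.com/vulcanize/vulcanizedb/pkg/config"
	"github.com/vulcanize/vulcanizedb/pkg/ipfs"
	"github.com/vulcanize/vulcanizedb/pkg/super_node"
)

// filterReceiptsOnly screens a converted payload down to receipts emitted by a
// single contract, across all block heights (a non-positive EndingBlock is open-ended).
func filterReceiptsOnly(payload ipfs.IPLDPayload) (streamer.SuperNodePayload, error) {
	var sub config.Subscription
	sub.StartingBlock = big.NewInt(0)
	sub.EndingBlock = big.NewInt(0)
	sub.HeaderFilter.Off = true
	sub.TrxFilter.Off = true
	sub.StateFilter.Off = true
	sub.StorageFilter.Off = true
	sub.ReceiptFilter.Contracts = []string{"0x0000000000000000000000000000000000000001"}
	return super_node.NewResponseFilterer().FilterResponse(sub, payload)
}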
|
153
pkg/super_node/filterer_test.go
Normal file
@ -0,0 +1,153 @@
|
||||
// VulcanizeDB
|
||||
// Copyright © 2019 Vulcanize
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package super_node_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/super_node"
|
||||
)
|
||||
|
||||
var (
|
||||
filterer super_node.ResponseFilterer
|
||||
expectedRctForStorageRLP1 []byte
|
||||
expectedRctForStorageRLP2 []byte
|
||||
)
|
||||
|
||||
var _ = Describe("Filterer", func() {
|
||||
Describe("FilterResponse", func() {
|
||||
BeforeEach(func() {
|
||||
filterer = super_node.NewResponseFilterer()
|
||||
expectedRctForStorageRLP1 = getReceiptForStorageRLP(mocks.MockReceipts, 0)
|
||||
expectedRctForStorageRLP2 = getReceiptForStorageRLP(mocks.MockReceipts, 1)
|
||||
})
|
||||
|
||||
It("Transcribes all the data from the IPLDPayload into the SuperNodePayload if given an open filter", func() {
|
||||
superNodePayload, err := filterer.FilterResponse(openFilter, *mocks.MockIPLDPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(superNodePayload.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
|
||||
Expect(superNodePayload.HeadersRlp).To(Equal(mocks.MockSeeNodePayload.HeadersRlp))
|
||||
Expect(superNodePayload.UnclesRlp).To(Equal(mocks.MockSeeNodePayload.UnclesRlp))
|
||||
Expect(len(superNodePayload.TransactionsRlp)).To(Equal(2))
|
||||
Expect(super_node.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
|
||||
Expect(super_node.ListContainsBytes(superNodePayload.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
|
||||
Expect(len(superNodePayload.ReceiptsRlp)).To(Equal(2))
|
||||
Expect(super_node.ListContainsBytes(superNodePayload.ReceiptsRlp, expectedRctForStorageRLP1)).To(BeTrue())
|
||||
Expect(super_node.ListContainsBytes(superNodePayload.ReceiptsRlp, expectedRctForStorageRLP2)).To(BeTrue())
|
||||
Expect(len(superNodePayload.StateNodesRlp)).To(Equal(2))
|
||||
Expect(superNodePayload.StateNodesRlp[mocks.ContractLeafKey]).To(Equal(mocks.ValueBytes))
|
||||
Expect(superNodePayload.StateNodesRlp[mocks.AnotherContractLeafKey]).To(Equal(mocks.AnotherValueBytes))
|
||||
Expect(superNodePayload.StorageNodesRlp).To(Equal(mocks.MockSeeNodePayload.StorageNodesRlp))
|
||||
})
|
||||
|
||||
It("Applies filters from the provided config.Subscription", func() {
|
||||
superNodePayload1, err := filterer.FilterResponse(rctContractFilter, *mocks.MockIPLDPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(superNodePayload1.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
|
||||
Expect(len(superNodePayload1.HeadersRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload1.UnclesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload1.TransactionsRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload1.StorageNodesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload1.StateNodesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload1.ReceiptsRlp)).To(Equal(1))
|
||||
Expect(superNodePayload1.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP2))
|
||||
|
||||
superNodePayload2, err := filterer.FilterResponse(rctTopicsFilter, *mocks.MockIPLDPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(superNodePayload2.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
|
||||
Expect(len(superNodePayload2.HeadersRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload2.UnclesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload2.TransactionsRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload2.StorageNodesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload2.StateNodesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload2.ReceiptsRlp)).To(Equal(1))
|
||||
Expect(superNodePayload2.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP1))
|
||||
|
||||
superNodePayload3, err := filterer.FilterResponse(rctTopicsAndContractFilter, *mocks.MockIPLDPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(superNodePayload3.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
|
||||
Expect(len(superNodePayload3.HeadersRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload3.UnclesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload3.TransactionsRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload3.StorageNodesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload3.StateNodesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload3.ReceiptsRlp)).To(Equal(1))
|
||||
Expect(superNodePayload3.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP1))
|
||||
|
||||
superNodePayload4, err := filterer.FilterResponse(rctContractsAndTopicFilter, *mocks.MockIPLDPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(superNodePayload4.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
|
||||
Expect(len(superNodePayload4.HeadersRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload4.UnclesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload4.TransactionsRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload4.StorageNodesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload4.StateNodesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload4.ReceiptsRlp)).To(Equal(1))
|
||||
Expect(superNodePayload4.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP2))
|
||||
|
||||
superNodePayload5, err := filterer.FilterResponse(rctsForAllCollectedTrxs, *mocks.MockIPLDPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(superNodePayload5.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
|
||||
Expect(len(superNodePayload5.HeadersRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload5.UnclesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload5.TransactionsRlp)).To(Equal(2))
|
||||
Expect(super_node.ListContainsBytes(superNodePayload5.TransactionsRlp, mocks.MockTransactions.GetRlp(0))).To(BeTrue())
|
||||
Expect(super_node.ListContainsBytes(superNodePayload5.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
|
||||
Expect(len(superNodePayload5.StorageNodesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload5.StateNodesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload5.ReceiptsRlp)).To(Equal(2))
|
||||
Expect(super_node.ListContainsBytes(superNodePayload5.ReceiptsRlp, expectedRctForStorageRLP1)).To(BeTrue())
|
||||
Expect(super_node.ListContainsBytes(superNodePayload5.ReceiptsRlp, expectedRctForStorageRLP2)).To(BeTrue())
|
||||
|
||||
superNodePayload6, err := filterer.FilterResponse(rctsForSelectCollectedTrxs, *mocks.MockIPLDPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(superNodePayload6.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
|
||||
Expect(len(superNodePayload6.HeadersRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload6.UnclesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload6.TransactionsRlp)).To(Equal(1))
|
||||
Expect(super_node.ListContainsBytes(superNodePayload6.TransactionsRlp, mocks.MockTransactions.GetRlp(1))).To(BeTrue())
|
||||
Expect(len(superNodePayload6.StorageNodesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload6.StateNodesRlp)).To(Equal(0))
|
||||
Expect(len(superNodePayload6.ReceiptsRlp)).To(Equal(1))
|
||||
Expect(superNodePayload6.ReceiptsRlp[0]).To(Equal(expectedRctForStorageRLP2))
|
||||

            superNodePayload7, err := filterer.FilterResponse(stateFilter, *mocks.MockIPLDPayload)
            Expect(err).ToNot(HaveOccurred())
            Expect(superNodePayload7.BlockNumber.Int64()).To(Equal(mocks.MockSeeNodePayload.BlockNumber.Int64()))
            Expect(len(superNodePayload7.HeadersRlp)).To(Equal(0))
            Expect(len(superNodePayload7.UnclesRlp)).To(Equal(0))
            Expect(len(superNodePayload7.TransactionsRlp)).To(Equal(0))
            Expect(len(superNodePayload7.StorageNodesRlp)).To(Equal(0))
            Expect(len(superNodePayload7.ReceiptsRlp)).To(Equal(0))
            Expect(len(superNodePayload7.StateNodesRlp)).To(Equal(1))
            Expect(superNodePayload7.StateNodesRlp[mocks.ContractLeafKey]).To(Equal(mocks.ValueBytes))
        })
    })
})

func getReceiptForStorageRLP(receipts types.Receipts, i int) []byte {
    receiptForStorage := (*types.ReceiptForStorage)(receipts[i])
    receiptBuffer := new(bytes.Buffer)
    err := receiptForStorage.EncodeRLP(receiptBuffer)
    Expect(err).ToNot(HaveOccurred())
    return receiptBuffer.Bytes()
}
31
pkg/super_node/mocks/repository.go
Normal file
31
pkg/super_node/mocks/repository.go
Normal file
@ -0,0 +1,31 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package mocks

import "github.com/vulcanize/vulcanizedb/pkg/ipfs"

// CIDRepository is a mock satisfying the super_node CIDRepository interface, for use in tests
type CIDRepository struct {
    PassedCIDPayload []*ipfs.CIDPayload
    ReturnErr        error
}

// Index records the passed cidPayload and returns the mock's preset error
func (repo *CIDRepository) Index(cidPayload *ipfs.CIDPayload) error {
    repo.PassedCIDPayload = append(repo.PassedCIDPayload, cidPayload)
    return repo.ReturnErr
}
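
As an aside, this mock can stand in for the super_node CIDRepository interface in a unit test. The sketch below is illustrative only and not part of this changeset; the only mock members it relies on are Index and PassedCIDPayload, defined in the file above, while the package and test names are made up.

package mocks_example_test

import (
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

    "github.com/vulcanize/vulcanizedb/pkg/ipfs"
    "github.com/vulcanize/vulcanizedb/pkg/super_node/mocks"
)

// Illustrative sketch: exercises the mock exactly as a service test would.
var _ = Describe("CIDRepository mock (sketch)", func() {
    It("records every payload passed to Index", func() {
        repo := &mocks.CIDRepository{}
        payload := &ipfs.CIDPayload{}

        err := repo.Index(payload)

        Expect(err).ToNot(HaveOccurred())
        Expect(repo.PassedCIDPayload).To(HaveLen(1))
        Expect(repo.PassedCIDPayload[0]).To(Equal(payload))
    })
})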
44
pkg/super_node/mocks/retriever.go
Normal file
44
pkg/super_node/mocks/retriever.go
Normal file
@ -0,0 +1,44 @@
package mocks

import (
    "github.com/vulcanize/vulcanizedb/pkg/config"
    "github.com/vulcanize/vulcanizedb/pkg/ipfs"
)

// MockCIDRetriever is a mock CID retriever for use in tests
type MockCIDRetriever struct {
    GapsToRetrieve              [][2]uint64
    GapsToRetrieveErr           error
    CalledTimes                 int
    FirstBlockNumberToReturn    int64
    RetrieveFirstBlockNumberErr error
}

// RetrieveCIDs mock method
func (*MockCIDRetriever) RetrieveCIDs(streamFilters config.Subscription, blockNumber int64) (*ipfs.CIDWrapper, error) {
    panic("implement me")
}

// RetrieveLastBlockNumber mock method
func (*MockCIDRetriever) RetrieveLastBlockNumber() (int64, error) {
    panic("implement me")
}

// RetrieveFirstBlockNumber mock method
func (mcr *MockCIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
    return mcr.FirstBlockNumberToReturn, mcr.RetrieveFirstBlockNumberErr
}

// RetrieveGapsInData mock method
func (mcr *MockCIDRetriever) RetrieveGapsInData() ([][2]uint64, error) {
    mcr.CalledTimes++
    return mcr.GapsToRetrieve, mcr.GapsToRetrieveErr
}

// SetGapsToRetrieve mock method
func (mcr *MockCIDRetriever) SetGapsToRetrieve(gaps [][2]uint64) {
    if mcr.GapsToRetrieve == nil {
        mcr.GapsToRetrieve = make([][2]uint64, 0)
    }
    mcr.GapsToRetrieve = append(mcr.GapsToRetrieve, gaps...)
}
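
A minimal sketch of driving this mock, again illustrative rather than part of the changeset: fillGaps is a hypothetical consumer that exists only to show the mock's surface, while SetGapsToRetrieve, RetrieveGapsInData and CalledTimes are the members defined above.

package main

import (
    "fmt"

    "github.com/vulcanize/vulcanizedb/pkg/super_node/mocks"
)

// fillGaps is a hypothetical consumer: it asks the retriever for missing block
// ranges and reports how many it would backfill.
func fillGaps(retriever *mocks.MockCIDRetriever) (int, error) {
    gaps, err := retriever.RetrieveGapsInData()
    if err != nil {
        return 0, err
    }
    return len(gaps), nil
}

func main() {
    retriever := &mocks.MockCIDRetriever{}
    retriever.SetGapsToRetrieve([][2]uint64{{6, 9}, {15, 20}})

    count, err := fillGaps(retriever)
    if err != nil {
        panic(err)
    }
    fmt.Println(count)                 // 2 gaps reported
    fmt.Println(retriever.CalledTimes) // 1: RetrieveGapsInData was called once
}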
156
pkg/super_node/repository.go
Normal file
156
pkg/super_node/repository.go
Normal file
@ -0,0 +1,156 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package super_node

import (
    "github.com/jmoiron/sqlx"
    "github.com/lib/pq"
    log "github.com/sirupsen/logrus"

    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
    "github.com/vulcanize/vulcanizedb/pkg/ipfs"
)

// CIDRepository is an interface for indexing ipfs.CIDPayloads
type CIDRepository interface {
    Index(cidPayload *ipfs.CIDPayload) error
}

// Repository is the underlying struct for the CIDRepository interface
type Repository struct {
    db *postgres.DB
}

// NewCIDRepository creates a new pointer to a Repository which satisfies the CIDRepository interface
func NewCIDRepository(db *postgres.DB) *Repository {
    return &Repository{
        db: db,
    }
}

// Index indexes a cidPayload in Postgres
func (repo *Repository) Index(cidPayload *ipfs.CIDPayload) error {
    tx, beginErr := repo.db.Beginx()
    if beginErr != nil {
        return beginErr
    }
    headerID, headerErr := repo.indexHeaderCID(tx, cidPayload.HeaderCID, cidPayload.BlockNumber, cidPayload.BlockHash.Hex())
    if headerErr != nil {
        rollbackErr := tx.Rollback()
        if rollbackErr != nil {
            log.Error(rollbackErr)
        }
        return headerErr
    }
    for uncleHash, cid := range cidPayload.UncleCIDs {
        uncleErr := repo.indexUncleCID(tx, cid, cidPayload.BlockNumber, uncleHash.Hex())
        if uncleErr != nil {
            rollbackErr := tx.Rollback()
            if rollbackErr != nil {
                log.Error(rollbackErr)
            }
            return uncleErr
        }
    }
    trxAndRctErr := repo.indexTransactionAndReceiptCIDs(tx, cidPayload, headerID)
    if trxAndRctErr != nil {
        rollbackErr := tx.Rollback()
        if rollbackErr != nil {
            log.Error(rollbackErr)
        }
        return trxAndRctErr
    }
    stateAndStorageErr := repo.indexStateAndStorageCIDs(tx, cidPayload, headerID)
    if stateAndStorageErr != nil {
        rollbackErr := tx.Rollback()
        if rollbackErr != nil {
            log.Error(rollbackErr)
        }
        return stateAndStorageErr
    }
    return tx.Commit()
}

func (repo *Repository) indexHeaderCID(tx *sqlx.Tx, cid, blockNumber, hash string) (int64, error) {
    var headerID int64
    err := tx.QueryRowx(`INSERT INTO public.header_cids (block_number, block_hash, cid, uncle) VALUES ($1, $2, $3, $4)
        ON CONFLICT (block_number, block_hash) DO UPDATE SET (cid, uncle) = ($3, $4)
        RETURNING id`,
        blockNumber, hash, cid, false).Scan(&headerID)
    return headerID, err
}

func (repo *Repository) indexUncleCID(tx *sqlx.Tx, cid, blockNumber, hash string) error {
    _, err := tx.Exec(`INSERT INTO public.header_cids (block_number, block_hash, cid, uncle) VALUES ($1, $2, $3, $4)
        ON CONFLICT (block_number, block_hash) DO UPDATE SET (cid, uncle) = ($3, $4)`,
        blockNumber, hash, cid, true)
    return err
}

func (repo *Repository) indexTransactionAndReceiptCIDs(tx *sqlx.Tx, payload *ipfs.CIDPayload, headerID int64) error {
    for hash, trxCidMeta := range payload.TransactionCIDs {
        var txID int64
        queryErr := tx.QueryRowx(`INSERT INTO public.transaction_cids (header_id, tx_hash, cid, dst, src) VALUES ($1, $2, $3, $4, $5)
            ON CONFLICT (header_id, tx_hash) DO UPDATE SET (cid, dst, src) = ($3, $4, $5)
            RETURNING id`,
            headerID, hash.Hex(), trxCidMeta.CID, trxCidMeta.Dst, trxCidMeta.Src).Scan(&txID)
        if queryErr != nil {
            return queryErr
        }
        receiptCidMeta, ok := payload.ReceiptCIDs[hash]
        if ok {
            rctErr := repo.indexReceiptCID(tx, receiptCidMeta, txID)
            if rctErr != nil {
                return rctErr
            }
        }
    }
    return nil
}

func (repo *Repository) indexReceiptCID(tx *sqlx.Tx, cidMeta *ipfs.ReceiptMetaData, txID int64) error {
    _, err := tx.Exec(`INSERT INTO public.receipt_cids (tx_id, cid, contract, topic0s) VALUES ($1, $2, $3, $4)`,
        txID, cidMeta.CID, cidMeta.ContractAddress, pq.Array(cidMeta.Topic0s))
    return err
}

func (repo *Repository) indexStateAndStorageCIDs(tx *sqlx.Tx, payload *ipfs.CIDPayload, headerID int64) error {
    for accountKey, stateCID := range payload.StateNodeCIDs {
        var stateID int64
        queryErr := tx.QueryRowx(`INSERT INTO public.state_cids (header_id, state_key, cid, leaf) VALUES ($1, $2, $3, $4)
            ON CONFLICT (header_id, state_key) DO UPDATE SET (cid, leaf) = ($3, $4)
            RETURNING id`,
            headerID, accountKey.Hex(), stateCID.CID, stateCID.Leaf).Scan(&stateID)
        if queryErr != nil {
            return queryErr
        }
        for _, storageCID := range payload.StorageNodeCIDs[accountKey] {
            storageErr := repo.indexStorageCID(tx, storageCID, stateID)
            if storageErr != nil {
                return storageErr
            }
        }
    }
    return nil
}

func (repo *Repository) indexStorageCID(tx *sqlx.Tx, storageCID ipfs.StorageNodeCID, stateID int64) error {
    _, err := tx.Exec(`INSERT INTO public.storage_cids (state_id, storage_key, cid, leaf) VALUES ($1, $2, $3, $4)
        ON CONFLICT (state_id, storage_key) DO UPDATE SET (cid, leaf) = ($3, $4)`,
        stateID, storageCID.Key, storageCID.CID, storageCID.Leaf)
    return err
}
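
The upserts above imply a particular shape for the CID tables. The sketch below is inferred for illustration and is not the migration that ships with this change: column types, nullability and constraint placement are assumptions, while the column lists and ON CONFLICT targets mirror the statements in Index and its helpers.

package schemasketch

// TablesSketch is an inferred illustration of the tables targeted by the Index queries.
// Types and constraints are guesses; column names and conflict targets mirror the code above.
const TablesSketch = `
CREATE TABLE header_cids (
    id           SERIAL PRIMARY KEY,
    block_number BIGINT NOT NULL,
    block_hash   VARCHAR(66) NOT NULL,
    cid          TEXT NOT NULL,
    uncle        BOOLEAN NOT NULL,
    UNIQUE (block_number, block_hash) -- conflict target of the header/uncle upserts
);

CREATE TABLE transaction_cids (
    id        SERIAL PRIMARY KEY,
    header_id INTEGER NOT NULL REFERENCES header_cids (id),
    tx_hash   VARCHAR(66) NOT NULL,
    cid       TEXT NOT NULL,
    dst       VARCHAR(66),
    src       VARCHAR(66),
    UNIQUE (header_id, tx_hash) -- conflict target of the transaction upsert
);

CREATE TABLE receipt_cids (
    id       SERIAL PRIMARY KEY,
    tx_id    INTEGER NOT NULL REFERENCES transaction_cids (id),
    cid      TEXT NOT NULL,
    contract VARCHAR(66),
    topic0s  VARCHAR(66)[]
);

CREATE TABLE state_cids (
    id        SERIAL PRIMARY KEY,
    header_id INTEGER NOT NULL REFERENCES header_cids (id),
    state_key VARCHAR(66) NOT NULL,
    cid       TEXT NOT NULL,
    leaf      BOOLEAN NOT NULL,
    UNIQUE (header_id, state_key) -- conflict target of the state upsert
);

CREATE TABLE storage_cids (
    id          SERIAL PRIMARY KEY,
    state_id    INTEGER NOT NULL REFERENCES state_cids (id),
    storage_key VARCHAR(66) NOT NULL,
    cid         TEXT NOT NULL,
    leaf        BOOLEAN NOT NULL,
    UNIQUE (state_id, storage_key) -- conflict target of the storage upsert
);
`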
110
pkg/super_node/repository_test.go
Normal file
110
pkg/super_node/repository_test.go
Normal file
@ -0,0 +1,110 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package super_node_test

import (
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
    "github.com/vulcanize/vulcanizedb/pkg/ipfs"
    "github.com/vulcanize/vulcanizedb/pkg/ipfs/mocks"
    "github.com/vulcanize/vulcanizedb/pkg/super_node"
)

var _ = Describe("Repository", func() {
    var (
        db   *postgres.DB
        err  error
        repo super_node.CIDRepository
    )
    BeforeEach(func() {
        db, err = super_node.SetupDB()
        Expect(err).ToNot(HaveOccurred())
        repo = super_node.NewCIDRepository(db)
    })
    AfterEach(func() {
        super_node.TearDownDB(db)
    })

    Describe("Index", func() {
        It("Indexes CIDs and related metadata into vulcanizedb", func() {
            err = repo.Index(mocks.MockCIDPayload)
            Expect(err).ToNot(HaveOccurred())
            pgStr := `SELECT cid FROM header_cids
                WHERE block_number = $1 AND uncle IS FALSE`
            // check header was properly indexed
            headers := make([]string, 0)
            err = db.Select(&headers, pgStr, 1)
            Expect(err).ToNot(HaveOccurred())
            Expect(len(headers)).To(Equal(1))
            Expect(headers[0]).To(Equal("mockHeaderCID"))
            // check trxs were properly indexed
            trxs := make([]string, 0)
            pgStr = `SELECT transaction_cids.cid FROM transaction_cids INNER JOIN header_cids ON (transaction_cids.header_id = header_cids.id)
                WHERE header_cids.block_number = $1`
            err = db.Select(&trxs, pgStr, 1)
            Expect(err).ToNot(HaveOccurred())
            Expect(len(trxs)).To(Equal(2))
            Expect(super_node.ListContainsString(trxs, "mockTrxCID1")).To(BeTrue())
            Expect(super_node.ListContainsString(trxs, "mockTrxCID2")).To(BeTrue())
            // check receipts were properly indexed
            rcts := make([]string, 0)
            pgStr = `SELECT receipt_cids.cid FROM receipt_cids, transaction_cids, header_cids
                WHERE receipt_cids.tx_id = transaction_cids.id
                AND transaction_cids.header_id = header_cids.id
                AND header_cids.block_number = $1`
            err = db.Select(&rcts, pgStr, 1)
            Expect(err).ToNot(HaveOccurred())
            Expect(len(rcts)).To(Equal(2))
            Expect(super_node.ListContainsString(rcts, "mockRctCID1")).To(BeTrue())
            Expect(super_node.ListContainsString(rcts, "mockRctCID2")).To(BeTrue())
            // check that state nodes were properly indexed
            stateNodes := make([]ipfs.StateNodeCID, 0)
            pgStr = `SELECT state_cids.cid, state_cids.state_key, state_cids.leaf FROM state_cids INNER JOIN header_cids ON (state_cids.header_id = header_cids.id)
                WHERE header_cids.block_number = $1`
            err = db.Select(&stateNodes, pgStr, 1)
            Expect(err).ToNot(HaveOccurred())
            Expect(len(stateNodes)).To(Equal(2))
            for _, stateNode := range stateNodes {
                if stateNode.CID == "mockStateCID1" {
                    Expect(stateNode.Leaf).To(Equal(true))
                    Expect(stateNode.Key).To(Equal(mocks.ContractLeafKey.Hex()))
                }
                if stateNode.CID == "mockStateCID2" {
                    Expect(stateNode.Leaf).To(Equal(true))
                    Expect(stateNode.Key).To(Equal(mocks.AnotherContractLeafKey.Hex()))
                }
            }
            // check that storage nodes were properly indexed
            storageNodes := make([]ipfs.StorageNodeCID, 0)
            pgStr = `SELECT storage_cids.cid, state_cids.state_key, storage_cids.storage_key, storage_cids.leaf FROM storage_cids, state_cids, header_cids
                WHERE storage_cids.state_id = state_cids.id
                AND state_cids.header_id = header_cids.id
                AND header_cids.block_number = $1`
            err = db.Select(&storageNodes, pgStr, 1)
            Expect(err).ToNot(HaveOccurred())
            Expect(len(storageNodes)).To(Equal(1))
            Expect(storageNodes[0]).To(Equal(ipfs.StorageNodeCID{
                CID:      "mockStorageCID",
                Leaf:     true,
                Key:      "0x0000000000000000000000000000000000000000000000000000000000000001",
                StateKey: mocks.ContractLeafKey.Hex(),
            }))
        })
    })
})
336
pkg/super_node/retriever.go
Normal file
336
pkg/super_node/retriever.go
Normal file
@ -0,0 +1,336 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package super_node

import (
    "math/big"

    "github.com/jmoiron/sqlx"
    "github.com/lib/pq"
    log "github.com/sirupsen/logrus"

    "github.com/vulcanize/vulcanizedb/pkg/config"
    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
    "github.com/vulcanize/vulcanizedb/pkg/ipfs"
)

// CIDRetriever is the interface for retrieving CIDs from the Postgres cache
type CIDRetriever interface {
    RetrieveCIDs(streamFilters config.Subscription, blockNumber int64) (*ipfs.CIDWrapper, error)
    RetrieveLastBlockNumber() (int64, error)
    RetrieveFirstBlockNumber() (int64, error)
    RetrieveGapsInData() ([][2]uint64, error)
}

// EthCIDRetriever is the underlying struct supporting the CIDRetriever interface
type EthCIDRetriever struct {
    db *postgres.DB
}

// NewCIDRetriever returns a pointer to a new EthCIDRetriever which supports the CIDRetriever interface
func NewCIDRetriever(db *postgres.DB) *EthCIDRetriever {
    return &EthCIDRetriever{
        db: db,
    }
}

// RetrieveFirstBlockNumber is used to retrieve the first block number in the db
func (ecr *EthCIDRetriever) RetrieveFirstBlockNumber() (int64, error) {
    var blockNumber int64
    err := ecr.db.Get(&blockNumber, "SELECT block_number FROM header_cids ORDER BY block_number ASC LIMIT 1")
    return blockNumber, err
}

// RetrieveLastBlockNumber is used to retrieve the latest block number in the db
func (ecr *EthCIDRetriever) RetrieveLastBlockNumber() (int64, error) {
    var blockNumber int64
    err := ecr.db.Get(&blockNumber, "SELECT block_number FROM header_cids ORDER BY block_number DESC LIMIT 1 ")
    return blockNumber, err
}

// RetrieveCIDs is used to retrieve all of the CIDs which conform to the passed StreamFilters
func (ecr *EthCIDRetriever) RetrieveCIDs(streamFilters config.Subscription, blockNumber int64) (*ipfs.CIDWrapper, error) {
    log.Debug("retrieving cids")
    tx, beginErr := ecr.db.Beginx()
    if beginErr != nil {
        return nil, beginErr
    }
    // THIS IS SUPER EXPENSIVE HAVING TO CYCLE THROUGH EACH BLOCK, NEED BETTER WAY TO FETCH CIDS
    // WHILE STILL MAINTAINING RELATION INFO ABOUT WHAT BLOCK THE CIDS BELONG TO
    cw := new(ipfs.CIDWrapper)
    cw.BlockNumber = big.NewInt(blockNumber)

    // Retrieve cached header CIDs
    if !streamFilters.HeaderFilter.Off {
        var headersErr error
        cw.Headers, headersErr = ecr.retrieveHeaderCIDs(tx, streamFilters, blockNumber)
        if headersErr != nil {
            rollbackErr := tx.Rollback()
            if rollbackErr != nil {
                log.Error(rollbackErr)
            }
            log.Error("header cid retrieval error")
            return nil, headersErr
        }
        if streamFilters.HeaderFilter.Uncles {
            var unclesErr error
            cw.Uncles, unclesErr = ecr.retrieveUncleCIDs(tx, streamFilters, blockNumber)
            if unclesErr != nil {
                rollbackErr := tx.Rollback()
                if rollbackErr != nil {
                    log.Error(rollbackErr)
                }
                log.Error("uncle cid retrieval error")
                return nil, unclesErr
            }
        }
    }

    // Retrieve cached trx CIDs
    var trxIds []int64
    if !streamFilters.TrxFilter.Off {
        var trxsErr error
        cw.Transactions, trxIds, trxsErr = ecr.retrieveTrxCIDs(tx, streamFilters, blockNumber)
        if trxsErr != nil {
            rollbackErr := tx.Rollback()
            if rollbackErr != nil {
                log.Error(rollbackErr)
            }
            log.Error("transaction cid retrieval error")
            return nil, trxsErr
        }
    }

    // Retrieve cached receipt CIDs
    if !streamFilters.ReceiptFilter.Off {
        var rctsErr error
        cw.Receipts, rctsErr = ecr.retrieveRctCIDs(tx, streamFilters, blockNumber, trxIds)
        if rctsErr != nil {
            rollbackErr := tx.Rollback()
            if rollbackErr != nil {
                log.Error(rollbackErr)
            }
            log.Error("receipt cid retrieval error")
            return nil, rctsErr
        }
    }

    // Retrieve cached state CIDs
    if !streamFilters.StateFilter.Off {
        var stateErr error
        cw.StateNodes, stateErr = ecr.retrieveStateCIDs(tx, streamFilters, blockNumber)
        if stateErr != nil {
            rollbackErr := tx.Rollback()
            if rollbackErr != nil {
                log.Error(rollbackErr)
            }
            log.Error("state cid retrieval error")
            return nil, stateErr
        }
    }

    // Retrieve cached storage CIDs
    if !streamFilters.StorageFilter.Off {
        var storageErr error
        cw.StorageNodes, storageErr = ecr.retrieveStorageCIDs(tx, streamFilters, blockNumber)
        if storageErr != nil {
            rollbackErr := tx.Rollback()
            if rollbackErr != nil {
                log.Error(rollbackErr)
            }
            log.Error("storage cid retrieval error")
            return nil, storageErr
        }
    }

    return cw, tx.Commit()
}

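For orientation, one plausible way to fill out a config.Subscription before calling RetrieveCIDs is sketched below. It is illustrative only and not part of this changeset: the filter field names are the ones this retriever reads, but the slice element types, the package name, and the placeholder address and topic values are assumptions.

package subscriptionsketch

import (
    "fmt"

    "github.com/vulcanize/vulcanizedb/pkg/config"
    "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
    "github.com/vulcanize/vulcanizedb/pkg/super_node"
)

// buildFilters returns a subscription that keeps headers (no uncles), narrows
// transactions by recipient and receipts by topic0, and skips state and storage.
// The address and topic values are placeholders.
func buildFilters() config.Subscription {
    var sub config.Subscription
    sub.HeaderFilter.Off = false
    sub.HeaderFilter.Uncles = false
    sub.TrxFilter.Dst = []string{"0x0000000000000000000000000000000000000001"}
    sub.ReceiptFilter.Topic0s = []string{"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"}
    sub.StateFilter.Off = true
    sub.StorageFilter.Off = true
    return sub
}

// retrieveForBlock shows the call shape only; a real caller lives in the super node service.
func retrieveForBlock(db *postgres.DB, blockNumber int64) error {
    retriever := super_node.NewCIDRetriever(db)
    cids, err := retriever.RetrieveCIDs(buildFilters(), blockNumber)
    if err != nil {
        return err
    }
    fmt.Printf("headers: %d, trxs: %d, receipts: %d\n",
        len(cids.Headers), len(cids.Transactions), len(cids.Receipts))
    return nil
}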
func (ecr *EthCIDRetriever) retrieveHeaderCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64) ([]string, error) {
    log.Debug("retrieving header cids for block ", blockNumber)
    headers := make([]string, 0)
    pgStr := `SELECT cid FROM header_cids
        WHERE block_number = $1 AND uncle IS FALSE`
    err := tx.Select(&headers, pgStr, blockNumber)
    return headers, err
}

func (ecr *EthCIDRetriever) retrieveUncleCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64) ([]string, error) {
log.Debug("retrieving header cids for block ", blockNumber)
|
||||
    headers := make([]string, 0)
    pgStr := `SELECT cid FROM header_cids
        WHERE block_number = $1 AND uncle IS TRUE`
    err := tx.Select(&headers, pgStr, blockNumber)
    return headers, err
}

func (ecr *EthCIDRetriever) retrieveTrxCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64) ([]string, []int64, error) {
    log.Debug("retrieving transaction cids for block ", blockNumber)
    args := make([]interface{}, 0, 3)
    type result struct {
        ID  int64  `db:"id"`
        Cid string `db:"cid"`
    }
    results := make([]result, 0)
    pgStr := `SELECT transaction_cids.id, transaction_cids.cid FROM transaction_cids INNER JOIN header_cids ON (transaction_cids.header_id = header_cids.id)
        WHERE header_cids.block_number = $1`
    args = append(args, blockNumber)
    if len(streamFilters.TrxFilter.Dst) > 0 {
        pgStr += ` AND transaction_cids.dst = ANY($2::VARCHAR(66)[])`
        args = append(args, pq.Array(streamFilters.TrxFilter.Dst))
    }
    if len(streamFilters.TrxFilter.Src) > 0 {
        pgStr += ` AND transaction_cids.src = ANY($3::VARCHAR(66)[])`
        args = append(args, pq.Array(streamFilters.TrxFilter.Src))
    }
    err := tx.Select(&results, pgStr, args...)
    if err != nil {
        return nil, nil, err
    }
    ids := make([]int64, 0, len(results))
    cids := make([]string, 0, len(results))
    for _, res := range results {
        cids = append(cids, res.Cid)
        ids = append(ids, res.ID)
    }
    return cids, ids, nil
}

func (ecr *EthCIDRetriever) retrieveRctCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64, trxIds []int64) ([]string, error) {
    log.Debug("retrieving receipt cids for block ", blockNumber)
    args := make([]interface{}, 0, 4)
    pgStr := `SELECT receipt_cids.cid FROM receipt_cids, transaction_cids, header_cids
        WHERE receipt_cids.tx_id = transaction_cids.id
        AND transaction_cids.header_id = header_cids.id
        AND header_cids.block_number = $1`
    args = append(args, blockNumber)
    if len(streamFilters.ReceiptFilter.Topic0s) > 0 {
        pgStr += ` AND ((receipt_cids.topic0s && $2::VARCHAR(66)[]`
        args = append(args, pq.Array(streamFilters.ReceiptFilter.Topic0s))
        if len(streamFilters.ReceiptFilter.Contracts) > 0 {
            pgStr += ` AND receipt_cids.contract = ANY($3::VARCHAR(66)[]))`
            args = append(args, pq.Array(streamFilters.ReceiptFilter.Contracts))
            if len(trxIds) > 0 {
                pgStr += ` OR receipt_cids.tx_id = ANY($4::INTEGER[]))`
                args = append(args, pq.Array(trxIds))
            } else {
                pgStr += `)`
            }
        } else {
            pgStr += `)`
            if len(trxIds) > 0 {
                pgStr += ` OR receipt_cids.tx_id = ANY($3::INTEGER[]))`
                args = append(args, pq.Array(trxIds))
            } else {
                pgStr += `)`
            }
        }
    } else {
        if len(streamFilters.ReceiptFilter.Contracts) > 0 {
            pgStr += ` AND (receipt_cids.contract = ANY($2::VARCHAR(66)[])`
            args = append(args, pq.Array(streamFilters.ReceiptFilter.Contracts))
            if len(trxIds) > 0 {
                pgStr += ` OR receipt_cids.tx_id = ANY($3::INTEGER[]))`
                args = append(args, pq.Array(trxIds))
            } else {
                pgStr += `)`
            }
        } else if len(trxIds) > 0 {
            pgStr += ` AND receipt_cids.tx_id = ANY($2::INTEGER[])`
            args = append(args, pq.Array(trxIds))
        }
    }
    receiptCids := make([]string, 0)
    err := tx.Select(&receiptCids, pgStr, args...)
    return receiptCids, err
}

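Because retrieveRctCIDs builds its SQL by string concatenation, the final query can be hard to read from the branches alone. The constant below reproduces, as a reading aid only and not as part of this changeset, the statement assembled when Topic0s and Contracts are both populated and trxIds is empty ($1 is the block number, $2 the topic0 array, $3 the contract array); the whitespace is rearranged for readability.

package querysketch

// rctQueryExample mirrors the SQL assembled by retrieveRctCIDs for the case where both
// the topic0 and contract filters are set and no trxIds are passed.
const rctQueryExample = `SELECT receipt_cids.cid FROM receipt_cids, transaction_cids, header_cids
    WHERE receipt_cids.tx_id = transaction_cids.id
    AND transaction_cids.header_id = header_cids.id
    AND header_cids.block_number = $1
    AND ((receipt_cids.topic0s && $2::VARCHAR(66)[] AND receipt_cids.contract = ANY($3::VARCHAR(66)[])))`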
func (ecr *EthCIDRetriever) retrieveStateCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64) ([]ipfs.StateNodeCID, error) {
    log.Debug("retrieving state cids for block ", blockNumber)
    args := make([]interface{}, 0, 2)
    pgStr := `SELECT state_cids.cid, state_cids.state_key, state_cids.leaf FROM state_cids INNER JOIN header_cids ON (state_cids.header_id = header_cids.id)
        WHERE header_cids.block_number = $1`
    args = append(args, blockNumber)
    addrLen := len(streamFilters.StateFilter.Addresses)
    if addrLen > 0 {
        keys := make([]string, 0, addrLen)
        for _, addr := range streamFilters.StateFilter.Addresses {
            keys = append(keys, ipfs.HexToKey(addr).Hex())
        }
        pgStr += ` AND state_cids.state_key = ANY($2::VARCHAR(66)[])`
        args = append(args, pq.Array(keys))
    }
    if !streamFilters.StorageFilter.IntermediateNodes {
        pgStr += ` AND state_cids.leaf = TRUE`
    }
    stateNodeCIDs := make([]ipfs.StateNodeCID, 0)
    err := tx.Select(&stateNodeCIDs, pgStr, args...)
    return stateNodeCIDs, err
}

func (ecr *EthCIDRetriever) retrieveStorageCIDs(tx *sqlx.Tx, streamFilters config.Subscription, blockNumber int64) ([]ipfs.StorageNodeCID, error) {
    log.Debug("retrieving storage cids for block ", blockNumber)
    args := make([]interface{}, 0, 3)
    pgStr := `SELECT storage_cids.cid, state_cids.state_key, storage_cids.storage_key, storage_cids.leaf FROM storage_cids, state_cids, header_cids
        WHERE storage_cids.state_id = state_cids.id
        AND state_cids.header_id = header_cids.id
        AND header_cids.block_number = $1`
    args = append(args, blockNumber)
    addrLen := len(streamFilters.StorageFilter.Addresses)
    if addrLen > 0 {
        keys := make([]string, 0, addrLen)
        for _, addr := range streamFilters.StorageFilter.Addresses {
            keys = append(keys, ipfs.HexToKey(addr).Hex())
        }
        pgStr += ` AND state_cids.state_key = ANY($2::VARCHAR(66)[])`
        args = append(args, pq.Array(keys))
    }
    if len(streamFilters.StorageFilter.StorageKeys) > 0 {
        pgStr += ` AND storage_cids.storage_key = ANY($3::VARCHAR(66)[])`
        args = append(args, pq.Array(streamFilters.StorageFilter.StorageKeys))
    }
    if !streamFilters.StorageFilter.IntermediateNodes {
        pgStr += ` AND storage_cids.leaf = TRUE`
    }
    storageNodeCIDs := make([]ipfs.StorageNodeCID, 0)
    err := tx.Select(&storageNodeCIDs, pgStr, args...)
    return storageNodeCIDs, err
}

type gap struct {
    Start uint64 `db:"start"`
    Stop  uint64 `db:"stop"`
}

// RetrieveGapsInData is used to find the block numbers at which we are missing data in the db
func (ecr *EthCIDRetriever) RetrieveGapsInData() ([][2]uint64, error) {
    pgStr := `SELECT header_cids.block_number + 1 AS start, min(fr.block_number) - 1 AS stop FROM header_cids
        LEFT JOIN header_cids r on header_cids.block_number = r.block_number - 1
        LEFT JOIN header_cids fr on header_cids.block_number < fr.block_number
        WHERE r.block_number is NULL and fr.block_number IS NOT NULL
        GROUP BY header_cids.block_number, r.block_number`
    gaps := make([]gap, 0)
    err := ecr.db.Select(&gaps, pgStr)
    if err != nil {
        return nil, err
    }
    gapRanges := make([][2]uint64, 0)
    for _, gap := range gaps {
        gapRanges = append(gapRanges, [2]uint64{gap.Start, gap.Stop})
    }
    return gapRanges, nil
}
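
To illustrate the gap query's contract with made-up numbers: if header_cids holds blocks 1, 2, 5, 6 and 10, the rows returned translate into the inclusive ranges [3, 4] and [7, 9]. The snippet below, which is not part of this change, simply shows how such a result would be consumed.

package gapsketch

import "fmt"

// reportGaps prints each missing inclusive block range exactly as RetrieveGapsInData
// would hand it back; the slice literal mirrors the example in the text above.
func reportGaps() {
    gaps := [][2]uint64{{3, 4}, {7, 9}}
    for _, g := range gaps {
        fmt.Printf("missing blocks %d through %d\n", g[0], g[1])
    }
}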